code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os
import sys
import time
import numpy as num
#------------------------
# ANUGA Modules
#------------------------
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import rectangular_cross
from anuga import create_domain_from_file
from anuga_parallel.sequential_distribute import sequential_distribute_dump
#--------------------------------------------------------------------------
# Setup parameters
#--------------------------------------------------------------------------
#mesh_filename = "merimbula_10785_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
mesh_filename = "merimbula_17156.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "merimbula_43200_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "test-100.tsh" ; x0 = 0.25 ; x1 = 0.5
#mesh_filename = "test-20.tsh" ; x0 = 250.0 ; x1 = 350.0
yieldstep = 50
finaltime = 1500
verbose = True
#--------------------------------------------------------------------------
# Setup procedures
#--------------------------------------------------------------------------
class Set_Stage:
    """Initial-condition callable: base water level 1.0 plus a bump of
    height h wherever x0 < x < x1 (works element-wise on numpy arrays)."""
    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h
    def __call__(self, x, y):
        # Boolean mask selects the bump region; outside it the level is 1.0.
        inside = (x > self.x0) & (x < self.x1)
        return 1.0 + self.h * inside
class Set_Elevation:
    """Elevation callable: a plane sloping as x / h.

    NOTE(review): __init__ copies the module-level globals x0 and x1 onto
    the instance (they are not parameters) and __call__ never uses them —
    retained verbatim for compatibility.
    """
    def __init__(self, h=1.0):
        self.x0 = x0  # module global, mirrors original behaviour
        self.x1 = x1  # module global, mirrors original behaviour
        self.h = h
    def __call__(self, x, y):
        return x / self.h
#--------------------------------------------------------------------------
# Setup Sequential Domain
#--------------------------------------------------------------------------
# Build the sequential domain from the triangular mesh file and set the
# initial stage (water level) to a 2.0-high bump between x0 and x1.
domain = create_domain_from_file(mesh_filename)
domain.set_quantity('stage', Set_Stage(x0, x1, 2.0))
#domain.set_datadir('.')
domain.set_name('merimbula_new')
domain.set_store(True)
#--------------------------------------------------------------------------
# Distribute sequential domain on processor 0 to other processors
#--------------------------------------------------------------------------
# Partition the domain into 4 sub-domains and dump them to disk for later
# parallel runs (Python 2 print statement: this targets ANUGA's Py2 toolchain).
if verbose: print 'DISTRIBUTING DOMAIN'
sequential_distribute_dump(domain, 4, verbose=True) | examples/parallel/run_sequential_dist_distribute_merimbula.py |
import os
import sys
import time
import numpy as num
#------------------------
# ANUGA Modules
#------------------------
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import rectangular_cross
from anuga import create_domain_from_file
from anuga_parallel.sequential_distribute import sequential_distribute_dump
#--------------------------------------------------------------------------
# Setup parameters
#--------------------------------------------------------------------------
#mesh_filename = "merimbula_10785_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
mesh_filename = "merimbula_17156.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "merimbula_43200_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "test-100.tsh" ; x0 = 0.25 ; x1 = 0.5
#mesh_filename = "test-20.tsh" ; x0 = 250.0 ; x1 = 350.0
yieldstep = 50
finaltime = 1500
verbose = True
#--------------------------------------------------------------------------
# Setup procedures
#--------------------------------------------------------------------------
class Set_Stage:
    """Initial-condition callable: base water level 1.0 plus a bump of
    height h wherever x0 < x < x1 (works element-wise on numpy arrays)."""
    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h
    def __call__(self, x, y):
        # Boolean mask selects the bump region; outside it the level is 1.0.
        inside = (x > self.x0) & (x < self.x1)
        return 1.0 + self.h * inside
class Set_Elevation:
    """Elevation callable: a plane sloping as x / h.

    NOTE(review): __init__ copies the module-level globals x0 and x1 onto
    the instance (they are not parameters) and __call__ never uses them —
    retained verbatim for compatibility.
    """
    def __init__(self, h=1.0):
        self.x0 = x0  # module global, mirrors original behaviour
        self.x1 = x1  # module global, mirrors original behaviour
        self.h = h
    def __call__(self, x, y):
        return x / self.h
#--------------------------------------------------------------------------
# Setup Sequential Domain
#--------------------------------------------------------------------------
# Build the sequential domain from the triangular mesh file and set the
# initial stage (water level) to a 2.0-high bump between x0 and x1.
domain = create_domain_from_file(mesh_filename)
domain.set_quantity('stage', Set_Stage(x0, x1, 2.0))
#domain.set_datadir('.')
domain.set_name('merimbula_new')
domain.set_store(True)
#--------------------------------------------------------------------------
# Distribute sequential domain on processor 0 to other processors
#--------------------------------------------------------------------------
# Partition the domain into 4 sub-domains and dump them to disk for later
# parallel runs (Python 2 print statement: this targets ANUGA's Py2 toolchain).
if verbose: print 'DISTRIBUTING DOMAIN'
sequential_distribute_dump(domain, 4, verbose=True) | 0.474875 | 0.198064 |
from collections import OrderedDict, Mapping, Container
from pprint import pprint
from sys import getsizeof
def deep_compare(a, b, pointer='/'):
    """Recursively compare two data structures; raise (via x()) on the first
    difference found, return None silently if they are equal.

    pointer accumulates a path-like location of the mismatch for the error
    message, e.g. '/[2][name]'.
    """
    if a == b:
        return
    if type(a) != type(b):
        reason = 'Different data types'
        extra = str((type(a), type(b)))
        x(pointer, reason, extra)
    elif type(a) in (set, frozenset):
        pointer += 'set()'
        if len(a) != len(b):
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        # Sets are unordered and not indexable, so element-wise recursion is
        # impossible; report the symmetric difference instead.  (The original
        # had dead code after this raise that indexed into the sets, which
        # would have been a TypeError.)
        reason = 'Different items'
        extra = (a - b, b - a)
        x(pointer, reason, extra)
    elif type(a) in (list, tuple):
        if len(a) != len(b):
            pointer += '[]'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if sorted(a) == sorted(b):
            # Same multiset of items, so only the ordering differs.
            pointer += '[]'
            reason = 'Different sort order'
            extra = 'N/A'
            x(pointer, reason, extra)
        for i in range(len(a)):
            deep_compare(a[i], b[i], pointer + '[{}]'.format(i))
    elif type(a) in (dict, OrderedDict):
        if len(a) != len(b):
            pointer += '{}'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if set(a.keys()) != set(b.keys()):
            pointer += '{}'
            reason = 'Different keys'
            extra = (a.keys(), b.keys())
            x(pointer, reason, extra)
        for k in a:
            deep_compare(a[k], b[k], pointer + '[{}]'.format(k))
    else:
        reason = 'Different objects'
        extra = (a, b)
        x(pointer, reason, extra)
def x(pointer, reason, extra):
    """Abort the comparison by raising RuntimeError describing the mismatch."""
    raise RuntimeError(
        'Objects are not the same. Pointer: {}. Reason: {}. Extra: {}'.format(
            pointer, reason, extra))
def compare(a, b):
    """Compare a and b, pretty-printing the first difference instead of raising."""
    try:
        deep_compare(a, b, '/')
    except RuntimeError as e:
        # str(e) rather than e.message: the .message attribute was deprecated
        # in Python 2.6 and removed in Python 3; str(e) is equivalent here.
        pprint(str(e))
def deep_getsizeof(o, ids):
    """Find the memory footprint of a Python object.

    Recursively drills down a Python object graph (nested dicts, lists,
    tuples, sets).  sys.getsizeof alone is shallow: it counts each contained
    object as a pointer only, regardless of how big it really is.

    :param o: the object to measure
    :param ids: set of id()s already visited (pass set() at the top call)
    :return: total size in bytes
    """
    d = deep_getsizeof
    if id(o) in ids:
        # Already counted through another reference; also breaks cycles.
        return 0
    r = getsizeof(o)
    ids.add(id(o))
    # Bug fix: the original tested isinstance(0, unicode) — the integer
    # literal 0 — so unicode strings fell through to the Container branch
    # and were traversed character by character.
    if isinstance(o, str) or isinstance(o, unicode):
        return r
    if isinstance(o, Mapping):
        return r + sum(d(k, ids) + d(v, ids) for k, v in o.iteritems())
    if isinstance(o, Container):
        return r + sum(d(x, ids) for x in o)
    return r
from pprint import pprint
from sys import getsizeof
def deep_compare(a, b, pointer='/'):
    """Recursively compare two data structures; raise (via x()) on the first
    difference found, return None silently if they are equal.

    pointer accumulates a path-like location of the mismatch for the error
    message, e.g. '/[2][name]'.
    """
    if a == b:
        return
    if type(a) != type(b):
        reason = 'Different data types'
        extra = str((type(a), type(b)))
        x(pointer, reason, extra)
    elif type(a) in (set, frozenset):
        pointer += 'set()'
        if len(a) != len(b):
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        # Sets are unordered and not indexable, so element-wise recursion is
        # impossible; report the symmetric difference instead.  (The original
        # had dead code after this raise that indexed into the sets, which
        # would have been a TypeError.)
        reason = 'Different items'
        extra = (a - b, b - a)
        x(pointer, reason, extra)
    elif type(a) in (list, tuple):
        if len(a) != len(b):
            pointer += '[]'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if sorted(a) == sorted(b):
            # Same multiset of items, so only the ordering differs.
            pointer += '[]'
            reason = 'Different sort order'
            extra = 'N/A'
            x(pointer, reason, extra)
        for i in range(len(a)):
            deep_compare(a[i], b[i], pointer + '[{}]'.format(i))
    elif type(a) in (dict, OrderedDict):
        if len(a) != len(b):
            pointer += '{}'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if set(a.keys()) != set(b.keys()):
            pointer += '{}'
            reason = 'Different keys'
            extra = (a.keys(), b.keys())
            x(pointer, reason, extra)
        for k in a:
            deep_compare(a[k], b[k], pointer + '[{}]'.format(k))
    else:
        reason = 'Different objects'
        extra = (a, b)
        x(pointer, reason, extra)
def x(pointer, reason, extra):
    """Abort the comparison by raising RuntimeError describing the mismatch."""
    raise RuntimeError(
        'Objects are not the same. Pointer: {}. Reason: {}. Extra: {}'.format(
            pointer, reason, extra))
def compare(a, b):
    """Compare a and b, pretty-printing the first difference instead of raising."""
    try:
        deep_compare(a, b, '/')
    except RuntimeError as e:
        # str(e) rather than e.message: the .message attribute was deprecated
        # in Python 2.6 and removed in Python 3; str(e) is equivalent here.
        pprint(str(e))
def deep_getsizeof(o, ids):
    """Find the memory footprint of a Python object.

    Recursively drills down a Python object graph (nested dicts, lists,
    tuples, sets).  sys.getsizeof alone is shallow: it counts each contained
    object as a pointer only, regardless of how big it really is.

    :param o: the object to measure
    :param ids: set of id()s already visited (pass set() at the top call)
    :return: total size in bytes
    """
    d = deep_getsizeof
    if id(o) in ids:
        # Already counted through another reference; also breaks cycles.
        return 0
    r = getsizeof(o)
    ids.add(id(o))
    # Bug fix: the original tested isinstance(0, unicode) — the integer
    # literal 0 — so unicode strings fell through to the Container branch
    # and were traversed character by character.
    if isinstance(o, str) or isinstance(o, unicode):
        return r
    if isinstance(o, Mapping):
        return r + sum(d(k, ids) + d(v, ids) for k, v in o.iteritems())
    if isinstance(o, Container):
        return r + sum(d(x, ids) for x in o)
    return r
import sys
import os
import re
from java.lang import System
#Python Script to manage applications in weblogic server.
#This script takes input from command line and executes it.
#It can be used to check status,stop,start,deploy,undeploy of applications in weblogic server using weblogic wlst tool.
import getopt
#========================
#Usage Section
#========================
def usage():
    # Print command-line usage for this WLST script and exit with status 2.
    print "Usage:"
    print "java weblogic.WLST manageApplication.py -u username -p password -a adminUrl [:] -n deploymentName -f deploymentFile -t deploymentTarget\n"
    sys.exit(2)
#========================
#Connect To Domain
#========================
def connectToDomain():
    # Connect to the admin server via the WLST built-in connect(); exits on
    # failure.  username/password/adminUrl are module globals set in __main__.
    try:
        connect(username, password, adminUrl)
        print 'Successfully connected to the domain\n'
    except:
        print 'The domain is unreacheable. Please try again\n'
        exit()
#========================
#Checking Application Status Section
#========================
def appstatus(deploymentName, deploymentTarget):
    # Return the application's current state as reported by the
    # AppRuntimeStateRuntime MBean; exits on any WLST error.
    try:
        domainRuntime()
        cd('domainRuntime:/AppRuntimeStateRuntime/AppRuntimeStateRuntime')
        currentState = cmo.getCurrentState(deploymentName, deploymentTarget)
        return currentState
    except:
        print 'Error in getting current status of ' +deploymentName+ '\n'
        exit()
#========================
#Application undeployment Section
#========================
def undeployApplication():
    # Stop and undeploy the application on the configured target.
    # NOTE(review): errors are reported but execution deliberately continues.
    try:
        print 'stopping and undeploying ..' +deploymentName+ '\n'
        stopApplication(deploymentName, targets=deploymentTarget)
        undeploy(deploymentName, targets=deploymentTarget)
    except:
        print 'Error during the stop and undeployment of ' +deploymentName+ '\n'
#========================
#Applications deployment Section
#========================
def deployApplication():
    # Deploy the application archive and start it; exits on failure.
    try:
        print 'Deploying the application ' +deploymentName+ '\n'
        deploy(deploymentName,deploymentFile,targets=deploymentTarget)
        startApplication(deploymentName)
    except:
        print 'Error during the deployment of ' +deploymentName+ '\n'
        exit()
#========================
#Input Values Validation Section
#========================
if __name__=='__main__' or __name__== 'main':
try:
opts, args = getopt.getopt(sys.argv[1:], "u:p:a:n:f:t:", ["username=", "password=", "adminUrl=", "deploymentName=", "deploymentFile=", "deploymentTarget="])
except getopt.GetoptError, err:
print str(err)
usage()
username = ''
password = ''
adminUrl = ''
deploymentName = ''
deploymentFile = ''
deploymentTarget = ''
for opt, arg in opts:
if opt == "-u":
username = arg
elif opt == "-p":
password = arg
elif opt == "-a":
adminUrl = arg
elif opt == "-n":
deploymentName = arg
elif opt == "-f":
deploymentFile = arg
elif opt == "-t":
deploymentTarget = arg
if username == "":
print "Missing \"-u username\" parameter.\n"
usage()
elif password == "":
print "Missing \"-p password\" parameter.\n"
usage()
elif adminUrl == "":
print "Missing \"-a adminUrl\" parameter.\n"
usage()
elif deploymentName == "":
print "Missing \"-n deploymentName\" parameter.\n"
usage()
elif deploymentFile == "":
print "Missing \"-c deploymentFile\" parameter.\n"
usage()
elif deploymentTarget == "":
print "Missing \"-c deploymentTarget\" parameter.\n"
usage()
#========================
#Main Control Block For Operations
#========================
def deployUndeployMain():
appList = re.findall(deploymentName, ls('/AppDeployments'))
if len(appList) >= 1:
print 'Application'+deploymentName+' Found on server '+deploymentTarget+', undeploying application..'
print '=============================================================================='
print 'Application Already Exists, Undeploying...'
print '=============================================================================='
undeployApplication()
print '=============================================================================='
print 'Redeploying Application '+deploymentName+' on'+deploymentTarget+' server...'
print '=============================================================================='
deployApplication()
else:
print '=============================================================================='
print 'No application with same name...'
print 'Deploying Application '+deploymentName+' on'+deploymentTarget+' server...'
print '=============================================================================='
deployApplication()
#========================
#Execute Block
#========================
# Driver sequence: connect to the admin server, deploy (or redeploy) the
# application, then disconnect and leave WLST.
print '=============================================================================='
print 'Connecting to Admin Server...'
print '=============================================================================='
connectToDomain()
print '=============================================================================='
print 'Starting Deployment...'
print '=============================================================================='
deployUndeployMain()
print '=============================================================================='
print 'Execution completed...'
print '=============================================================================='
disconnect()
exit() | files/appDeploymentScript.py | import sys
import os
import re
from java.lang import System
#Python Script to manage applications in weblogic server.
#This script takes input from command line and executes it.
#It can be used to check status,stop,start,deploy,undeploy of applications in weblogic server using weblogic wlst tool.
import getopt
#========================
#Usage Section
#========================
def usage():
    # Print command-line usage for this WLST script and exit with status 2.
    print "Usage:"
    print "java weblogic.WLST manageApplication.py -u username -p password -a adminUrl [:] -n deploymentName -f deploymentFile -t deploymentTarget\n"
    sys.exit(2)
#========================
#Connect To Domain
#========================
def connectToDomain():
    # Connect to the admin server via the WLST built-in connect(); exits on
    # failure.  username/password/adminUrl are module globals set in __main__.
    try:
        connect(username, password, adminUrl)
        print 'Successfully connected to the domain\n'
    except:
        print 'The domain is unreacheable. Please try again\n'
        exit()
#========================
#Checking Application Status Section
#========================
def appstatus(deploymentName, deploymentTarget):
    # Return the application's current state as reported by the
    # AppRuntimeStateRuntime MBean; exits on any WLST error.
    try:
        domainRuntime()
        cd('domainRuntime:/AppRuntimeStateRuntime/AppRuntimeStateRuntime')
        currentState = cmo.getCurrentState(deploymentName, deploymentTarget)
        return currentState
    except:
        print 'Error in getting current status of ' +deploymentName+ '\n'
        exit()
#========================
#Application undeployment Section
#========================
def undeployApplication():
    # Stop and undeploy the application on the configured target.
    # NOTE(review): errors are reported but execution deliberately continues.
    try:
        print 'stopping and undeploying ..' +deploymentName+ '\n'
        stopApplication(deploymentName, targets=deploymentTarget)
        undeploy(deploymentName, targets=deploymentTarget)
    except:
        print 'Error during the stop and undeployment of ' +deploymentName+ '\n'
#========================
#Applications deployment Section
#========================
def deployApplication():
    # Deploy the application archive and start it; exits on failure.
    try:
        print 'Deploying the application ' +deploymentName+ '\n'
        deploy(deploymentName,deploymentFile,targets=deploymentTarget)
        startApplication(deploymentName)
    except:
        print 'Error during the deployment of ' +deploymentName+ '\n'
        exit()
#========================
#Input Values Validation Section
#========================
if __name__=='__main__' or __name__== 'main':
try:
opts, args = getopt.getopt(sys.argv[1:], "u:p:a:n:f:t:", ["username=", "password=", "adminUrl=", "deploymentName=", "deploymentFile=", "deploymentTarget="])
except getopt.GetoptError, err:
print str(err)
usage()
username = ''
password = ''
adminUrl = ''
deploymentName = ''
deploymentFile = ''
deploymentTarget = ''
for opt, arg in opts:
if opt == "-u":
username = arg
elif opt == "-p":
password = arg
elif opt == "-a":
adminUrl = arg
elif opt == "-n":
deploymentName = arg
elif opt == "-f":
deploymentFile = arg
elif opt == "-t":
deploymentTarget = arg
if username == "":
print "Missing \"-u username\" parameter.\n"
usage()
elif password == "":
print "Missing \"-p password\" parameter.\n"
usage()
elif adminUrl == "":
print "Missing \"-a adminUrl\" parameter.\n"
usage()
elif deploymentName == "":
print "Missing \"-n deploymentName\" parameter.\n"
usage()
elif deploymentFile == "":
print "Missing \"-c deploymentFile\" parameter.\n"
usage()
elif deploymentTarget == "":
print "Missing \"-c deploymentTarget\" parameter.\n"
usage()
#========================
#Main Control Block For Operations
#========================
def deployUndeployMain():
appList = re.findall(deploymentName, ls('/AppDeployments'))
if len(appList) >= 1:
print 'Application'+deploymentName+' Found on server '+deploymentTarget+', undeploying application..'
print '=============================================================================='
print 'Application Already Exists, Undeploying...'
print '=============================================================================='
undeployApplication()
print '=============================================================================='
print 'Redeploying Application '+deploymentName+' on'+deploymentTarget+' server...'
print '=============================================================================='
deployApplication()
else:
print '=============================================================================='
print 'No application with same name...'
print 'Deploying Application '+deploymentName+' on'+deploymentTarget+' server...'
print '=============================================================================='
deployApplication()
#========================
#Execute Block
#========================
# Driver sequence: connect to the admin server, deploy (or redeploy) the
# application, then disconnect and leave WLST.
print '=============================================================================='
print 'Connecting to Admin Server...'
print '=============================================================================='
connectToDomain()
print '=============================================================================='
print 'Starting Deployment...'
print '=============================================================================='
deployUndeployMain()
print '=============================================================================='
print 'Execution completed...'
print '=============================================================================='
disconnect()
exit() | 0.055663 | 0.076339 |
import sys
from nose.tools import assert_almost_equals, eq_
from unitbench import TimeSet, Benchmark, BenchResult, Reporter, CsvReporter
from unittest import TestCase
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class NullReporter(Reporter):
    """Reporter that silently discards benchmark results (test helper)."""
    pass
class OneRun(Benchmark):
    """Benchmark base with no warmup and a single repeat (fast test helper)."""
    def warmup(self):
        return 0
    def repeats(self):
        return 1
class TestBenchResult(TestCase):
    """Statistics computed by BenchResult over a list of TimeSet samples."""
    def test_stats(self):
        # TimeSet(wall, user, sys): wall times 3,4,4,5,6,8; user 1,2,4,5,7,11.
        times = []
        times.append(TimeSet(3, 1, 0))
        times.append(TimeSet(4, 2, 0))
        times.append(TimeSet(4, 4, 0))
        times.append(TimeSet(5, 5, 0))
        times.append(TimeSet(6, 7, 0))
        times.append(TimeSet(8, 11, 0))
        results = BenchResult("bench_sample1", 10, times)
        eq_(results.name, "bench_sample1")
        # The input value is stored as a string.
        assert results.value == "10"
        assert results.wall_min == 3
        assert results.wall_max == 8
        assert results.wall_mean == 5
        assert_almost_equals(results.wall_variance, 2.67, places=2)
        assert_almost_equals(results.wall_stddev, 1.63, places=2)
        assert results.user_min == 1
        assert results.user_max == 11
        assert results.user_mean == 5
        assert results.user_variance == 11.0
        assert_almost_equals(results.user_stddev, 3.32, places=2)
class TestBenchmark(TestCase):
    """Behavioural tests for the Benchmark runner: warmup, teardown,
    input handling, parameter counts, discovery, and title formatting."""
    def test_warmup(self):
        # Every bench_* method runs warmup() times even when repeats() is 0.
        class sample(Benchmark):
            def __init__(self):
                self.count = 0
                self.count2 = 0
            def warmup(self):
                return 4
            def repeats(self):
                return 0
            def bench_count(self):
                self.count += 1
            def bench_count2(self, input):
                self.count2 += 1
        bm = sample()
        bm.run(NullReporter())
        assert bm.count == 4
        assert bm.count2 == 4
    def test_teardown(self):
        """ teardown should be called regardless of errors
        """
        class sample(Benchmark):
            def __init__(self):
                self.setup_count = 0
            def setup(self):
                self.setup_count += 1
                if self.setup_count > 1:
                    raise ValueError
            def teardown(self):
                self.setup_count -= 1
                if self.setup_count < 0:
                    raise ValueError
            def bench_exception(self, input):
                1/0
            def bench_works(self):
                pass
        bm = sample()
        self.assertRaises(ZeroDivisionError, bm.run)
        # Balanced setup/teardown pairs leave the counter at zero.
        assert bm.setup_count == 0
    def test_input(self):
        # input() may be a generator or a plain list; each value is passed
        # to one-parameter bench methods in order.
        class SampleBase(OneRun):
            def __init__(self):
                self.passed_in = []
            def bench_sample(self, input):
                self.passed_in.append(input)
        class InputGen(SampleBase):
            def input(self):
                i = 10
                while i < 1000:
                    yield i
                    i *= 10
        class InputList(SampleBase):
            def input(self):
                return [10, 100, 1000, 20]
        bm = InputGen()
        bm.run(NullReporter())
        assert bm.passed_in == [10, 100]
        bm = InputList()
        bm.run(NullReporter())
        assert bm.passed_in == [10, 100, 1000, 20]
    def test_param_count(self):
        # Bench methods may take zero or one argument; both styles must run.
        class sample(OneRun):
            def bench_no_params(self):
                self.no_param = True
            def bench_one_param(self, input):
                self.one_param = True
        bm = sample()
        bm.run(NullReporter())
        assert bm.no_param
        assert bm.one_param
    def test_findbenchmarks(self):
        # Discovery matches any method whose name starts with 'bench'.
        class sample(Benchmark):
            def benchSample1(self, input):
                pass
            def bench_Sample2(self, input):
                pass
            def sampleBench3(self, input):
                pass
            def bench_sample4(self):
                pass
        bms = sample()._find_benchmarks()
        assert "benchSample1" in bms
        assert "bench_Sample2" in bms
        assert not "sampleBench3" in bms
        assert "bench_sample4" in bms
    def test_function_name_to_title(self):
        # Titles come from snake_case or CamelCase names with any 'bench'
        # prefix stripped and words capitalised.
        bm = OneRun()
        eq_(bm._function_name_to_title("bench_sample1_sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("benchSample1Sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("sample1_sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("Sample1Sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("_sample1_sample2_"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("XMLBenchmark"), "Xml Benchmark")
class TestCsvReporter(TestCase):
    """CsvReporter should emit a header row naming each benchmark title."""
    def test_write_titles(self):
        class sample(OneRun):
            def warmup(self):
                return 0
            def repeats(self):
                return 0
            def bench_sample1(self):
                self.no_param = True
            def bench_sample2(self, input):
                self.one_param = True
        bm = sample()
        stream = StringIO()
        bm.run(CsvReporter(stream))
        output = stream.getvalue()
        stream.close()
eq_("Values,Sample1,Sample2\n", output) | tests/test_unitbench.py | import sys
from nose.tools import assert_almost_equals, eq_
from unitbench import TimeSet, Benchmark, BenchResult, Reporter, CsvReporter
from unittest import TestCase
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class NullReporter(Reporter):
    """Reporter that silently discards benchmark results (test helper)."""
    pass
class OneRun(Benchmark):
    """Benchmark base with no warmup and a single repeat (fast test helper)."""
    def warmup(self):
        return 0
    def repeats(self):
        return 1
class TestBenchResult(TestCase):
    """Statistics computed by BenchResult over a list of TimeSet samples."""
    def test_stats(self):
        # TimeSet(wall, user, sys): wall times 3,4,4,5,6,8; user 1,2,4,5,7,11.
        times = []
        times.append(TimeSet(3, 1, 0))
        times.append(TimeSet(4, 2, 0))
        times.append(TimeSet(4, 4, 0))
        times.append(TimeSet(5, 5, 0))
        times.append(TimeSet(6, 7, 0))
        times.append(TimeSet(8, 11, 0))
        results = BenchResult("bench_sample1", 10, times)
        eq_(results.name, "bench_sample1")
        # The input value is stored as a string.
        assert results.value == "10"
        assert results.wall_min == 3
        assert results.wall_max == 8
        assert results.wall_mean == 5
        assert_almost_equals(results.wall_variance, 2.67, places=2)
        assert_almost_equals(results.wall_stddev, 1.63, places=2)
        assert results.user_min == 1
        assert results.user_max == 11
        assert results.user_mean == 5
        assert results.user_variance == 11.0
        assert_almost_equals(results.user_stddev, 3.32, places=2)
class TestBenchmark(TestCase):
    """Behavioural tests for the Benchmark runner: warmup, teardown,
    input handling, parameter counts, discovery, and title formatting."""
    def test_warmup(self):
        # Every bench_* method runs warmup() times even when repeats() is 0.
        class sample(Benchmark):
            def __init__(self):
                self.count = 0
                self.count2 = 0
            def warmup(self):
                return 4
            def repeats(self):
                return 0
            def bench_count(self):
                self.count += 1
            def bench_count2(self, input):
                self.count2 += 1
        bm = sample()
        bm.run(NullReporter())
        assert bm.count == 4
        assert bm.count2 == 4
    def test_teardown(self):
        """ teardown should be called regardless of errors
        """
        class sample(Benchmark):
            def __init__(self):
                self.setup_count = 0
            def setup(self):
                self.setup_count += 1
                if self.setup_count > 1:
                    raise ValueError
            def teardown(self):
                self.setup_count -= 1
                if self.setup_count < 0:
                    raise ValueError
            def bench_exception(self, input):
                1/0
            def bench_works(self):
                pass
        bm = sample()
        self.assertRaises(ZeroDivisionError, bm.run)
        # Balanced setup/teardown pairs leave the counter at zero.
        assert bm.setup_count == 0
    def test_input(self):
        # input() may be a generator or a plain list; each value is passed
        # to one-parameter bench methods in order.
        class SampleBase(OneRun):
            def __init__(self):
                self.passed_in = []
            def bench_sample(self, input):
                self.passed_in.append(input)
        class InputGen(SampleBase):
            def input(self):
                i = 10
                while i < 1000:
                    yield i
                    i *= 10
        class InputList(SampleBase):
            def input(self):
                return [10, 100, 1000, 20]
        bm = InputGen()
        bm.run(NullReporter())
        assert bm.passed_in == [10, 100]
        bm = InputList()
        bm.run(NullReporter())
        assert bm.passed_in == [10, 100, 1000, 20]
    def test_param_count(self):
        # Bench methods may take zero or one argument; both styles must run.
        class sample(OneRun):
            def bench_no_params(self):
                self.no_param = True
            def bench_one_param(self, input):
                self.one_param = True
        bm = sample()
        bm.run(NullReporter())
        assert bm.no_param
        assert bm.one_param
    def test_findbenchmarks(self):
        # Discovery matches any method whose name starts with 'bench'.
        class sample(Benchmark):
            def benchSample1(self, input):
                pass
            def bench_Sample2(self, input):
                pass
            def sampleBench3(self, input):
                pass
            def bench_sample4(self):
                pass
        bms = sample()._find_benchmarks()
        assert "benchSample1" in bms
        assert "bench_Sample2" in bms
        assert not "sampleBench3" in bms
        assert "bench_sample4" in bms
    def test_function_name_to_title(self):
        # Titles come from snake_case or CamelCase names with any 'bench'
        # prefix stripped and words capitalised.
        bm = OneRun()
        eq_(bm._function_name_to_title("bench_sample1_sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("benchSample1Sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("sample1_sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("Sample1Sample2"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("_sample1_sample2_"), "Sample1 Sample2")
        eq_(bm._function_name_to_title("XMLBenchmark"), "Xml Benchmark")
class TestCsvReporter(TestCase):
    """CsvReporter should emit a header row naming each benchmark title."""
    def test_write_titles(self):
        class sample(OneRun):
            def warmup(self):
                return 0
            def repeats(self):
                return 0
            def bench_sample1(self):
                self.no_param = True
            def bench_sample2(self, input):
                self.one_param = True
        bm = sample()
        stream = StringIO()
        bm.run(CsvReporter(stream))
        output = stream.getvalue()
        stream.close()
eq_("Values,Sample1,Sample2\n", output) | 0.432782 | 0.36557 |
import os, sys, re
from typing import List
class Node:
    """A CORE emulation node backed by its /tmp/pycore.*/nX.conf directory."""

    def __init__(self, pycore_dir: str, node_dir: str) -> None:
        self.name = node_dir.split(".")[0]
        self.path = "/tmp/{}/{}".format(pycore_dir, node_dir)
        self.protocols = self._get_protocols()

    def get_log_path(self, protocol: str) -> str:
        """Absolute path of the given protocol's log file."""
        return "{}/{}.log".format(self.path, protocol)

    def get_log(self, protocol: str) -> str:
        """Read and return the entire contents of the protocol's log file."""
        with open(self.get_log_path(protocol)) as handle:
            return handle.read()

    def __str__(self):
        return "{}: {}".format(self.name, ", ".join(self.protocols))

    def _get_protocols(self) -> List[str]:
        ## Collect '*.log' entries in sorted order, strip the suffix, and
        ## drop the spurious 'var' entry that CORE's virtual filesystem
        ## mapping creates (var/log -> var.log).
        log_pattern = re.compile(r".*\.log")
        log_names = sorted(entry for entry in os.listdir(self.path)
                           if log_pattern.match(entry))
        return [entry[:-4] for entry in log_names if entry[:-4] != "var"]
def get_node_dirs() -> List[Node]:
    """Locate the running CORE session under /tmp and build one Node per
    nX.conf directory; exits with a message if no session or nodes exist."""
    session = None
    for entry in os.listdir("/tmp"):
        ## CORE session directories look like 'pycore.32777'
        if entry.startswith("pycore."):
            session = entry
            break
    if session is None:
        print("pycore directory not found: Have you started running CORE?")
        quit()
    ## Node config directories inside the session: n1.conf, n2.conf, ...
    conf_pattern = re.compile(r"n\d+\.conf")
    conf_dirs = sorted(entry for entry in os.listdir("/tmp/{}".format(session))
                       if conf_pattern.match(entry))
    if not conf_dirs:
        print("No nodes found: Have you started running CORE?")
        quit()
    return [Node(session, conf_dir) for conf_dir in conf_dirs]
def print_available_nodes(nodes: List[Node]) -> None:
    """Print a header followed by one line per node (its str() form)."""
    print("Available nodes and protocols:")
    for entry in nodes:
        print(entry)
# CLI entry point: with no arguments, list available nodes/protocols;
# with <node> <protocol>, dump that node's protocol log to stdout.
if __name__ == "__main__":
    nodes: List[Node] = get_node_dirs()
    if len(sys.argv) != 3:
        print_available_nodes(nodes)
        quit()
    found_node = False
    for node in nodes:
        if sys.argv[1] == node.name:
            try:
                print(node.get_log(sys.argv[2]))
                found_node = True
            except FileNotFoundError:
                # Node exists but has no <protocol>.log file.
                print(
                    "Node '{}' has no log file for '{}': is it running the correct protocol?"
                    .format(node.name, sys.argv[2]))
    if not found_node:
        print()
        print_available_nodes(nodes)
print_available_nodes(nodes) | tools/logs.py | import os, sys, re
from typing import List
class Node:
    """A CORE emulation node backed by its /tmp/pycore.*/nX.conf directory."""

    def __init__(self, pycore_dir: str, node_dir: str) -> None:
        self.name = node_dir.split(".")[0]
        self.path = "/tmp/{}/{}".format(pycore_dir, node_dir)
        self.protocols = self._get_protocols()

    def get_log_path(self, protocol: str) -> str:
        """Absolute path of the given protocol's log file."""
        return "{}/{}.log".format(self.path, protocol)

    def get_log(self, protocol: str) -> str:
        """Read and return the entire contents of the protocol's log file."""
        with open(self.get_log_path(protocol)) as handle:
            return handle.read()

    def __str__(self):
        return "{}: {}".format(self.name, ", ".join(self.protocols))

    def _get_protocols(self) -> List[str]:
        ## Collect '*.log' entries in sorted order, strip the suffix, and
        ## drop the spurious 'var' entry that CORE's virtual filesystem
        ## mapping creates (var/log -> var.log).
        log_pattern = re.compile(r".*\.log")
        log_names = sorted(entry for entry in os.listdir(self.path)
                           if log_pattern.match(entry))
        return [entry[:-4] for entry in log_names if entry[:-4] != "var"]
def get_node_dirs() -> List[Node]:
    """Locate the running CORE session under /tmp and build one Node per
    nX.conf directory; exits with a message if no session or nodes exist."""
    session = None
    for entry in os.listdir("/tmp"):
        ## CORE session directories look like 'pycore.32777'
        if entry.startswith("pycore."):
            session = entry
            break
    if session is None:
        print("pycore directory not found: Have you started running CORE?")
        quit()
    ## Node config directories inside the session: n1.conf, n2.conf, ...
    conf_pattern = re.compile(r"n\d+\.conf")
    conf_dirs = sorted(entry for entry in os.listdir("/tmp/{}".format(session))
                       if conf_pattern.match(entry))
    if not conf_dirs:
        print("No nodes found: Have you started running CORE?")
        quit()
    return [Node(session, conf_dir) for conf_dir in conf_dirs]
def print_available_nodes(nodes: List[Node]) -> None:
    """Print a header followed by one line per node (its str() form)."""
    print("Available nodes and protocols:")
    for entry in nodes:
        print(entry)
# CLI entry point: with no arguments, list available nodes/protocols;
# with <node> <protocol>, dump that node's protocol log to stdout.
if __name__ == "__main__":
    nodes: List[Node] = get_node_dirs()
    if len(sys.argv) != 3:
        print_available_nodes(nodes)
        quit()
    found_node = False
    for node in nodes:
        if sys.argv[1] == node.name:
            try:
                print(node.get_log(sys.argv[2]))
                found_node = True
            except FileNotFoundError:
                # Node exists but has no <protocol>.log file.
                print(
                    "Node '{}' has no log file for '{}': is it running the correct protocol?"
                    .format(node.name, sys.argv[2]))
    if not found_node:
        print()
        print_available_nodes(nodes)
print_available_nodes(nodes) | 0.330579 | 0.136292 |
"""Tests for ranking_policy."""
from absl.testing import parameterized
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.networks import global_and_arm_feature_network as arm_net
from tf_agents.bandits.policies import ranking_policy
from tf_agents.specs import bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
class RankingPolicyTest(test_utils.TestCase, parameterized.TestCase):
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15),
dict(batch_size=30, num_items=115, num_slots=100))
def testPolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.PenalizeCosineDistanceRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
def testTemperature(self):
if not tf.executing_eagerly():
self.skipTest('This test is only run in eager mode.')
batch_size = 1
num_items = 20
num_slots = 4
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
low_temp_policy = ranking_policy.NoPenaltyRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
logits_temperature=0.001)
high_temp_policy = ranking_policy.NoPenaltyRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
logits_temperature=1000.)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_step = ts.restart(observation, batch_size=batch_size)
low_temp_first_items = tf.stack(
[low_temp_policy.action(time_step).action[0][0] for _ in range(30)])
num_low_temp_items = tf.shape(tf.unique(low_temp_first_items)[0])[0]
high_temp_first_items = tf.stack(
[high_temp_policy.action(time_step).action[0][0] for _ in range(30)])
num_high_temp_items = tf.shape(tf.unique(high_temp_first_items)[0])[0]
self.evaluate(tf.compat.v1.global_variables_initializer())
# The high temperature policy is more random, so when called repeatedly, it
# chooses more diverse items for the first slot. Hence, the number of unique
# elements will be more.
self.assertLess(num_low_temp_items, num_high_temp_items)
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15))
def testNumActionsPolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7,
5,
num_items,
add_num_actions_feature=True)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.PenalizeCosineDistanceRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
penalty_mixture_coefficient=0.3)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size])
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15))
def testDescendingScorePolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.DescendingScoreRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
# Check that the policy is deterministic.
action_step_again = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action, action_step_again.action)
if __name__ == '__main__':
tf.test.main() | tf_agents/bandits/policies/ranking_policy_test.py |
"""Tests for ranking_policy."""
from absl.testing import parameterized
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.networks import global_and_arm_feature_network as arm_net
from tf_agents.bandits.policies import ranking_policy
from tf_agents.specs import bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
class RankingPolicyTest(test_utils.TestCase, parameterized.TestCase):
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15),
dict(batch_size=30, num_items=115, num_slots=100))
def testPolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.PenalizeCosineDistanceRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
def testTemperature(self):
if not tf.executing_eagerly():
self.skipTest('This test is only run in eager mode.')
batch_size = 1
num_items = 20
num_slots = 4
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
low_temp_policy = ranking_policy.NoPenaltyRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
logits_temperature=0.001)
high_temp_policy = ranking_policy.NoPenaltyRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
logits_temperature=1000.)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_step = ts.restart(observation, batch_size=batch_size)
low_temp_first_items = tf.stack(
[low_temp_policy.action(time_step).action[0][0] for _ in range(30)])
num_low_temp_items = tf.shape(tf.unique(low_temp_first_items)[0])[0]
high_temp_first_items = tf.stack(
[high_temp_policy.action(time_step).action[0][0] for _ in range(30)])
num_high_temp_items = tf.shape(tf.unique(high_temp_first_items)[0])[0]
self.evaluate(tf.compat.v1.global_variables_initializer())
# The high temperature policy is more random, so when called repeatedly, it
# chooses more diverse items for the first slot. Hence, the number of unique
# elements will be more.
self.assertLess(num_low_temp_items, num_high_temp_items)
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15))
def testNumActionsPolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7,
5,
num_items,
add_num_actions_feature=True)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.PenalizeCosineDistanceRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network,
penalty_mixture_coefficient=0.3)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size])
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
@parameterized.parameters(dict(batch_size=1, num_items=20, num_slots=5),
dict(batch_size=3, num_items=15, num_slots=15))
def testDescendingScorePolicy(self, batch_size, num_items, num_slots):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, 5, num_items)
time_step_spec = ts.time_step_spec(obs_spec)
network = arm_net.create_feed_forward_common_tower_network(
obs_spec, [3], [4], [5])
policy = ranking_policy.DescendingScoreRankingPolicy(
num_items=num_items,
num_slots=num_slots,
time_step_spec=time_step_spec,
network=network)
observation = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=[batch_size], minimum=-1, maximum=1)
time_spec = ts.restart(observation, batch_size=batch_size)
action_step = policy.action(time_spec)
self.assertAllEqual(action_step.action.shape, [batch_size, num_slots])
# Check that the policy is deterministic.
action_step_again = policy.action(time_spec)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(action_step.action, action_step_again.action)
if __name__ == '__main__':
tf.test.main() | 0.806396 | 0.569553 |
from dotenv import load_dotenv
from expertai.nlapi.cloud.client import ExpertAiClient
import os
import sys
import re
import tweepy
import json
# Load and set environment variables
load_dotenv()
# Load NLP API
client = ExpertAiClient()
# Authenticate and fetch tweets
auth = tweepy.OAuthHandler(os.getenv("consumer_key"),
os.getenv("consumer_secret"))
auth.set_access_token(os.getenv("access_token"),
os.getenv("access_token_secret"))
api = tweepy.API(auth)
#print("Username from extension:", str(sys.argv[1]))
tweets = api.user_timeline(screen_name=str(sys.argv[1]),
count=50,
include_rts=True,
tweet_mode='extended'
)
oldest_id = tweets[-1].id
all_tweets = []
all_tweets.extend(tweets)
#print('Number of tweets downloaded till now {}'.format(len(all_tweets)))
# Clean text
def removeEmoji(text):
regrex_pattern = re.compile(pattern="["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeUsername(text):
regrex_pattern = re.compile(pattern="@[A-Za-z0-9\w]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeURL(text):
regrex_pattern = re.compile(
pattern="(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeLine(text):
regrex_pattern = re.compile(
pattern="\n", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeSpecialChar(text):
regrex_pattern = re.compile(
pattern="[^0-9a-zA-Z ]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
# print(all_tweets[0])
# Individual sentiment of tweets
individual_sentiment = []
def sentiment(tweets, all_tweets):
for i in all_tweets:
texttweets = [i.full_text]
text = [''.join(ele) for ele in texttweets]
text = " ".join(str(x) for x in text)
text = removeEmoji(text)
text = removeUsername(text)
text = removeURL(text)
text = removeLine(text)
text = removeSpecialChar(text)
language = 'en'
document = client.specific_resource_analysis(
body={"document": {"text": text}},
params={'language': language, 'resource': 'sentiment'})
individual_sentiment.append(document.sentiment.overall)
return individual_sentiment
rate_list = sentiment(tweets, all_tweets)
# No. of neg, pos, neutral tweets
neg_count = len(list(filter(lambda x: (x < 0), rate_list)))
pos_count = len(list(filter(lambda x: (x > 0), rate_list)))
neutral_count = len(list(filter(lambda x: (x == 0), rate_list)))
# Overall sentiment of the tweets
texttweets = [[tweet.full_text] for idx, tweet in enumerate(all_tweets)]
text = [''.join(ele) for ele in texttweets]
text = " ".join(str(x) for x in text)
text = removeEmoji(text)
text = removeUsername(text)
text = removeURL(text)
text = removeLine(text)
text = removeSpecialChar(text)
# print(text)
document = client.specific_resource_analysis(
body={"document": {"text": text}},
params={'language': 'en', 'resource': 'sentiment'})
# Get user details
user = api.get_user(sys.argv[1])
output = {'username': user.name, 'joined': user.created_at.ctime(), 'followers': user.followers_count, 'following': user.friends_count,
'positive': pos_count, 'negative': neg_count, 'neutral': neutral_count, 'overall': document.sentiment.overall}
print(json.dumps(output)) | get_rating.py | from dotenv import load_dotenv
from expertai.nlapi.cloud.client import ExpertAiClient
import os
import sys
import re
import tweepy
import json
# Load and set environment variables
load_dotenv()
# Load NLP API
client = ExpertAiClient()
# Authenticate and fetch tweets
auth = tweepy.OAuthHandler(os.getenv("consumer_key"),
os.getenv("consumer_secret"))
auth.set_access_token(os.getenv("access_token"),
os.getenv("access_token_secret"))
api = tweepy.API(auth)
#print("Username from extension:", str(sys.argv[1]))
tweets = api.user_timeline(screen_name=str(sys.argv[1]),
count=50,
include_rts=True,
tweet_mode='extended'
)
oldest_id = tweets[-1].id
all_tweets = []
all_tweets.extend(tweets)
#print('Number of tweets downloaded till now {}'.format(len(all_tweets)))
# Clean text
def removeEmoji(text):
regrex_pattern = re.compile(pattern="["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeUsername(text):
regrex_pattern = re.compile(pattern="@[A-Za-z0-9\w]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeURL(text):
regrex_pattern = re.compile(
pattern="(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeLine(text):
regrex_pattern = re.compile(
pattern="\n", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def removeSpecialChar(text):
regrex_pattern = re.compile(
pattern="[^0-9a-zA-Z ]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
# print(all_tweets[0])
# Individual sentiment of tweets
individual_sentiment = []
def sentiment(tweets, all_tweets):
for i in all_tweets:
texttweets = [i.full_text]
text = [''.join(ele) for ele in texttweets]
text = " ".join(str(x) for x in text)
text = removeEmoji(text)
text = removeUsername(text)
text = removeURL(text)
text = removeLine(text)
text = removeSpecialChar(text)
language = 'en'
document = client.specific_resource_analysis(
body={"document": {"text": text}},
params={'language': language, 'resource': 'sentiment'})
individual_sentiment.append(document.sentiment.overall)
return individual_sentiment
rate_list = sentiment(tweets, all_tweets)
# No. of neg, pos, neutral tweets
neg_count = len(list(filter(lambda x: (x < 0), rate_list)))
pos_count = len(list(filter(lambda x: (x > 0), rate_list)))
neutral_count = len(list(filter(lambda x: (x == 0), rate_list)))
# Overall sentiment of the tweets
texttweets = [[tweet.full_text] for idx, tweet in enumerate(all_tweets)]
text = [''.join(ele) for ele in texttweets]
text = " ".join(str(x) for x in text)
text = removeEmoji(text)
text = removeUsername(text)
text = removeURL(text)
text = removeLine(text)
text = removeSpecialChar(text)
# print(text)
document = client.specific_resource_analysis(
body={"document": {"text": text}},
params={'language': 'en', 'resource': 'sentiment'})
# Get user details
user = api.get_user(sys.argv[1])
output = {'username': user.name, 'joined': user.created_at.ctime(), 'followers': user.followers_count, 'following': user.friends_count,
'positive': pos_count, 'negative': neg_count, 'neutral': neutral_count, 'overall': document.sentiment.overall}
print(json.dumps(output)) | 0.199698 | 0.106041 |
import os
import PySimpleGUIQt as sg
from multiprocessing import Process
from toolbox_creator.globe_icon import globe_icon
from toolbox_creator.function_window import create_function_window
from toolbox_creator.function_validation import validate_tool_list
from toolbox_creator.utils import get_list_of_keys
global window_opened
window_opened = False
def tool_selector_layout(functions, scalar=1.0, top_menu=True):
"""Creates the layout for the tool selector."""
description = "Select a function to run."
menu_def = [
["&File", ["E&xit"]],
[
"&Options",
["Paths", "Defaults"],
],
[
"&Help",
["Documentation", "About"],
],
]
col1 = sg.Column(
[
[
sg.Listbox(
[str(i) for i in functions],
key="-FUNC-LIST-",
size_px=(round(300 * scalar), None),
pad=((0, 0), (0, 0)),
enable_events=True,
default_values=[functions[0]],
)
]
],
size=(round(300 * scalar), None),
pad=((0, 0), (0, 0)),
)
col2 = sg.Column(
[
[
sg.Multiline(
description,
size_px=(None, None),
key="-DESC-",
disabled=True,
background_color="#f1f1f1",
pad=((0, 0), (0, 0)),
)
],
[
sg.Button(
"Open Function",
key="-BUTTON1-",
size_px=(round(500 * scalar), 60),
pad=((0, 0), (10, 0)),
bind_return_key=True,
border_width=0,
)
],
],
size=(round(500 * scalar), None),
element_justification="left",
pad=((0, 0), (0, 0)),
)
base_layout = [
sg.Column(
[[col1, col2]],
size=(round(920 * scalar), None),
pad=((0, 0), (0, 0)),
scrollable=True,
element_justification="left",
)
]
if top_menu:
return [
[
sg.Menu(
menu_def,
tearoff=False,
)
],
base_layout,
]
return [base_layout]
def select_function(function_name, window, tools):
"""Prints the description of the selected function."""
description = tools[function_name]["description"]
window["-DESC-"].update(value=description)
def create_gui(
tools_list,
name="toolbox",
theme="Reddit",
create_console=False,
icon=False,
auto_scale=True,
scalar=0.6,
top_menu=False,
run_subprocess=False,
):
"""Creates a GUI for the toolbox from a list of tools."""
global window_opened
if window_opened:
return
if not validate_tool_list(tools_list):
print("Unable to create GUI due to invalid setup list.")
return
if auto_scale:
os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
sg.theme(theme)
KEY_UP_QT = "special 16777235"
KEY_DOWN_QT = "special 16777237"
KEY_ENTER_QT = "special 16777220"
sg.set_options(
element_padding=(0, 0),
margins=(0, 0),
font=("Helvetica", 10),
border_width=0,
)
if icon is False:
icon = globe_icon
available_functions = get_list_of_keys(tools_list)
window = sg.Window(
name,
tool_selector_layout(
available_functions,
scalar=scalar,
top_menu=top_menu,
),
resizable=True,
auto_size_buttons=True,
size=(round(800 * scalar), round(600 * scalar)),
finalize=True,
icon=globe_icon,
element_justification="center",
return_keyboard_events=True,
border_depth=0,
)
window_opened = True
select_function(available_functions[0], window, tools_list)
current_selection = 0
max_selection = len(available_functions) - 1
list_not_clicked = True
ignore_list_update = False
open_windows = []
while True:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED or event is None:
break
elif (
event == "-BUTTON1-"
or event == "-FUNC-LIST-DOUBLE-CLICK-"
or event == KEY_ENTER_QT
):
if (
isinstance(values["-FUNC-LIST-"], list)
and len(values["-FUNC-LIST-"]) != 0
):
function_name = values["-FUNC-LIST-"][0]
if run_subprocess:
p = Process(
target=create_function_window,
args=(
function_name,
tools_list,
create_console,
icon,
theme,
scalar,
),
)
p.start()
open_windows.append(p)
else:
create_function_window(
function_name,
tools_list,
create_console,
icon,
theme,
scalar,
)
elif event == "-FUNC-LIST-":
if ignore_list_update:
ignore_list_update = False
continue
list_not_clicked = False
current_selection = available_functions.index(values[event][0])
select_function(available_functions[current_selection], window, tools_list)
elif event == KEY_DOWN_QT and list_not_clicked:
if current_selection < max_selection:
ignore_list_update = True
current_selection += 1
select_function(
available_functions[current_selection], window, tools_list
)
window["-FUNC-LIST-"].update(set_to_index=current_selection)
elif event == KEY_UP_QT and list_not_clicked:
if current_selection > 0:
ignore_list_update = True
current_selection -= 1
select_function(
available_functions[current_selection], window, tools_list
)
window["-FUNC-LIST-"].update(set_to_index=current_selection)
window.close()
for p in open_windows:
try:
p.terminate()
except Exception:
pass | toolbox_creator/gui.py | import os
import PySimpleGUIQt as sg
from multiprocessing import Process
from toolbox_creator.globe_icon import globe_icon
from toolbox_creator.function_window import create_function_window
from toolbox_creator.function_validation import validate_tool_list
from toolbox_creator.utils import get_list_of_keys
global window_opened
window_opened = False
def tool_selector_layout(functions, scalar=1.0, top_menu=True):
"""Creates the layout for the tool selector."""
description = "Select a function to run."
menu_def = [
["&File", ["E&xit"]],
[
"&Options",
["Paths", "Defaults"],
],
[
"&Help",
["Documentation", "About"],
],
]
col1 = sg.Column(
[
[
sg.Listbox(
[str(i) for i in functions],
key="-FUNC-LIST-",
size_px=(round(300 * scalar), None),
pad=((0, 0), (0, 0)),
enable_events=True,
default_values=[functions[0]],
)
]
],
size=(round(300 * scalar), None),
pad=((0, 0), (0, 0)),
)
col2 = sg.Column(
[
[
sg.Multiline(
description,
size_px=(None, None),
key="-DESC-",
disabled=True,
background_color="#f1f1f1",
pad=((0, 0), (0, 0)),
)
],
[
sg.Button(
"Open Function",
key="-BUTTON1-",
size_px=(round(500 * scalar), 60),
pad=((0, 0), (10, 0)),
bind_return_key=True,
border_width=0,
)
],
],
size=(round(500 * scalar), None),
element_justification="left",
pad=((0, 0), (0, 0)),
)
base_layout = [
sg.Column(
[[col1, col2]],
size=(round(920 * scalar), None),
pad=((0, 0), (0, 0)),
scrollable=True,
element_justification="left",
)
]
if top_menu:
return [
[
sg.Menu(
menu_def,
tearoff=False,
)
],
base_layout,
]
return [base_layout]
def select_function(function_name, window, tools):
"""Prints the description of the selected function."""
description = tools[function_name]["description"]
window["-DESC-"].update(value=description)
def create_gui(
tools_list,
name="toolbox",
theme="Reddit",
create_console=False,
icon=False,
auto_scale=True,
scalar=0.6,
top_menu=False,
run_subprocess=False,
):
"""Creates a GUI for the toolbox from a list of tools."""
global window_opened
if window_opened:
return
if not validate_tool_list(tools_list):
print("Unable to create GUI due to invalid setup list.")
return
if auto_scale:
os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
sg.theme(theme)
KEY_UP_QT = "special 16777235"
KEY_DOWN_QT = "special 16777237"
KEY_ENTER_QT = "special 16777220"
sg.set_options(
element_padding=(0, 0),
margins=(0, 0),
font=("Helvetica", 10),
border_width=0,
)
if icon is False:
icon = globe_icon
available_functions = get_list_of_keys(tools_list)
window = sg.Window(
name,
tool_selector_layout(
available_functions,
scalar=scalar,
top_menu=top_menu,
),
resizable=True,
auto_size_buttons=True,
size=(round(800 * scalar), round(600 * scalar)),
finalize=True,
icon=globe_icon,
element_justification="center",
return_keyboard_events=True,
border_depth=0,
)
window_opened = True
select_function(available_functions[0], window, tools_list)
current_selection = 0
max_selection = len(available_functions) - 1
list_not_clicked = True
ignore_list_update = False
open_windows = []
while True:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED or event is None:
break
elif (
event == "-BUTTON1-"
or event == "-FUNC-LIST-DOUBLE-CLICK-"
or event == KEY_ENTER_QT
):
if (
isinstance(values["-FUNC-LIST-"], list)
and len(values["-FUNC-LIST-"]) != 0
):
function_name = values["-FUNC-LIST-"][0]
if run_subprocess:
p = Process(
target=create_function_window,
args=(
function_name,
tools_list,
create_console,
icon,
theme,
scalar,
),
)
p.start()
open_windows.append(p)
else:
create_function_window(
function_name,
tools_list,
create_console,
icon,
theme,
scalar,
)
elif event == "-FUNC-LIST-":
if ignore_list_update:
ignore_list_update = False
continue
list_not_clicked = False
current_selection = available_functions.index(values[event][0])
select_function(available_functions[current_selection], window, tools_list)
elif event == KEY_DOWN_QT and list_not_clicked:
if current_selection < max_selection:
ignore_list_update = True
current_selection += 1
select_function(
available_functions[current_selection], window, tools_list
)
window["-FUNC-LIST-"].update(set_to_index=current_selection)
elif event == KEY_UP_QT and list_not_clicked:
if current_selection > 0:
ignore_list_update = True
current_selection -= 1
select_function(
available_functions[current_selection], window, tools_list
)
window["-FUNC-LIST-"].update(set_to_index=current_selection)
window.close()
for p in open_windows:
try:
p.terminate()
except Exception:
pass | 0.53777 | 0.174551 |
import configparser
import os
import redis
from ansible_collections.nordsec.team_password_manager.plugins.module_utils.manager import (
TeamPasswordManager,
CachingTeamPasswordManager,
BaseTeamPasswordManager
)
import tpm
ENV_VARIABLE_TPM_CONFIGURATION = "TPM_CONFIGURATION"
ENV_VARIABLE_TPM_CONFIGURATION_FILE_PATH = "TPM_CONFIGURATION_FILE_PATH"
DEFAULT_TPM_CONFIGURATION_FILE_PATH = "~/.tpm_password.ini"
CONFIG_CACHE_OPTION_ENCRYPTION_KEY = "cache_encryption_key"
CONFIG_CACHE_OPTION_CACHE_TTL = "cache_ttl"
def _parse_config_from_file(file_path: str) -> configparser.ConfigParser:
if os.path.isfile(file_path) is False:
raise Exception('Could not find configration file at %s' % (file_path))
config = configparser.ConfigParser()
config.read(file_path)
return config
def _parse_config(config_data: str):
config = configparser.ConfigParser()
config.read_string(config_data)
return config
def _create_connection(
config: configparser.ConfigParser,
section: str
) -> tpm.TpmApiv4:
url = config.get(section, 'url')
if config.getboolean(section, 'hmac'):
return tpm.TpmApiv4(
url,
private_key=config.get(section, 'private_key'),
public_key=config.get(section, 'public_key'),
)
return tpm.TpmApiv4(
url,
username=config.get(section, 'username'),
password=config.get(section, 'password'),
)
def _create_config_from_file() -> configparser.ConfigParser:
file_path = os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION_FILE_PATH)
if file_path is None:
file_path = DEFAULT_TPM_CONFIGURATION_FILE_PATH
return _parse_config_from_file(file_path)
def _create_config_from_env_variable() -> configparser.ConfigParser:
data = os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION)
if data is None:
raise Exception(
'Could not load configration because %s variable is empty or does not exist' %
(ENV_VARIABLE_TPM_CONFIGURATION)
)
return _parse_config(data)
class TpmApiFactory():
def create(self, configuration_section: str) -> tpm.TpmApiv4:
if os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION, ""):
return self.create_from_env_variable(configuration_section)
return self.create_from_file(configuration_section)
def create_from_file(self, configuration_section: str) -> tpm.TpmApiv4:
return _create_connection(_create_config_from_file(), configuration_section)
def create_from_env_variable(self, configuration_section: str) -> tpm.TpmApiv4:
return _create_connection(_create_config_from_env_variable(), configuration_section)
class TeamPasswordManagerFactory():
def _get_config(self) -> configparser.ConfigParser:
if os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION, ""):
return _create_config_from_env_variable()
return _create_config_from_file()
def create(
self,
configuration_section: str,
update_cache: bool = False
) -> BaseTeamPasswordManager:
tpm_api = (TpmApiFactory()).create(configuration_section)
redis_connection = redis.StrictRedis()
password_manager = TeamPasswordManager(tpm_api)
config = self._get_config()
if config.has_option(configuration_section, CONFIG_CACHE_OPTION_ENCRYPTION_KEY):
cache_ttl = config.get(
section=configuration_section,
option=CONFIG_CACHE_OPTION_CACHE_TTL,
fallback=None
)
encryption_key = config.get(
section=configuration_section,
option=CONFIG_CACHE_OPTION_ENCRYPTION_KEY,
fallback=None
)
if cache_ttl is not None:
cache_ttl = int(cache_ttl)
try:
redis_connection.ping()
password_manager = CachingTeamPasswordManager(
password_manager,
redis_connection,
encryption_key,
cache_ttl,
update_cache
)
except (
redis.exceptions.ConnectionError,
redis.exceptions.BusyLoadingError
):
pass
return password_manager | plugins/module_utils/factory.py | import configparser
import os
import redis
from ansible_collections.nordsec.team_password_manager.plugins.module_utils.manager import (
TeamPasswordManager,
CachingTeamPasswordManager,
BaseTeamPasswordManager
)
import tpm
ENV_VARIABLE_TPM_CONFIGURATION = "TPM_CONFIGURATION"
ENV_VARIABLE_TPM_CONFIGURATION_FILE_PATH = "TPM_CONFIGURATION_FILE_PATH"
DEFAULT_TPM_CONFIGURATION_FILE_PATH = "~/.tpm_password.ini"
CONFIG_CACHE_OPTION_ENCRYPTION_KEY = "cache_encryption_key"
CONFIG_CACHE_OPTION_CACHE_TTL = "cache_ttl"
def _parse_config_from_file(file_path: str) -> configparser.ConfigParser:
if os.path.isfile(file_path) is False:
raise Exception('Could not find configration file at %s' % (file_path))
config = configparser.ConfigParser()
config.read(file_path)
return config
def _parse_config(config_data: str):
config = configparser.ConfigParser()
config.read_string(config_data)
return config
def _create_connection(
config: configparser.ConfigParser,
section: str
) -> tpm.TpmApiv4:
url = config.get(section, 'url')
if config.getboolean(section, 'hmac'):
return tpm.TpmApiv4(
url,
private_key=config.get(section, 'private_key'),
public_key=config.get(section, 'public_key'),
)
return tpm.TpmApiv4(
url,
username=config.get(section, 'username'),
password=config.get(section, 'password'),
)
def _create_config_from_file() -> configparser.ConfigParser:
file_path = os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION_FILE_PATH)
if file_path is None:
file_path = DEFAULT_TPM_CONFIGURATION_FILE_PATH
return _parse_config_from_file(file_path)
def _create_config_from_env_variable() -> configparser.ConfigParser:
data = os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION)
if data is None:
raise Exception(
'Could not load configration because %s variable is empty or does not exist' %
(ENV_VARIABLE_TPM_CONFIGURATION)
)
return _parse_config(data)
class TpmApiFactory():
def create(self, configuration_section: str) -> tpm.TpmApiv4:
if os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION, ""):
return self.create_from_env_variable(configuration_section)
return self.create_from_file(configuration_section)
def create_from_file(self, configuration_section: str) -> tpm.TpmApiv4:
return _create_connection(_create_config_from_file(), configuration_section)
def create_from_env_variable(self, configuration_section: str) -> tpm.TpmApiv4:
return _create_connection(_create_config_from_env_variable(), configuration_section)
class TeamPasswordManagerFactory():
    """Build password managers, adding a Redis-backed cache when available."""

    def _get_config(self) -> configparser.ConfigParser:
        # Mirrors TpmApiFactory: environment configuration wins when present.
        if os.environ.get(ENV_VARIABLE_TPM_CONFIGURATION, ""):
            return _create_config_from_env_variable()
        return _create_config_from_file()

    def create(
        self,
        configuration_section: str,
        update_cache: bool = False
    ) -> BaseTeamPasswordManager:
        api_client = TpmApiFactory().create(configuration_section)
        redis_connection = redis.StrictRedis()
        manager = TeamPasswordManager(api_client)
        config = self._get_config()
        # Caching is opt-in: it requires an encryption key in the section.
        if config.has_option(configuration_section, CONFIG_CACHE_OPTION_ENCRYPTION_KEY):
            encryption_key = config.get(
                section=configuration_section,
                option=CONFIG_CACHE_OPTION_ENCRYPTION_KEY,
                fallback=None
            )
            cache_ttl = config.get(
                section=configuration_section,
                option=CONFIG_CACHE_OPTION_CACHE_TTL,
                fallback=None
            )
            if cache_ttl is not None:
                cache_ttl = int(cache_ttl)
            # Best effort: fall back to the plain manager when Redis is
            # unreachable or still loading its dataset.
            try:
                redis_connection.ping()
                manager = CachingTeamPasswordManager(
                    manager,
                    redis_connection,
                    encryption_key,
                    cache_ttl,
                    update_cache
                )
            except (
                redis.exceptions.ConnectionError,
                redis.exceptions.BusyLoadingError
            ):
                pass
        return manager
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from .models import Museo, Comentario, Museo_añadido, Seleccion
from .parser import parserXML
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils import timezone
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import FormView
from django.http.response import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
# Create your views here.
solo_museos = 0
@csrf_exempt
def auth_login(request):
    """Log the user in from posted credentials and return to the home page.

    Bug fix: the original returned ``None`` (an invalid Django view
    response) when authentication failed; now every path redirects to "/".
    """
    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        login(request, user)
    return HttpResponseRedirect("/")
def logout(request):
    """End the current session and redirect to the home page.

    Bug fix: this def shadows the ``logout`` imported from
    ``django.contrib.auth``, so the original body called itself and
    recursed until RecursionError; delegate to ``auth.logout`` instead.
    """
    auth.logout(request)
    return HttpResponseRedirect("/")
@csrf_exempt
def register(request):
    """Create a user account plus its empty personal page, then go home."""
    username = request.POST['user']
    email = request.POST['email']
    password = request.POST['password']
    nuevo_usuario = User.objects.create_user(username, email, password)
    nuevo_usuario.save()
    # Every user gets a personal page named after them from the start.
    titulo = "Página de " + str(username)
    pagina_personal = Seleccion(propietario=str(username), nombre=titulo)
    pagina_personal.save()
    return HttpResponseRedirect("/")
@csrf_exempt
def personal (request, propietario):
    """Render the personal page of *propietario* with their favourite museums.

    GET renders the page; POST with opcion='1' renames it first.  The
    module-global ``solo_museos`` flag restricts the listing to museums
    whose accesibilidad == '1' when it is 1.
    """
    global solo_museos
    # Header widgets: login/register forms for anonymous visitors,
    # greeting/welcome text for authenticated ones.
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        # NOTE(review): this <input> tag is never closed with '>' — confirm
        # the intended markup.
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='/register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    if request.user.is_authenticated():
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro +="<br> Esperamos que su visita a la página sea satisfactoria"
    if request.method == "GET":
        seleccion = Seleccion.objects.get (propietario = propietario)
        favoritos = seleccion.museos_fav.all()
        content_title=seleccion.nombre
        content=""
        # List each favourite; with solo_museos==1 only accessible museums.
        for favorito in favoritos:
            if solo_museos==0:
                content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
            elif solo_museos==1:
                if favorito.museo.accesibilidad=='1':
                    content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                    content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                    content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                    content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
        # Offer the XML export link only when there is something to export.
        if len(favoritos)!=0:
            content += "<li><a href='/" + propietario + "/XML'>"
            content += "XML</a></li>"
    # NOTE(review): a POST whose 'opcion' is missing or != '1' leaves
    # content/content_title undefined and the Context below raises NameError
    # — confirm every form posting here always sends opcion='1'.
    elif request.method =="POST" and request.POST['opcion']=='1':
        # Rename the page, then rebuild the same listing as the GET branch.
        seleccion = Seleccion.objects.get (propietario = propietario)
        seleccion.nombre = request.POST['texto']
        seleccion.save()
        favoritos = seleccion.museos_fav.all()
        content_title=seleccion.nombre
        content=""
        for favorito in favoritos:
            if solo_museos==0:
                content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
            elif solo_museos==1:
                if favorito.museo.accesibilidad=='1':
                    content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                    content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                    content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                    content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
        if len(favoritos)!=0:
            content += "<li><a href='/" + propietario + "/XML'>"
            content += "XML</a></li>"
    # Sidebar: the owner gets rename/appearance controls, everyone else a
    # directory of all personal pages.
    if request.user.is_authenticated() and request.user.username == propietario:
        personales = "<form action='/" + propietario + "' method=POST>"
        personales += "Cambiar nombre de tu página personal<input type= 'text' name='texto'>"
        personales += "<input type= 'hidden' name='opcion' value='1'>"
        personales += "<input type= 'submit' value='Cambiar'>"
        personales += "</form>"
        registro = "<li> Personalizar color fondo (en ocasiones será necesario recargar la página para que se apliquen los cambios)</li>"
        registro += "<br><form action='/css_color' method=GET>"
        registro += "<select name='Colores de fondo'>"
        registro += "<option value=Blanco> Blanco</option>"
        registro += "<option value=Crema> Crema</option>"
        registro += "<option value=Plata> Plata</option>"
        registro += "<input type= 'submit' value='FILTRAR'>"
        registro += "</form></br>"
        registro += "<li> Cambiar tamaño letra (en ocasiones será necesario recargar la página para que se apliquen los cambios) </li>"
        registro += "<br><form action='/css_letra' method=GET>"
        registro += "<select name='Tamaño letra cuerpo página'>"
        registro += "<option value=Pequeña> Pequeña</option>"
        registro += "<option value=Mediana> Mediana</option>"
        registro += "<option value=Grande> Grande</option>"
        registro += "<input type= 'submit' value='FILTRAR'>"
        registro += "</form></br>"
    else:
        pag_personales = Seleccion.objects.all()
        personales = ""
        for pag_personal in pag_personales:
            personales += "<br><a href='/" + pag_personal.propietario + "'>"
            personales += pag_personal.nombre + "</a></br>"
    user=str(request.user.username)
    personales_title="Páginas personales"
    # Feed: the five most recent comments (queryset order).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'user': user, 'registro': registro, 'personales_title': personales_title, 'personales':personales, 'content_title': content_title, 'content': content, 'comentarios':comentarios})
    template = get_template ('museo_pers.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def museo_pers(request, id):
    """Detail page for one museum (looked up by its *identidad* == id).

    POST opcion='1' adds an anonymous comment; POST opcion='2' adds the
    museum to the current user's personal page.
    """
    # Header widgets, same pattern as the other views.
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='/register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    if request.user.is_authenticated():
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro +="<br> Esperamos que su visita a la página sea satisfactoria"
    museo=Museo.objects.get(identidad=id)
    content_title = str(museo.nombre)
    # NOTE(review): request.POST['opcion'] raises MultiValueDictKeyError when
    # 'opcion' is absent from a POST — confirm all posting forms send it.
    if request.method=="POST" and request.POST['opcion']=='1':
        texto = request.POST['texto']
        comentario = Comentario (museo = museo , texto= texto)
        comentario.save()
        # Keep the per-museum counter in sync for the "most commented" feed.
        museo.num_comentarios=museo.num_comentarios + 1
        museo.save()
    if request.method=="POST" and request.POST['opcion']=='2':
        museo_añadido = Museo_añadido (museo = museo, añadido = timezone.now())
        museo_añadido.save()
        seleccion = Seleccion.objects.get (propietario = request.user.username)
        favoritos = seleccion.museos_fav.all()
        # Only link the museum once per personal page.
        if favoritos.filter(museo=museo_añadido.museo).count() == 0:
            seleccion.museos_fav.add(museo_añadido)
            seleccion.save()
    # Museum fact sheet.
    content = "<br><li>Descripción entidad:</li><p>" + str(museo.descripcion_entidad) + "</p><br>"
    content += "<br><li>Transporte:</li><p>" + str(museo.transporte) + "</p><br>"
    content += "<br><li>Equipamiento:</li><p>" + str(museo.equipamiento) + "</p><br>"
    content += "<br><li>Descripción:</li><p>" + str(museo.descripcion) + "</p><br>"
    content += "<br><li>Horario:</li><p>" + str(museo.horario) + "</p><br>"
    content += "<br><li>URL del sitio:</li> <a href ='" + museo.url + "'>" + str(museo.url) + "</a><br>"
    content += "<br><li>Localización:</li><p>" + str(museo.localizacion) + "</p><br>"
    content += "<br><li>Contacto:</li>"
    content += "<p>Telefono: " + str(museo.telefono) + "</p>"
    content += "<p>Fax: " + str(museo.fax) + "</p>"
    content += "<p>Email: " + str(museo.email) + "</p>"
    # Numbered list of this museum's comments.
    opiniones = Comentario.objects.filter(museo=museo)
    content += "<br><li>Comentarios:</li>"
    num=1
    for opinion in opiniones:
        content += "<p>"+ str(num) + " - " + opinion.texto + "</p>"
        num=num+1
    # Comment/add-to-favourites forms are only offered when logged in.
    if request.user.is_authenticated():
        content += "<br><li> Añadir comentario: </li></br>"
        content += "<form action='/museos/" + str(id) + "' method=POST>"
        content += "<input type= 'text' name='texto'>"
        content += "<input type= 'hidden' name='opcion' value='1'>"
        content += "<input type= 'submit' value='enviar'>"
        content += "</form>"
        content += "<br><form action='/museos/" + str(id) + "' method=POST>"
        content += "<input type= 'hidden' name='opcion' value='2'>"
        content += "<input type= 'submit' value='Añadir a mi página personal'>"
        content += "</form></br>"
    if not request.user.is_authenticated():
        content += "<br><li>Podrá añadir comentarios cuando se registre en la página y se identifique. </li>"
    user=str(request.user.username)
    # Directory of all personal pages for the sidebar.
    pag_personales = Seleccion.objects.all()
    personales = ""
    for pag_personal in pag_personales:
        personales += "<br><a href='/" + pag_personal.propietario + "'>"
        personales += pag_personal.nombre + "</a></br>"
    personales_title="Páginas personales"
    # Feed: the five most recent comments (queryset order).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'registro': registro,'personales_title':personales_title,
        'personales':personales, 'user': user, 'content_title': content_title, 'content': content, 'comentarios':comentarios})
    template = get_template ('museo_pers.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def allmuseums(request):
    """List every museum; POST loads the catalogue or filters by district.

    ``solo_museos`` restricts output to museums with accesibilidad == '1'
    when it is 1.
    """
    global solo_museos
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        # NOTE(review): action is relative ('register/') here, unlike the
        # absolute '/register/' used by the other views — confirm intended.
        registro = "<form action='register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    if request.user.is_authenticated():
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro +="<br> Esperamos que su visita a la página sea satisfactoria"
    if request.method=="GET":
        # Empty table: show the CARGAR button instead of a listing.
        if len(Museo.objects.all())==0:
            content_title ="Aún no hay museos cargados"
            content = "<form action='/museos/' method='post'>"
            content += "<button type='submit'>CARGAR</button>"
            content += "</form>"
        else:
            content_title="Todos los museos comunidad"
            museos = Museo.objects.all ()
            content = ""
            for museo in museos:
                if solo_museos ==0:
                    museoid = str(museo.identidad)
                    content += "<br><a href=museos/" + museoid + ">"
                    content += museo.nombre + "</a><br>"
                    content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                    content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
                elif solo_museos ==1:
                    if museo.accesibilidad=='1':
                        museoid = str(museo.identidad)
                        content += "<br><a href=museos/" + museoid + ">"
                        content += museo.nombre + "</a><br>"
                        content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                        content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
    elif request.method=="POST":
        if len(Museo.objects.all())==0:
            parserXML('./museos/museos.xml')
        content_title ="Todos los museos cargados"
        museos = Museo.objects.all ()
        content = ""
        # NOTE(review): this condition looks inverted — right after loading,
        # the table is non-empty, so this listing loop never runs and control
        # always falls into the district-filter branch; confirm.
        if len(Museo.objects.all())==0:
            for museo in museos:
                if solo_museos==0:
                    museoid = str(museo.identidad)
                    content += "<br><a href='/" + museoid + "'>"
                    content += museo.nombre + "</a><br>"
                    content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                    content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
                elif solo_museos ==1:
                    if museo.accesibilidad=='1':
                        museoid = str(museo.identidad)
                        content += "<br><a href='/" + museoid + "'>"
                        content += museo.nombre + "</a><br>"
                        content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                        content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
        else:
            # Pulls the district value out of the raw POST body
            # ("Distrito=<value>" as sent by the filter form).
            # NOTE(review): an empty body makes split("=")[1] raise
            # IndexError — confirm the CARGAR form never reaches this path.
            dist_ele = request.body.decode('utf-8').split("=")[1] # Extracts the district value from the POST sent by the filter option
            distrito = str(dist_ele)
            for museo in museos:
                if solo_museos==0:
                    if distrito == str(museo.distrito):
                        museoid = str(museo.identidad)
                        content += "<br><a href='/museos/" + museoid + "'>"
                        content += museo.nombre + "</a><br>"
                        content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                        content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
                # NOTE(review): with solo_museos==1 the district filter is
                # not applied — every accessible museum is listed; confirm.
                elif solo_museos==1:
                    if museo.accesibilidad=='1':
                        museoid = str(museo.identidad)
                        content += "<br><a href='/museos/" + museoid + "'>"
                        content += museo.nombre + "</a><br>"
                        content += "<br><li>URL del sitio: </li> <a href='" + museo.url +"'> " + museo.url + "</a></br>"
                        content += "<br> ---------------------------------------------------------------------------------------------------------------------------------------- </br>"
    # Sidebar: district filter form built from the distinct districts.
    museos_distritos= Museo.objects.all()
    distritos = museos_distritos.values_list('distrito', flat=True).distinct()
    personales = "<form action='/museos' method='post'>"
    personales += "<select name='Distrito'>"
    for distrito in distritos:
        personales += "<option value='" + distrito + "'>" + distrito
        personales += "</option>"
    if len(Museo.objects.all())!=0:
        personales += "<input type= 'submit' value='FILTRAR'>"
    else:
        personales += "<input type= 'submit' value='CARGAR MUSEOS'>"
    personales += "</form>"
    user=str(request.user.username)
    personales_title="Filtrar por distrito"
    # Feed: the five most recent comments (queryset order).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'personales_title': personales_title, 'personales': personales, 'registro': registro, 'user': user, 'content_title': content_title, 'content': content, 'comentarios':comentarios})
    template = get_template ('museos.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def main(request):
    """Home page: the five most-commented museums plus sidebar widgets.

    GET renders the page; POST loads the museum XML when the table is empty
    and redirects home.  ``solo_museos`` filters the listing to accessible
    museums when set to 1.

    Bug fix: the original re-declared ``global solo_museos`` near the end of
    the function, after the name had already been read — that is a
    SyntaxError ("name is used prior to global declaration") which prevented
    the module from importing.  The single declaration at the top suffices.
    """
    global solo_museos
    # Label for the accessibility-toggle link.
    if solo_museos == 0:
        accesibilidad ="Mostrar solo museos"
    elif solo_museos == 1:
        accesibilidad = "Mostrar todos los establecimientos tengan accesibilidad 1 o no"
    if len(Museo.objects.all())==0:
        content_title ="Aún no hay museos cargados"
        content = "<form action='/' method='post'>"
        content += "<button type='submit'>CARGAR</button>"
        content += "</form>"
    else:
        content_title="5 museos más comentados"
        content=""
    if request.method =="GET":
        if not request.user.is_authenticated():
            inicio = "<form action='/login' method='post'>"
            inicio += "<label for='username'>Username:</label>"
            inicio += "<input type='text' name='username'"
            inicio += "<label for='password'>Password:</label>"
            inicio += "<input type='password' name='password'>"
            inicio += "<input type='submit' value='LOGIN' />"
            inicio += "</form>"
            registro = "<form action='/register/' method='post'>"
            registro += "User: <input type= 'text' name='user'>"
            registro += "Email: <input type= 'text' name='email'>"
            registro += "Password: <input type= 'password' name='password'>"
            registro += "<input type= 'submit' value='enviar'>"
            registro += "</form>"
        if request.user.is_authenticated():
            inicio = "<p>Bienvenido, "
            inicio += request.user.username
            inicio += "<a href='/logout'> Logout </a></p>"
            registro = "<p> Se ha identificado usted como: "
            registro += "<h2>" + request.user.username + "</h2>"
            registro +="<br> Esperamos que su visita a la página sea satisfactoria"
        # Walk museums by descending comment count and keep the first five
        # with at least one comment, honouring the accessibility filter.
        museos = Museo.objects.order_by('-num_comentarios')
        limite=0
        for museo in museos:
            if not museo.num_comentarios==0:
                if solo_museos == 0:
                    content += "<li><a href='" + museo.url + "'><p>" + str(museo.nombre) + ":</p></a></li>"
                    content += "<br>Dirección: " + museo.localizacion +"</br>"
                    content += "<br><a href=museos/" + museo.identidad + "><p> Más información</p></a></br>"
                    limite=limite+1
                    if limite == 5:
                        break
                elif solo_museos ==1:
                    if museo.accesibilidad=='1':
                        content += "<li><a href='" + museo.url + "'><p>" + str(museo.nombre) + ":</p></a></li>"
                        content += "<br>Dirección: " + museo.localizacion +"</br>"
                        content += "<br><a href=museos/" + museo.identidad + "><p> Más información</p></a></br>"
                        limite=limite+1
                        if limite == 5:
                            break
        pag_personales = Seleccion.objects.all()
        personales = ""
        for pag_personal in pag_personales:
            personales += "<br><a href='/" + pag_personal.propietario + "'>"
            personales += pag_personal.nombre + "</a></br>"
    elif request.method == "POST":
        # First POST loads the catalogue; always bounce back home.
        if len(Museo.objects.all()) == 0:
            parserXML('./museos/museos.xml')
        return redirect("/")
    # NOTE(review): for request methods other than GET/POST, inicio/registro/
    # personales are never assigned and the Context below raises NameError.
    user=str(request.user.username)
    personales_title="Páginas personales"
    if solo_museos == 0:
        accesibilidad ="Mostrar solo museos"
    elif solo_museos == 1:
        accesibilidad = "Mostrar todos emplazamientos"
    # Feed: the five most recent comments (queryset order).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'registro': registro, 'personales_title': personales_title, 'personales': personales,
        'user': user, 'content_title':content_title, 'content': content, 'accesibilidad': accesibilidad, 'comentarios':comentarios})
    template = get_template ('home.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
def css_color (request):
    """Rewrite the stylesheet's background line to the requested colour."""
    color = request.GET['Colores de fondo']
    # Colour keyword -> CSS declaration that replaces line index 11 of the
    # stylesheet (the background rule).
    fondos = {
        'Crema': "background: #F3E2A9;\n",
        'Blanco': "background: #FFFFFF;\n",
        'Plata': "background: #E6E6E6;\n",
    }
    with open('museos/static/css/templatemo_style.css', 'r') as hoja:
        lineas = hoja.readlines()
    with open('museos/static/css/templatemo_style.css', 'w') as hoja:
        for indice, linea in enumerate(lineas):
            # Swap only the background line, and only for a known colour.
            if color != 'None' and indice == 11 and color in fondos:
                linea = fondos[color]
            hoja.write(linea)
    return HttpResponseRedirect("/")
def css_letra (request):
    """Rewrite the stylesheet's font-size line to the requested size."""
    letra = request.GET['Tamaño letra cuerpo página']
    # Size keyword -> CSS declaration that replaces line index 18 of the
    # stylesheet (the body font-size rule).
    tamanos = {
        'Pequeña': "font-size: 8px;\n",
        'Mediana': "font-size: 12px;\n",
        'Grande': "font-size: 16px;\n",
    }
    with open('museos/static/css/templatemo_style.css', 'r') as hoja:
        lineas = hoja.readlines()
    with open('museos/static/css/templatemo_style.css', 'w') as hoja:
        for indice, linea in enumerate(lineas):
            # Swap only the font-size line, and only for a known size.
            if letra != 'None' and indice == 18 and letra in tamanos:
                linea = tamanos[letra]
            hoja.write(linea)
    return HttpResponseRedirect("/")
def XML(request, propietario):
    """Export *propietario*'s favourite museums as an XML document.

    NOTE(review): field values are concatenated raw — museum data containing
    '&' or '<' would produce invalid XML; confirm the source data is clean.
    """
    seleccion = Seleccion.objects.get(propietario=propietario)
    museos = seleccion.museos_fav.all()
    respuesta = "<?xml version='1.0' encoding='UTF-8'?>\n"
    respuesta += "<Contenidos>"
    # Each favourite wraps the underlying Museo record (museo.museo).
    for museo in museos:
        respuesta += "<contenido>"
        respuesta += "<atributo nombre='ID-ENTIDAD'>" + museo.museo.identidad + "</atributo>"
        respuesta += "<atributo nombre='NOMBRE'>" + museo.museo.nombre + "</atributo>"
        respuesta += "<atributo nombre='EQUIPAMIENTO'>" + museo.museo.equipamiento + "</atributo>"
        respuesta += "<atributo nombre='TRANSPORTE'>" + museo.museo.transporte + "</atributo>"
        respuesta += "<atributo nombre='DESCRIPCION'>" + museo.museo.descripcion + "</atributo>"
        respuesta += "<atributo nombre='HORARIO'>" + museo.museo.horario + "</atributo>"
        respuesta += "<atributo nombre='ACCESIBILIDAD'>" + museo.museo.accesibilidad + "</atributo>"
        respuesta += "<atributo nombre='LOCALIZACION'>" + museo.museo.localizacion + "</atributo>"
        respuesta += "<atributo nombre='DISTRITO'>" + museo.museo.distrito + "</atributo>"
        respuesta += "<atributo nombre='TELEFONO'>" + museo.museo.telefono + "</atributo>"
        respuesta += "<atributo nombre='FAX'>" + museo.museo.fax + "</atributo>"
        respuesta += "<atributo nombre='EMAIL'>" + museo.museo.email + "</atributo>"
        respuesta += "<atributo nombre='TIPO'>" + museo.museo.tipo + "</atributo>"
        respuesta += "<atributo nombre='NUM-COMENTARIOS'>" + str(museo.museo.num_comentarios) + "</atributo>"
        respuesta += "</contenido>"
    respuesta += "</Contenidos>"
    return HttpResponse(respuesta, content_type="text/xml")
def accesibilidad (request):
    """Toggle the global accessibility filter and redirect home.

    Bug fix: the original declared ``global solo_museos`` inside the
    branches, after the name had already been read in the same scope —
    a SyntaxError ("name is used prior to global declaration") that
    prevented the module from importing.  Declare it once at the top.
    """
    global solo_museos
    if solo_museos == 1:
        solo_museos = 0
    elif solo_museos == 0:
        solo_museos = 1
    return HttpResponseRedirect("/")
def about (request):
    """Static "about" page: author credits plus a short user manual."""
    parrafos = (
        "<p> Práctica realizada por: </p>",
        "<p> - <NAME> </p>",
        "<p> - DNI : 49101460W </p>",
        "<p> - Doble Grado Sistemas de Telecomunicaciones y ADE </p>",
        "<p> Si es la primera vez que accedes al sitio, te aparecerá un boton para cargar los museos que llamara a la función parser para parsear el XML de la comunidad, y guardarlo en la base de datos. </p>",
        "<p> Una vez realizado esto se tendrá acceso al sitio completo. </p>",
        "<p> Para registrar a un usuario bastará con rellenar los campos de registro que se encuentran a la derecha y pulsar en registro </p>",
        "<p> Esto registrará al usuario, que ya podrá entrar en su cuenta haciendo login en la esquina superior derecha</p>",
        "<p> Una vez identificado, cuando vaya a los museos y entre en la página propia de alguno (pinchando en Más información) podrá añadirlo a su página personal y añadir comentarios para el mismo, que serán anónimos </p>",
        "<p> En la página personal del usuario verá los museos que ha añadido y podrá cambiar el nombre a la derecha de su página. También podrás acceder a un XML con tus museos favoritos pinchando en el enlace 'XML' </p>",
        "<p> En caso de entrar en una página de la cual no eres propietario verás simplemente los museos añadidos pero no podrás cambiar el nombre ni obtener el HTML </p>",
        "<p> En la página principal, abajo te aparecerá un enlace en Accesibilidad, que al pincharlo la página pasará a mostrar todos los museos de la base de datos, o en su defecto solo los que tienen accesibilidad=1, depende lo que esté mostrando en ese momento.</p>",
        "<p> Para acceder a /about, abajo en el template pinchando en 'RubénGarcíaLlorens'</p>",
        "<p> Además de las funciones obligatorias, está implementado el favicon, y abajo a la izquierda hay un feed que siempre muestra los últimos 5 comentarios añadidos",
    )
    respuesta = "".join(parrafos)
    return HttpResponse(respuesta)
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from .models import Museo, Comentario, Museo_añadido, Seleccion
from .parser import parserXML
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils import timezone
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import FormView
from django.http.response import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
# Create your views here.
solo_museos = 0
@csrf_exempt
def auth_login(request):
    """Log the user in from posted credentials and return to the home page.

    Bug fix: the original returned ``None`` (an invalid Django view
    response) when authentication failed; now every path redirects to "/".
    """
    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        login(request, user)
    return HttpResponseRedirect("/")
def logout(request):
    """End the current session and redirect to the home page.

    Bug fix: this def shadows the ``logout`` imported from
    ``django.contrib.auth``, so the original body called itself and
    recursed until RecursionError; delegate to ``auth.logout`` instead.
    """
    auth.logout(request)
    return HttpResponseRedirect("/")
@csrf_exempt
def register(request):
    """Create a user account plus its empty personal page, then go home."""
    username = request.POST['user']
    email = request.POST['email']
    password = request.POST['password']
    nuevo_usuario = User.objects.create_user(username, email, password)
    nuevo_usuario.save()
    # Every user gets a personal page named after them from the start.
    titulo = "Página de " + str(username)
    pagina_personal = Seleccion(propietario=str(username), nombre=titulo)
    pagina_personal.save()
    return HttpResponseRedirect("/")
@csrf_exempt
def personal (request, propietario):
    """Render the personal page of *propietario* with their favourite museums.

    GET renders the page; POST with opcion='1' renames it first.  The
    module-global ``solo_museos`` flag restricts the listing to museums
    whose accesibilidad == '1' when it is 1.
    """
    global solo_museos
    # Header widgets: login/register forms for anonymous visitors,
    # greeting/welcome text for authenticated ones.
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        # NOTE(review): this <input> tag is never closed with '>' — confirm
        # the intended markup.
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='/register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    if request.user.is_authenticated():
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro +="<br> Esperamos que su visita a la página sea satisfactoria"
    if request.method == "GET":
        seleccion = Seleccion.objects.get (propietario = propietario)
        favoritos = seleccion.museos_fav.all()
        content_title=seleccion.nombre
        content=""
        # List each favourite; with solo_museos==1 only accessible museums.
        for favorito in favoritos:
            if solo_museos==0:
                content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
            elif solo_museos==1:
                if favorito.museo.accesibilidad=='1':
                    content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                    content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                    content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                    content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
        # Offer the XML export link only when there is something to export.
        if len(favoritos)!=0:
            content += "<li><a href='/" + propietario + "/XML'>"
            content += "XML</a></li>"
    # NOTE(review): a POST whose 'opcion' is missing or != '1' leaves
    # content/content_title undefined and the Context below raises NameError
    # — confirm every form posting here always sends opcion='1'.
    elif request.method =="POST" and request.POST['opcion']=='1':
        # Rename the page, then rebuild the same listing as the GET branch.
        seleccion = Seleccion.objects.get (propietario = propietario)
        seleccion.nombre = request.POST['texto']
        seleccion.save()
        favoritos = seleccion.museos_fav.all()
        content_title=seleccion.nombre
        content=""
        for favorito in favoritos:
            if solo_museos==0:
                content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
            elif solo_museos==1:
                if favorito.museo.accesibilidad=='1':
                    content += "<br><li><a href='" + favorito.museo.url + "'><p>" + str(favorito.museo.nombre) + ":</p></a></li></br>"
                    content += "<br>Dirección: " + favorito.museo.localizacion +"</br>"
                    content += "<br><a href=museos/" + favorito.museo.identidad + "><p> Más información</p></a></br>"
                    content+="<br><p> Añadido: " + str(favorito.añadido) + "</br></p>"
        if len(favoritos)!=0:
            content += "<li><a href='/" + propietario + "/XML'>"
            content += "XML</a></li>"
    # Sidebar: the owner gets rename/appearance controls, everyone else a
    # directory of all personal pages.
    if request.user.is_authenticated() and request.user.username == propietario:
        personales = "<form action='/" + propietario + "' method=POST>"
        personales += "Cambiar nombre de tu página personal<input type= 'text' name='texto'>"
        personales += "<input type= 'hidden' name='opcion' value='1'>"
        personales += "<input type= 'submit' value='Cambiar'>"
        personales += "</form>"
        registro = "<li> Personalizar color fondo (en ocasiones será necesario recargar la página para que se apliquen los cambios)</li>"
        registro += "<br><form action='/css_color' method=GET>"
        registro += "<select name='Colores de fondo'>"
        registro += "<option value=Blanco> Blanco</option>"
        registro += "<option value=Crema> Crema</option>"
        registro += "<option value=Plata> Plata</option>"
        registro += "<input type= 'submit' value='FILTRAR'>"
        registro += "</form></br>"
        registro += "<li> Cambiar tamaño letra (en ocasiones será necesario recargar la página para que se apliquen los cambios) </li>"
        registro += "<br><form action='/css_letra' method=GET>"
        registro += "<select name='Tamaño letra cuerpo página'>"
        registro += "<option value=Pequeña> Pequeña</option>"
        registro += "<option value=Mediana> Mediana</option>"
        registro += "<option value=Grande> Grande</option>"
        registro += "<input type= 'submit' value='FILTRAR'>"
        registro += "</form></br>"
    else:
        pag_personales = Seleccion.objects.all()
        personales = ""
        for pag_personal in pag_personales:
            personales += "<br><a href='/" + pag_personal.propietario + "'>"
            personales += pag_personal.nombre + "</a></br>"
    user=str(request.user.username)
    personales_title="Páginas personales"
    # Feed: the five most recent comments (queryset order).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'user': user, 'registro': registro, 'personales_title': personales_title, 'personales':personales, 'content_title': content_title, 'content': content, 'comentarios':comentarios})
    template = get_template ('museo_pers.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def museo_pers(request, id):
    # Detail page for a single museum ('/museos/<id>').
    # Shows the museum's stored attributes and comments; authenticated users
    # may POST opcion=1 (add a comment) or opcion=2 (add to their personal
    # page).  Renders 'museo_pers.html'.
    if not request.user.is_authenticated():
        # Anonymous visitor: sidebar shows login and registration forms.
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        # NOTE(review): this <input> tag is missing its closing '>' — kept
        # as-is; confirm browsers recover from the broken markup.
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='/register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    if request.user.is_authenticated():
        # Logged-in visitor: greeting plus logout link instead of the forms.
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro +="<br> Esperamos que su visita a la página sea satisfactoria"
    museo=Museo.objects.get(identidad=id)
    content_title = str(museo.nombre)
    # opcion=1: store a new anonymous comment and bump the counter.
    # NOTE(review): request.POST['opcion'] raises MultiValueDictKeyError for
    # a POST without that field — confirm all POSTs to this URL include it.
    if request.method=="POST" and request.POST['opcion']=='1':
        texto = request.POST['texto']
        comentario = Comentario (museo = museo , texto= texto)
        comentario.save()
        museo.num_comentarios=museo.num_comentarios + 1
        museo.save()
    # opcion=2: add this museum to the current user's favourites (only once).
    if request.method=="POST" and request.POST['opcion']=='2':
        # NOTE(review): a Museo_añadido row is saved even when the museum is
        # already a favourite and the row goes unused — confirm intended.
        museo_añadido = Museo_añadido (museo = museo, añadido = timezone.now())
        museo_añadido.save()
        seleccion = Seleccion.objects.get (propietario = request.user.username)
        favoritos = seleccion.museos_fav.all()
        if favoritos.filter(museo=museo_añadido.museo).count() == 0:
            seleccion.museos_fav.add(museo_añadido)
            seleccion.save()
    # Main body: every stored attribute of the museum, then its comments.
    content = "<br><li>Descripción entidad:</li><p>" + str(museo.descripcion_entidad) + "</p><br>"
    content += "<br><li>Transporte:</li><p>" + str(museo.transporte) + "</p><br>"
    content += "<br><li>Equipamiento:</li><p>" + str(museo.equipamiento) + "</p><br>"
    content += "<br><li>Descripción:</li><p>" + str(museo.descripcion) + "</p><br>"
    content += "<br><li>Horario:</li><p>" + str(museo.horario) + "</p><br>"
    content += "<br><li>URL del sitio:</li> <a href ='" + museo.url + "'>" + str(museo.url) + "</a><br>"
    content += "<br><li>Localización:</li><p>" + str(museo.localizacion) + "</p><br>"
    content += "<br><li>Contacto:</li>"
    content += "<p>Telefono: " + str(museo.telefono) + "</p>"
    content += "<p>Fax: " + str(museo.fax) + "</p>"
    content += "<p>Email: " + str(museo.email) + "</p>"
    opiniones = Comentario.objects.filter(museo=museo)
    content += "<br><li>Comentarios:</li>"
    num=1
    for opinion in opiniones:
        content += "<p>"+ str(num) + " - " + opinion.texto + "</p>"
        num=num+1
    if request.user.is_authenticated():
        # Comment form (opcion=1) and "add to my personal page" form (opcion=2).
        content += "<br><li> Añadir comentario: </li></br>"
        content += "<form action='/museos/" + str(id) + "' method=POST>"
        content += "<input type= 'text' name='texto'>"
        content += "<input type= 'hidden' name='opcion' value='1'>"
        content += "<input type= 'submit' value='enviar'>"
        content += "</form>"
        content += "<br><form action='/museos/" + str(id) + "' method=POST>"
        content += "<input type= 'hidden' name='opcion' value='2'>"
        content += "<input type= 'submit' value='Añadir a mi página personal'>"
        content += "</form></br>"
    if not request.user.is_authenticated():
        content += "<br><li>Podrá añadir comentarios cuando se registre en la página y se identifique. </li>"
    user=str(request.user.username)
    # Sidebar: links to every user's personal page.
    pag_personales = Seleccion.objects.all()
    personales = ""
    for pag_personal in pag_personales:
        personales += "<br><a href='/" + pag_personal.propietario + "'>"
        personales += pag_personal.nombre + "</a></br>"
    personales_title="Páginas personales"
    # Footer feed: the last five comments added site-wide (only entries with
    # index >= len-5 are rendered — presumably insertion-ordered; verify).
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num=0
    comentarios =""
    if len(todos_comentarios)!=0:
        for comentario in todos_comentarios:
            if num >= maximo:
                comentarios += "<li>" + comentario.museo.nombre + ":</li>"
                comentarios += "<p>    " + comentario.texto + "</p>"
            num = num+1
    c = Context({'inicio': inicio, 'registro': registro,'personales_title':personales_title,
    'personales':personales, 'user': user, 'content_title': content_title, 'content': content, 'comentarios':comentarios})
    template = get_template ('museo_pers.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def allmuseums(request):
    """Museum listing page ('/museos').

    GET lists every museum, or only those with accesibilidad '1' when the
    global solo_museos flag is 1.  POST either performs the first-time XML
    load (CARGAR button) or filters the listing by the district submitted
    from the sidebar form.  Renders 'museos.html'.
    """
    global solo_museos
    # --- Sidebar auth blocks -------------------------------------------
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        # NOTE(review): this <input> is missing its closing '>' (kept as-is).
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    else:
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro += "<br> Esperamos que su visita a la página sea satisfactoria"
    # --- Shared helpers ------------------------------------------------
    # Horizontal rule used between entries (originally repeated inline 6x).
    separador = "<br> " + "-" * 136 + " </br>"
    def _ficha(museo, enlace):
        # One listing entry; `enlace` is the opening <a ...> tag, which the
        # original built differently per branch, so it stays a parameter.
        s = enlace
        s += museo.nombre + "</a><br>"
        s += "<br><li>URL del sitio: </li> <a href='" + museo.url + "'> " + museo.url + "</a></br>"
        s += separador
        return s
    def _visible(museo):
        # solo_museos == 1 restricts listings to museums flagged accessible.
        return solo_museos == 0 or museo.accesibilidad == '1'
    # --- Main content --------------------------------------------------
    if request.method == "GET":
        if len(Museo.objects.all()) == 0:
            # Nothing loaded yet: offer the CARGAR button instead of a list.
            content_title = "Aún no hay museos cargados"
            content = "<form action='/museos/' method='post'>"
            content += "<button type='submit'>CARGAR</button>"
            content += "</form>"
        else:
            content_title = "Todos los museos comunidad"
            content = ""
            for museo in Museo.objects.all():
                if _visible(museo):
                    content += _ficha(museo, "<br><a href=museos/" + str(museo.identidad) + ">")
    elif request.method == "POST":
        if len(Museo.objects.all()) == 0:
            parserXML('./museos/museos.xml')
        content_title = "Todos los museos cargados"
        museos = Museo.objects.all()
        content = ""
        if len(Museo.objects.all()) == 0:
            # NOTE(review): dead branch — immediately after parserXML the
            # table cannot be empty, so this "list what was just loaded"
            # block never executes and every POST falls through to the
            # district filter below.  Preserved pending confirmation.
            for museo in museos:
                if _visible(museo):
                    content += _ficha(museo, "<br><a href='/" + str(museo.identidad) + "'>")
        else:
            # District filter: the sidebar form posts 'Distrito=<value>'.
            # NOTE(review): the CARGAR form posts an empty body, for which
            # split("=")[1] raises IndexError — confirm how the CARGAR POST
            # is expected to reach this point.
            dist_ele = request.body.decode('utf-8').split("=")[1]
            distrito = str(dist_ele)
            for museo in museos:
                if solo_museos == 0:
                    if distrito == str(museo.distrito):
                        content += _ficha(museo, "<br><a href='/museos/" + str(museo.identidad) + "'>")
                elif solo_museos == 1:
                    # NOTE(review): in accessible-only mode the district value
                    # is ignored and every accessible museum is listed —
                    # confirm whether the distrito check was meant here too.
                    if museo.accesibilidad == '1':
                        content += _ficha(museo, "<br><a href='/museos/" + str(museo.identidad) + "'>")
    # --- Sidebar: district selector ------------------------------------
    distritos = Museo.objects.all().values_list('distrito', flat=True).distinct()
    personales = "<form action='/museos' method='post'>"
    personales += "<select name='Distrito'>"
    for distrito in distritos:
        personales += "<option value='" + distrito + "'>" + distrito
        personales += "</option>"
    if len(Museo.objects.all()) != 0:
        personales += "<input type= 'submit' value='FILTRAR'>"
    else:
        personales += "<input type= 'submit' value='CARGAR MUSEOS'>"
    personales += "</form>"
    user = str(request.user.username)
    personales_title = "Filtrar por distrito"
    # --- Footer feed: the last five comments site-wide ------------------
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num = 0
    comentarios = ""
    for comentario in todos_comentarios:
        if num >= maximo:
            comentarios += "<li>" + comentario.museo.nombre + ":</li>"
            comentarios += "<p>    " + comentario.texto + "</p>"
        num = num + 1
    c = Context({'inicio': inicio, 'personales_title': personales_title, 'personales': personales, 'registro': registro, 'user': user, 'content_title': content_title, 'content': content, 'comentarios': comentarios})
    template = get_template('museos.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
@csrf_exempt
def main(request):
    """Home page view ('/').

    GET renders the five most-commented museums (honouring the global
    solo_museos accessibility filter) plus the sidebar blocks; POST loads
    the museum XML data set when the database is still empty and then
    redirects back to the home page.  Renders 'home.html'.
    """
    global solo_museos
    # Handle POST first: first-time data load, then always bounce to "/".
    # (Originally a POST with museums already loaded fell through with
    # `inicio`/`registro`/`personales` undefined and crashed with NameError.)
    if request.method == "POST":
        if len(Museo.objects.all()) == 0:
            parserXML('./museos/museos.xml')
        return redirect("/")
    # Label for the accessibility-toggle link shown in the template.
    # (The original computed this twice — the second time preceded by an
    # illegal duplicate `global` declaration; these are the final strings
    # that actually reached the template.)
    if solo_museos == 0:
        accesibilidad = "Mostrar solo museos"
    elif solo_museos == 1:
        accesibilidad = "Mostrar todos emplazamientos"
    # Main content: load button while empty, otherwise the top-5 listing.
    if len(Museo.objects.all()) == 0:
        content_title = "Aún no hay museos cargados"
        content = "<form action='/' method='post'>"
        content += "<button type='submit'>CARGAR</button>"
        content += "</form>"
    else:
        content_title = "5 museos más comentados"
        content = ""
    # Sidebar auth blocks.
    if not request.user.is_authenticated():
        inicio = "<form action='/login' method='post'>"
        inicio += "<label for='username'>Username:</label>"
        # NOTE(review): this <input> is missing its closing '>' (kept as-is).
        inicio += "<input type='text' name='username'"
        inicio += "<label for='password'>Password:</label>"
        inicio += "<input type='password' name='password'>"
        inicio += "<input type='submit' value='LOGIN' />"
        inicio += "</form>"
        registro = "<form action='/register/' method='post'>"
        registro += "User: <input type= 'text' name='user'>"
        registro += "Email: <input type= 'text' name='email'>"
        registro += "Password: <input type= 'password' name='password'>"
        registro += "<input type= 'submit' value='enviar'>"
        registro += "</form>"
    else:
        inicio = "<p>Bienvenido, "
        inicio += request.user.username
        inicio += "<a href='/logout'> Logout </a></p>"
        registro = "<p> Se ha identificado usted como: "
        registro += "<h2>" + request.user.username + "</h2>"
        registro += "<br> Esperamos que su visita a la página sea satisfactoria"
    # Up to five commented museums, most commented first; in accessible-only
    # mode, museums without accesibilidad '1' are skipped.
    museos = Museo.objects.order_by('-num_comentarios')
    limite = 0
    for museo in museos:
        if museo.num_comentarios == 0:
            continue
        if solo_museos == 1 and museo.accesibilidad != '1':
            continue
        content += "<li><a href='" + museo.url + "'><p>" + str(museo.nombre) + ":</p></a></li>"
        content += "<br>Dirección: " + museo.localizacion + "</br>"
        content += "<br><a href=museos/" + museo.identidad + "><p> Más información</p></a></br>"
        limite = limite + 1
        if limite == 5:
            break
    # Sidebar: links to every user's personal page.
    pag_personales = Seleccion.objects.all()
    personales = ""
    for pag_personal in pag_personales:
        personales += "<br><a href='/" + pag_personal.propietario + "'>"
        personales += pag_personal.nombre + "</a></br>"
    user = str(request.user.username)
    personales_title = "Páginas personales"
    # Footer feed: the last five comments added site-wide.
    todos_comentarios = Comentario.objects.all()
    maximo = len(todos_comentarios) - 5
    num = 0
    comentarios = ""
    for comentario in todos_comentarios:
        if num >= maximo:
            comentarios += "<li>" + comentario.museo.nombre + ":</li>"
            comentarios += "<p>    " + comentario.texto + "</p>"
        num = num + 1
    c = Context({'inicio': inicio, 'registro': registro, 'personales_title': personales_title, 'personales': personales,
                 'user': user, 'content_title': content_title, 'content': content, 'accesibilidad': accesibilidad, 'comentarios': comentarios})
    template = get_template('home.html')
    respuesta = template.render(c)
    return HttpResponse(respuesta)
def css_color (request):
    """Persist the user's background-colour choice ('/css_color').

    Rewrites line 12 (index 11) of the site stylesheet — where the
    `background:` declaration lives — and redirects back to the home page.
    An unknown or 'None' selection leaves the file untouched.
    """
    color = request.GET['Colores de fondo']
    # Form value -> CSS declaration for the stylesheet's background line.
    estilos = {
        'Blanco': "background: #FFFFFF;\n",
        'Crema': "background: #F3E2A9;\n",
        'Plata': "background: #E6E6E6;\n",
    }
    ruta = 'museos/static/css/templatemo_style.css'
    # Read the whole stylesheet, patch the one line, write it back.  `with`
    # guarantees both handles are closed even on error (the original left
    # them open until interpreter cleanup).
    with open(ruta, 'r') as css_old:
        lines = css_old.readlines()
    if color != 'None' and color in estilos and len(lines) > 11:
        lines[11] = estilos[color]
    with open(ruta, 'w') as css_new:
        css_new.writelines(lines)
    return HttpResponseRedirect("/")
def css_letra (request):
    """Persist the user's body font-size choice ('/css_letra').

    Rewrites line 19 (index 18) of the site stylesheet — where the
    `font-size:` declaration lives — and redirects back to the home page.
    An unknown or 'None' selection leaves the file untouched.
    """
    letra = request.GET['Tamaño letra cuerpo página']
    # Form value -> CSS declaration for the stylesheet's font-size line.
    tamanos = {
        'Pequeña': "font-size: 8px;\n",
        'Mediana': "font-size: 12px;\n",
        'Grande': "font-size: 16px;\n",
    }
    ruta = 'museos/static/css/templatemo_style.css'
    # Read the whole stylesheet, patch the one line, write it back.  `with`
    # guarantees both handles are closed even on error (the original left
    # them open until interpreter cleanup).
    with open(ruta, 'r') as css_old:
        lines = css_old.readlines()
    if letra != 'None' and letra in tamanos and len(lines) > 18:
        lines[18] = tamanos[letra]
    with open(ruta, 'w') as css_new:
        css_new.writelines(lines)
    return HttpResponseRedirect("/")
def XML(request, propietario):
    """Serve the given user's favourite museums as an XML document.

    Builds one <contenido> element per favourite with an <atributo> child
    per model field, ensuring the root <Contenidos> element is closed
    exactly once (after the loop), and joins the pieces instead of using
    quadratic string concatenation.
    """
    seleccion = Seleccion.objects.get(propietario=propietario)
    museos = seleccion.museos_fav.all()
    partes = ["<?xml version='1.0' encoding='UTF-8'?>\n"]
    partes.append("<Contenidos>")
    for favorito in museos:
        m = favorito.museo
        partes.append("<contenido>")
        # NOTE(review): field values are emitted verbatim, with no XML
        # escaping — data containing '<', '&' or quotes would produce
        # malformed XML.  Kept as in the original; confirm the data set.
        for nombre, valor in (
            ('ID-ENTIDAD', m.identidad),
            ('NOMBRE', m.nombre),
            ('EQUIPAMIENTO', m.equipamiento),
            ('TRANSPORTE', m.transporte),
            ('DESCRIPCION', m.descripcion),
            ('HORARIO', m.horario),
            ('ACCESIBILIDAD', m.accesibilidad),
            ('LOCALIZACION', m.localizacion),
            ('DISTRITO', m.distrito),
            ('TELEFONO', m.telefono),
            ('FAX', m.fax),
            ('EMAIL', m.email),
            ('TIPO', m.tipo),
            ('NUM-COMENTARIOS', str(m.num_comentarios)),
        ):
            partes.append("<atributo nombre='%s'>%s</atributo>" % (nombre, valor))
        partes.append("</contenido>")
    partes.append("</Contenidos>")
    return HttpResponse("".join(partes), content_type="text/xml")
def accesibilidad (request):
    """Toggle the global accessible-only flag and reload the home page.

    Fix: the original declared `global solo_museos` inside each branch
    *after* the name had already been read in the `if` condition, which
    Python rejects — the declaration belongs at the top of the function.
    """
    global solo_museos
    if solo_museos == 1:
        solo_museos = 0
    elif solo_museos == 0:
        solo_museos = 1
    return HttpResponseRedirect("/")
def about (request):
    """Static '/about' page: author info and a short user manual.

    The paragraphs are collected in a list and joined once instead of being
    accumulated with repeated `+=`; stray extraction residue that had been
    fused onto the original return statement is gone.
    """
    parrafos = [
        "<p> Práctica realizada por: </p>",
        "<p> - <NAME> </p>",
        "<p> - DNI : 49101460W </p>",
        "<p> - Doble Grado Sistemas de Telecomunicaciones y ADE </p>",
        "<p> Si es la primera vez que accedes al sitio, te aparecerá un boton para cargar los museos que llamara a la función parser para parsear el XML de la comunidad, y guardarlo en la base de datos. </p>",
        "<p> Una vez realizado esto se tendrá acceso al sitio completo. </p>",
        "<p> Para registrar a un usuario bastará con rellenar los campos de registro que se encuentran a la derecha y pulsar en registro </p>",
        "<p> Esto registrará al usuario, que ya podrá entrar en su cuenta haciendo login en la esquina superior derecha</p>",
        "<p> Una vez identificado, cuando vaya a los museos y entre en la página propia de alguno (pinchando en Más información) podrá añadirlo a su página personal y añadir comentarios para el mismo, que serán anónimos </p>",
        "<p> En la página personal del usuario verá los museos que ha añadido y podrá cambiar el nombre a la derecha de su página. También podrás acceder a un XML con tus museos favoritos pinchando en el enlace 'XML' </p>",
        "<p> En caso de entrar en una página de la cual no eres propietario verás simplemente los museos añadidos pero no podrás cambiar el nombre ni obtener el HTML </p>",
        "<p> En la página principal, abajo te aparecerá un enlace en Accesibilidad, que al pincharlo la página pasará a mostrar todos los museos de la base de datos, o en su defecto solo los que tienen accesibilidad=1, depende lo que esté mostrando en ese momento.</p>",
        "<p> Para acceder a /about, abajo en el template pinchando en 'RubénGarcíaLlorens'</p>",
        # NOTE(review): the final paragraph is missing its closing </p> in the
        # original — kept byte-identical.
        "<p> Además de las funciones obligatorias, está implementado el favicon, y abajo a la izquierda hay un feed que siempre muestra los últimos 5 comentarios añadidos",
    ]
    return HttpResponse("".join(parrafos))
from __future__ import with_statement
import time
from . import portalocker
# Default number of seconds Lock.acquire() keeps retrying before giving up.
DEFAULT_TIMEOUT = 5
# Default pause (seconds) between retries while waiting for the lock.
DEFAULT_CHECK_INTERVAL = 0.25
# Exclusive, non-blocking lock: the lock call fails immediately when the
# file is already locked instead of blocking.
LOCK_METHOD = portalocker.LOCK_EX | portalocker.LOCK_NB
# Public API of this module.
__all__ = [
    'Lock',
    'AlreadyLocked',
]
class AlreadyLocked(Exception):
    """Raised by Lock.acquire() when fail_when_locked is set and the
    initial lock attempt found the file already locked."""
    pass
class Lock(object):
    """Context-manager file lock with a built-in acquire timeout.

    Fixes over the original: the Python-2-only ``except E, e`` syntax is
    replaced by ``except E as e`` (valid on 2.6+ and 3.x), and the
    re-raises unpack ``exception.args`` instead of iterating the exception
    object itself, which is a TypeError on Python 3.
    """

    def __init__(
            self, filename, mode='a', truncate=0, timeout=DEFAULT_TIMEOUT,
            check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=True):
        '''Lock manager with built-in timeout

        filename -- filename
        mode -- the open mode, 'a' or 'ab' should be used for writing
        truncate -- use truncate to emulate 'w' mode, None is disabled, 0 is
            truncate to 0 bytes
        timeout -- timeout when trying to acquire a lock
        check_interval -- check interval while waiting
        fail_when_locked -- after the initial lock failed, return an error
            or lock the file

        fail_when_locked is useful when multiple threads/processes can race
        when creating a file. If set to true then the system will wait till
        the lock was acquired and then return an AlreadyLocked exception.

        Note that the file is opened first and locked later. So using 'w' as
        mode will result in truncate _BEFORE_ the lock is checked.
        '''
        self.fh = None
        self.filename = filename
        self.mode = mode
        self.truncate = truncate
        self.timeout = timeout
        self.check_interval = check_interval
        self.fail_when_locked = fail_when_locked
        # 'w' would empty the file before the lock is even attempted.
        assert 'w' not in mode, 'Mode "w" clears the file before locking'

    def acquire(
            self, timeout=None, check_interval=None, fail_when_locked=None):
        '''Acquire the locked filehandle.

        Per-call arguments default to the values given to the constructor.
        Raises AlreadyLocked or portalocker.LockException when the lock
        cannot be obtained within `timeout` seconds.
        '''
        if timeout is None:
            timeout = self.timeout
        if check_interval is None:
            check_interval = self.check_interval
        if fail_when_locked is None:
            fail_when_locked = self.fail_when_locked
        # If we already have a filehandle, return it.
        fh = self.fh
        if fh:
            return fh
        # Get a new filehandle and try to lock it.
        fh = self._get_fh()
        try:
            fh = self._get_lock(fh)
        except portalocker.LockException as exception:
            # Keep retrying until the timeout budget is exhausted.
            while timeout > 0:
                time.sleep(check_interval)
                timeout -= check_interval
                try:
                    # The initial attempt already failed; when
                    # fail_when_locked is set, stop retrying and surface
                    # AlreadyLocked (it is not a LockException, so it
                    # escapes the inner except below).
                    if fail_when_locked:
                        raise AlreadyLocked(*exception.args)
                    else:  # pragma: no cover
                        # We've got the lock.
                        fh = self._get_lock(fh)
                        break
                except portalocker.LockException:
                    # Still locked — loop and try again.
                    pass
            else:
                # Timed out without acquiring the lock: re-raise.
                raise portalocker.LockException(*exception.args)
        # Prepare the filehandle (truncate if needed).
        fh = self._prepare_fh(fh)
        self.fh = fh
        return fh

    def _get_fh(self):
        '''Get a new filehandle'''
        return open(self.filename, self.mode)

    def _get_lock(self, fh):
        '''
        Try to lock the given filehandle

        raises LockException if it fails'''
        portalocker.lock(fh, LOCK_METHOD)
        return fh

    def _prepare_fh(self, fh, truncate=None):
        '''
        Prepare the filehandle for usage

        If truncate is a number, the file will be truncated to that amount of
        bytes
        '''
        if truncate is None:
            truncate = self.truncate
        if truncate is not None:
            fh.seek(truncate)
            fh.truncate(truncate)
        return fh

    def __enter__(self):
        self.fh = self.acquire()
        return self.fh

    def __exit__(self, type, value, tb):
        # NOTE(review): closing the handle is assumed to release the OS-level
        # lock; self.fh is not reset to None, so re-entering this manager
        # returns the closed handle — confirm whether reuse is intended.
        if self.fh:
            self.fh.close()
import time
from . import portalocker
# Default number of seconds Lock.acquire() keeps retrying before giving up.
DEFAULT_TIMEOUT = 5
# Default pause (seconds) between retries while waiting for the lock.
DEFAULT_CHECK_INTERVAL = 0.25
# Exclusive, non-blocking lock: the lock call fails immediately when the
# file is already locked instead of blocking.
LOCK_METHOD = portalocker.LOCK_EX | portalocker.LOCK_NB
# Public API of this module.
__all__ = [
    'Lock',
    'AlreadyLocked',
]
class AlreadyLocked(Exception):
    """Raised by Lock.acquire() when fail_when_locked is set and the
    initial lock attempt found the file already locked."""
    pass
class Lock(object):
    """Context-manager file lock with a built-in acquire timeout.

    Fixes over the original: the Python-2-only ``except E, e`` syntax is
    replaced by ``except E as e`` (valid on 2.6+ and 3.x), and the
    re-raises unpack ``exception.args`` instead of iterating the exception
    object itself, which is a TypeError on Python 3.
    """

    def __init__(
            self, filename, mode='a', truncate=0, timeout=DEFAULT_TIMEOUT,
            check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=True):
        '''Lock manager with built-in timeout

        filename -- filename
        mode -- the open mode, 'a' or 'ab' should be used for writing
        truncate -- use truncate to emulate 'w' mode, None is disabled, 0 is
            truncate to 0 bytes
        timeout -- timeout when trying to acquire a lock
        check_interval -- check interval while waiting
        fail_when_locked -- after the initial lock failed, return an error
            or lock the file

        fail_when_locked is useful when multiple threads/processes can race
        when creating a file. If set to true then the system will wait till
        the lock was acquired and then return an AlreadyLocked exception.

        Note that the file is opened first and locked later. So using 'w' as
        mode will result in truncate _BEFORE_ the lock is checked.
        '''
        self.fh = None
        self.filename = filename
        self.mode = mode
        self.truncate = truncate
        self.timeout = timeout
        self.check_interval = check_interval
        self.fail_when_locked = fail_when_locked
        # 'w' would empty the file before the lock is even attempted.
        assert 'w' not in mode, 'Mode "w" clears the file before locking'

    def acquire(
            self, timeout=None, check_interval=None, fail_when_locked=None):
        '''Acquire the locked filehandle.

        Per-call arguments default to the values given to the constructor.
        Raises AlreadyLocked or portalocker.LockException when the lock
        cannot be obtained within `timeout` seconds.
        '''
        if timeout is None:
            timeout = self.timeout
        if check_interval is None:
            check_interval = self.check_interval
        if fail_when_locked is None:
            fail_when_locked = self.fail_when_locked
        # If we already have a filehandle, return it.
        fh = self.fh
        if fh:
            return fh
        # Get a new filehandle and try to lock it.
        fh = self._get_fh()
        try:
            fh = self._get_lock(fh)
        except portalocker.LockException as exception:
            # Keep retrying until the timeout budget is exhausted.
            while timeout > 0:
                time.sleep(check_interval)
                timeout -= check_interval
                try:
                    # The initial attempt already failed; when
                    # fail_when_locked is set, stop retrying and surface
                    # AlreadyLocked (it is not a LockException, so it
                    # escapes the inner except below).
                    if fail_when_locked:
                        raise AlreadyLocked(*exception.args)
                    else:  # pragma: no cover
                        # We've got the lock.
                        fh = self._get_lock(fh)
                        break
                except portalocker.LockException:
                    # Still locked — loop and try again.
                    pass
            else:
                # Timed out without acquiring the lock: re-raise.
                raise portalocker.LockException(*exception.args)
        # Prepare the filehandle (truncate if needed).
        fh = self._prepare_fh(fh)
        self.fh = fh
        return fh

    def _get_fh(self):
        '''Get a new filehandle'''
        return open(self.filename, self.mode)

    def _get_lock(self, fh):
        '''
        Try to lock the given filehandle

        raises LockException if it fails'''
        portalocker.lock(fh, LOCK_METHOD)
        return fh

    def _prepare_fh(self, fh, truncate=None):
        '''
        Prepare the filehandle for usage

        If truncate is a number, the file will be truncated to that amount of
        bytes
        '''
        if truncate is None:
            truncate = self.truncate
        if truncate is not None:
            fh.seek(truncate)
            fh.truncate(truncate)
        return fh

    def __enter__(self):
        self.fh = self.acquire()
        return self.fh

    def __exit__(self, type, value, tb):
        # NOTE(review): closing the handle is assumed to release the OS-level
        # lock; self.fh is not reset to None, so re-entering this manager
        # returns the closed handle — confirm whether reuse is intended.
        if self.fh:
            self.fh.close()
import unittest
import pycompiler.util
from pycompiler.util import *
import os.path, sys, StringIO, types
class TestStack(unittest.TestCase):
    """Tests for pycompiler.util.Stack: push/pop/top semantics, bulk pushes
    and the tab-separated string renderings."""
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_stack_push_an_int(self):
        # push appends to the end of the backing `data` list.
        stack = pycompiler.util.Stack()
        stack.push(1)
        self.assertEqual(stack.data, [1])
    def test_stack_push_2_ints_and_pop_1_int(self):
        # pop removes and returns the most recently pushed element (LIFO).
        stack = pycompiler.util.Stack()
        stack.push(1)
        stack.push(-256)
        self.assertEqual(stack.data, [1, -256])
        top = stack.pop()
        self.assertEqual(top, -256)
        self.assertEqual(stack.data, [1])
    def test_push_and_pop_string_and_int(self):
        # The stack is heterogeneous: mixed value types are allowed.
        stack = pycompiler.util.Stack()
        stack.push('hello world')
        stack.push(7)
        self.assertEqual(stack.data, ['hello world', 7])
        top = stack.pop()
        self.assertEqual(top, 7)
        self.assertEqual(stack.data, ['hello world'])
    def test_pop_empty_stack_throws_AssertionError(self):
        # Popping an empty stack must raise AssertionError.
        stack = pycompiler.util.Stack()
        self.assertRaises(AssertionError, stack.pop)
    def test_string_representation_of_stack(self):
        # str() joins the elements bottom-to-top with tabs.
        stack = pycompiler.util.Stack()
        stack.data = [1, 2, 'hello']
        strval = "%s" % stack
        self.assertEqual(strval, "1\t2\thello")
    def test_stack_has_top_method(self):
        # top() peeks at the last pushed element without removing it.
        stack = pycompiler.util.Stack()
        stack.push(1)
        stack.push('wow')
        self.assertEqual(stack.top(), 'wow')
    def test_top_called_on_empty_stack_throws_AssertionError(self):
        stack = pycompiler.util.Stack()
        self.assertRaises(AssertionError, stack.top)
    def test_push_multiple_elements_in_stack_order(self):
        # multipush([a, b, c]) leaves `a` on top: the list head ends last in
        # the backing list.
        stack = pycompiler.util.Stack()
        stack.multipush([1, 2, 3])
        self.assertEqual(stack.data, [3, 2, 1])
        stack = pycompiler.util.Stack()
        stack.multipush(['a', 'b', 'c'])
        self.assertEqual(stack.data, ['c', 'b', 'a'])
    def test_stack_prints_reverse(self):
        # print_reverse renders top-to-bottom and must not mutate the stack.
        stack = pycompiler.util.Stack()
        stack.multipush(['a', 'b', 'c'])
        self.assertEqual(stack.data, ['c', 'b', 'a'])
        strval = stack.print_reverse()
        self.assertEqual(strval, "a\tb\tc")
        self.assertEqual(stack.data, ['c', 'b', 'a'])
        # add ability to specify delimiter other than '\t'
        strval = stack.print_reverse(' ')
        self.assertEqual(strval, "a b c")
        self.assertEqual(stack.data, ['c', 'b', 'a'])
    def test_push_then_pop_returns_item_not_copy(self):
        # pop returns the pushed object itself; a same-content Stack does not
        # compare equal (equality is evidently identity-based here).
        stack = pycompiler.util.Stack()
        stack2 = pycompiler.util.Stack()
        stack2.push('stack2')
        stack.push(stack2)
        self.assertEqual(stack.data, [stack2])
        alias_stack = stack.pop()
        self.assertEqual(stack2, alias_stack)
        stack3 = pycompiler.util.Stack()
        stack3.push('stack2')
        self.assertNotEqual(stack3, stack2)
        self.assertNotEqual(stack3, alias_stack)
class TestConvertFromString(unittest.TestCase):
    """convert_from_str should map each literal to its natural Python type."""
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_util_convert_from_string_utility(self):
        # Same four checks as before, driven from a table of
        # (literal, expected type) pairs.
        cases = [
            ('42', int),
            ('3.1415', float),
            ('howdy', str),
            ## add None
            ('None', types.NoneType),
        ]
        for literal, expected_type in cases:
            converted = pycompiler.util.convert_from_str(literal)
            self.assertTrue(isinstance(converted, expected_type))
class TestMyWrite(unittest.TestCase):
    """Tests for pycompiler.util.write_to, which writes a string to any
    number of file-like targets (None targets are skipped)."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Fresh scratch file for every test; removed again in tearDown.
        self.outfile = open('my_write_outfile', 'w')
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.outfile.close()
        os.remove('my_write_outfile')
    def test_write_to(self):
        string1 = \
        """Multiline string
        that goes to
        stdout and outfile"""
        string2 = 'only goes to file'
        string3 = 'only goes to stdout'
        # Swap stdout for a StringIO so its contents can be inspected.
        old_stdout = sys.stdout
        try:
            sys.stdout = StringIO.StringIO()
            pycompiler.util.write_to(string1, sys.stdout, self.outfile)
            pycompiler.util.write_to(string2, self.outfile)
            pycompiler.util.write_to(string3, sys.stdout)
            self.outfile.close()
            outfile_contents = open('my_write_outfile').read()
            stdout_contents = sys.stdout.getvalue()
            # Each string must appear in the targets it was sent to...
            self.assertNotEqual(outfile_contents.find(string1), -1)
            self.assertNotEqual(outfile_contents.find(string2), -1)
            # ...and the stdout-only string must not reach the file.
            self.assertEqual(outfile_contents.find(string3), -1)
            self.assertNotEqual(stdout_contents.find(string1), -1)
            self.assertNotEqual(stdout_contents.find(string3), -1)
        finally:
            # Always restore the real stdout, even when an assertion failed.
            sys.stdout = old_stdout
    def test_file_none_no_problem(self):
        # make sure that if an argument == None is passed, it doesn't
        # cause an exception
        string1 = "only goes to that which exists"
        file2 = None
        pycompiler.util.write_to(string1, self.outfile, file2)
        self.outfile.close()
        outfile_contents = open('my_write_outfile').read()
        self.assertNotEqual(outfile_contents.find(string1), -1)
class TestMisc(unittest.TestCase):
    """Tests for the whitespace-insensitive string helpers and for
    token_stack_str."""
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_equal_w_out_whitespace(self):
        # Strings differing only in spacing/newlines must compare equal.
        string1 = ' hi there dave, what up?'
        string2 = '''hi there dave,
        what up?'''
        string3 = 'hi there dave, what up?'
        self.assertEqual(equal_without_whitespace(string1, string2), True)
        self.assertEqual(equal_without_whitespace(string1, string3), True)
    def test_find_without_whitespace(self):
        # Whitespace-insensitive substring search; note it is asymmetric:
        # the needle must occur inside the haystack, not vice versa.
        string1 = ' hi there dave, what up?'
        string2 = '''hi there dave,
        '''
        string3 = 'dave, what up?'
        self.assertEqual(find_without_whitespace(string1, string2), True)
        self.assertEqual(find_without_whitespace(string1, string3), True)
        self.assertEqual(find_without_whitespace(string2, string1), False)
    def test_token_stack_str(self):
        # Plain tokens render as their name; '$'-typed tokens render as
        # (name, value) pairs.  reverse=True prints top-of-stack first.
        stack = Stack()
        for (name, val) in [('goal',''), ('$id', 'varname'), ('$int', 5), ('t', '')]:
            stack.push( {'name': name, 'value': val} )
        self.assertEqual(token_stack_str(stack, reverse=True, delim=' '),
                         't ($int, 5) ($id, varname) goal')
# Allow running this suite directly: `python test_util.py`.
# (Stray dataset-extraction residue on the original line was removed.)
if __name__ == '__main__':
    unittest.main()
import unittest
import pycompiler.util
from pycompiler.util import *
import os.path, sys, StringIO, types
class TestStack(unittest.TestCase):
    """Tests for pycompiler.util.Stack: push/pop/top semantics, bulk pushes
    and the tab-separated string renderings."""
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_stack_push_an_int(self):
        # push appends to the end of the backing `data` list.
        stack = pycompiler.util.Stack()
        stack.push(1)
        self.assertEqual(stack.data, [1])
    def test_stack_push_2_ints_and_pop_1_int(self):
        # pop removes and returns the most recently pushed element (LIFO).
        stack = pycompiler.util.Stack()
        stack.push(1)
        stack.push(-256)
        self.assertEqual(stack.data, [1, -256])
        top = stack.pop()
        self.assertEqual(top, -256)
        self.assertEqual(stack.data, [1])
    def test_push_and_pop_string_and_int(self):
        # The stack is heterogeneous: mixed value types are allowed.
        stack = pycompiler.util.Stack()
        stack.push('hello world')
        stack.push(7)
        self.assertEqual(stack.data, ['hello world', 7])
        top = stack.pop()
        self.assertEqual(top, 7)
        self.assertEqual(stack.data, ['hello world'])
    def test_pop_empty_stack_throws_AssertionError(self):
        # Popping an empty stack must raise AssertionError.
        stack = pycompiler.util.Stack()
        self.assertRaises(AssertionError, stack.pop)
    def test_string_representation_of_stack(self):
        # str() joins the elements bottom-to-top with tabs.
        stack = pycompiler.util.Stack()
        stack.data = [1, 2, 'hello']
        strval = "%s" % stack
        self.assertEqual(strval, "1\t2\thello")
    def test_stack_has_top_method(self):
        # top() peeks at the last pushed element without removing it.
        stack = pycompiler.util.Stack()
        stack.push(1)
        stack.push('wow')
        self.assertEqual(stack.top(), 'wow')
    def test_top_called_on_empty_stack_throws_AssertionError(self):
        stack = pycompiler.util.Stack()
        self.assertRaises(AssertionError, stack.top)
    def test_push_multiple_elements_in_stack_order(self):
        # multipush([a, b, c]) leaves `a` on top: the list head ends last in
        # the backing list.
        stack = pycompiler.util.Stack()
        stack.multipush([1, 2, 3])
        self.assertEqual(stack.data, [3, 2, 1])
        stack = pycompiler.util.Stack()
        stack.multipush(['a', 'b', 'c'])
        self.assertEqual(stack.data, ['c', 'b', 'a'])
    def test_stack_prints_reverse(self):
        # print_reverse renders top-to-bottom and must not mutate the stack.
        stack = pycompiler.util.Stack()
        stack.multipush(['a', 'b', 'c'])
        self.assertEqual(stack.data, ['c', 'b', 'a'])
        strval = stack.print_reverse()
        self.assertEqual(strval, "a\tb\tc")
        self.assertEqual(stack.data, ['c', 'b', 'a'])
        # add ability to specify delimiter other than '\t'
        strval = stack.print_reverse(' ')
        self.assertEqual(strval, "a b c")
        self.assertEqual(stack.data, ['c', 'b', 'a'])
    def test_push_then_pop_returns_item_not_copy(self):
        # pop returns the pushed object itself; a same-content Stack does not
        # compare equal (equality is evidently identity-based here).
        stack = pycompiler.util.Stack()
        stack2 = pycompiler.util.Stack()
        stack2.push('stack2')
        stack.push(stack2)
        self.assertEqual(stack.data, [stack2])
        alias_stack = stack.pop()
        self.assertEqual(stack2, alias_stack)
        stack3 = pycompiler.util.Stack()
        stack3.push('stack2')
        self.assertNotEqual(stack3, stack2)
        self.assertNotEqual(stack3, alias_stack)
class TestConvertFromString(unittest.TestCase):
    """Unit tests for pycompiler.util.convert_from_str.

    The helper should parse a string into the most specific builtin value:
    int, float, None, or fall back to the string itself.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_util_convert_from_string_utility(self):
        self.assertTrue(isinstance(pycompiler.util.convert_from_str('42'), int))
        self.assertTrue(isinstance(pycompiler.util.convert_from_str('3.1415'), float))
        self.assertTrue(isinstance(pycompiler.util.convert_from_str('howdy'), str))
        # FIX: use type(None) instead of types.NoneType.  types.NoneType was
        # removed in Python 3.0 and only restored in 3.10, while type(None)
        # works on every Python version.
        self.assertTrue(isinstance(pycompiler.util.convert_from_str('None'), type(None)))
class TestMyWrite(unittest.TestCase):
    """Tests for pycompiler.util.write_to, which writes one string to any
    number of open file-like targets (a None target is skipped)."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Scratch file on disk; deleted again in tearDown.
        self.outfile = open('my_write_outfile', 'w')

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.outfile.close()
        os.remove('my_write_outfile')

    def test_write_to(self):
        # string1 goes to both stdout and the file, string2 only to the
        # file, string3 only to (captured) stdout.
        string1 = \
"""Multiline string
that goes to
stdout and outfile"""
        string2 = 'only goes to file'
        string3 = 'only goes to stdout'
        old_stdout = sys.stdout
        try:
            # Swap stdout for an in-memory buffer so its contents can be
            # inspected below.  (StringIO module: this is Python 2 code.)
            sys.stdout = StringIO.StringIO()
            pycompiler.util.write_to(string1, sys.stdout, self.outfile)
            pycompiler.util.write_to(string2, self.outfile)
            pycompiler.util.write_to(string3, sys.stdout)
            # Close before reopening so buffered writes are flushed to disk.
            self.outfile.close()
            outfile_contents = open('my_write_outfile').read()
            stdout_contents = sys.stdout.getvalue()
            # The file saw string1 and string2 but not string3.
            self.assertNotEqual(outfile_contents.find(string1), -1)
            self.assertNotEqual(outfile_contents.find(string2), -1)
            self.assertEqual(outfile_contents.find(string3), -1)
            # Captured stdout saw string1 and string3.
            self.assertNotEqual(stdout_contents.find(string1), -1)
            self.assertNotEqual(stdout_contents.find(string3), -1)
        finally:
            # Always restore the real stdout, even if an assertion fails.
            sys.stdout = old_stdout

    def test_file_none_no_problem(self):
        # make sure that if an argument == None is passed, it doesn't
        # cause an exception
        string1 = "only goes to that which exists"
        file2 = None
        pycompiler.util.write_to(string1, self.outfile, file2)
        self.outfile.close()
        outfile_contents = open('my_write_outfile').read()
        self.assertNotEqual(outfile_contents.find(string1), -1)
class TestMisc(unittest.TestCase):
    """Tests for the whitespace-insensitive string helpers and for
    token_stack_str."""

    def setUp(self):
        unittest.TestCase.setUp(self)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_equal_w_out_whitespace(self):
        string1 = ' hi there dave, what up?'
        string2 = '''hi there dave,
what up?'''
        string3 = 'hi there dave, what up?'
        # Differences in spacing and line breaks must be ignored.
        for other in (string2, string3):
            self.assertEqual(equal_without_whitespace(string1, other), True)

    def test_find_without_whitespace(self):
        string1 = ' hi there dave, what up?'
        string2 = '''hi there dave,
'''
        string3 = 'dave, what up?'
        # Both fragments occur inside string1 once whitespace is ignored...
        self.assertEqual(find_without_whitespace(string1, string2), True)
        self.assertEqual(find_without_whitespace(string1, string3), True)
        # ...but the longer string is not contained in the short fragment.
        self.assertEqual(find_without_whitespace(string2, string1), False)

    def test_token_stack_str(self):
        """Render a token stack bottom-to-top, space delimited; tokens with
        empty values print bare."""
        tokens = [('goal', ''), ('$id', 'varname'), ('$int', 5), ('t', '')]
        stk = Stack()
        for tok_name, tok_val in tokens:
            stk.push({'name': tok_name, 'value': tok_val})
        self.assertEqual(token_stack_str(stk, reverse=True, delim=' '),
                         't ($int, 5) ($id, varname) goal')
if __name__ == '__main__':
unittest.main() | 0.348534 | 0.58059 |
from decouple import config
from imapclient import IMAPClient
import mailparser
from bs4 import BeautifulSoup
import pickle
import pandas as pd
import re
import numpy as np
from datetime import datetime
from dateutil import parser
import datefinder
import parsing
class dataExtraction:
    """Extracts a user's past OCBC transaction-alert e-mails over IMAP and
    parses them into a table of payments.

    Parameters
    ----------
    email : str
        Outlook address used as the IMAP login.
    psswd : str
        IMAP password for that account.
    """

    def __init__(self, email, psswd):
        self.email = email
        # FIX: the original read `self.passwd = <PASSWORD>` — a credential
        # redaction placeholder that is a syntax error and left the `psswd`
        # argument unused.  Store the actual password argument.
        self.passwd = psswd
        self.card_payment = []   # card-transaction alert messages
        self.pay_anyone = []     # pay-anyone / transfer alert messages
        self.all_payments = []   # both kinds combined, as a DataFrame
        self.parsed_data = []    # key/value records produced by `parsing`

    def extract_emails(self, date, folder):
        """Fetch alert e-mails received SINCE `date` from IMAP `folder` and
        populate self.all_payments with (subject, message text) rows."""
        server = IMAPClient('imap-mail.outlook.com', ssl=True)
        server.login(self.email, self.passwd)
        server.select_folder(folder, readonly=True)
        # First sender: card-transaction alerts.
        UID1 = server.search(['FROM', '<EMAIL>', 'SINCE', date])
        for email in UID1:
            rawMSG = server.fetch([email], ['BODY[]'])
            msg = mailparser.parse_from_bytes(rawMSG[email][b'BODY[]'])
            soup = BeautifulSoup(msg.body, 'html.parser')
            eg = soup.get_text().strip()
            self.card_payment.append({'subject': msg.headers['Subject'], 'desc': ''.join([text.strip() for text in eg.split('\r')])})
        # Keep only rows whose subject marks a genuine card transaction.
        self.card_payment = (pd.DataFrame(self.card_payment)[pd.DataFrame(self.card_payment).subject == 'CARD TRANSACTION ALERT']).to_dict('records')
        # Second sender: pay-anyone / transfer notifications.
        UID2 = server.search(['FROM', '<EMAIL>', 'SINCE', date])
        for email in UID2:
            rawMSG = server.fetch([email], ['BODY[]'])
            msg = mailparser.parse_from_bytes(rawMSG[email][b'BODY[]'])
            soup = BeautifulSoup(msg.body, 'html.parser')
            eg = soup.get_text().strip()
            self.pay_anyone.append({'subject': msg.headers['Subject'], 'msg': ''.join([text.strip() for text in eg.split('\r')])})
        self.card_payment = pd.DataFrame(self.card_payment)
        # Rename 'desc' to 'msg' so both frames share a column layout.
        self.card_payment.columns = ['subject', 'msg']
        self.pay_anyone = pd.DataFrame(self.pay_anyone)
        self.all_payments = pd.concat([self.card_payment, self.pay_anyone])
        self.all_payments.index = np.arange(len(self.all_payments))

    def parse_key_values(self):
        """Run the subject-specific parsing.key_extractor_<i> extractors over
        self.all_payments and collect the results in self.parsed_data."""
        key_data = []
        subjects = ['CARD TRANSACTION ALERT', 'Successful NETS Payment', 'You have sent money via OCBC Pay Anyone', 'You have successfully sent money via OCBC Pay Anyone', 'You have sent money to', 'Successful eNETS payment', 'You have sent money via PayNow', 'OCBC Alert: Successful ATM QR Withdrawal', 'OCBC Alert: You have successfully sent money via PayNow', 'We have received your funds transfer request', 'OCBC Alert: Deposit on your account', 'OCBC Alert: Deposit in your account']
        for i, subject in enumerate(subjects):
            # Subject 4 is a prefix ('You have sent money to <payee>'), so it
            # needs a substring match rather than exact equality.
            if i == 4:
                subset = self.all_payments[self.all_payments.subject.str.findall(subject).str.join(',').str.len() != 0]
            else:
                subset = self.all_payments[self.all_payments.subject == subject]
            # FIX: replaces the original exec()-built variable names
            # ('type_<i>') with a direct getattr dispatch — same behaviour,
            # no dynamic code execution.
            key_data.append(getattr(parsing, 'key_extractor_' + str(i))(subset))
        self.parsed_data = pd.concat(key_data)
        self.parsed_data.index = np.arange(len(self.parsed_data))
        self.parsed_data.payee = self.parsed_data.payee.str.strip()
# data = dataExtraction(config('EMAIL'), config('PSSWD'))
# data.extract_emails() | extractData.py | from decouple import config
from imapclient import IMAPClient
import mailparser
from bs4 import BeautifulSoup
import pickle
import pandas as pd
import re
import numpy as np
from datetime import datetime
from dateutil import parser
import datefinder
import parsing
class dataExtraction:
    """Extracts a user's past OCBC transaction-alert e-mails over IMAP and
    parses them into a table of payments.

    Parameters
    ----------
    email : str
        Outlook address used as the IMAP login.
    psswd : str
        IMAP password for that account.
    """

    def __init__(self, email, psswd):
        self.email = email
        # FIX: the original read `self.passwd = <PASSWORD>` — a credential
        # redaction placeholder that is a syntax error and left the `psswd`
        # argument unused.  Store the actual password argument.
        self.passwd = psswd
        self.card_payment = []   # card-transaction alert messages
        self.pay_anyone = []     # pay-anyone / transfer alert messages
        self.all_payments = []   # both kinds combined, as a DataFrame
        self.parsed_data = []    # key/value records produced by `parsing`

    def extract_emails(self, date, folder):
        """Fetch alert e-mails received SINCE `date` from IMAP `folder` and
        populate self.all_payments with (subject, message text) rows."""
        server = IMAPClient('imap-mail.outlook.com', ssl=True)
        server.login(self.email, self.passwd)
        server.select_folder(folder, readonly=True)
        # First sender: card-transaction alerts.
        UID1 = server.search(['FROM', '<EMAIL>', 'SINCE', date])
        for email in UID1:
            rawMSG = server.fetch([email], ['BODY[]'])
            msg = mailparser.parse_from_bytes(rawMSG[email][b'BODY[]'])
            soup = BeautifulSoup(msg.body, 'html.parser')
            eg = soup.get_text().strip()
            self.card_payment.append({'subject': msg.headers['Subject'], 'desc': ''.join([text.strip() for text in eg.split('\r')])})
        # Keep only rows whose subject marks a genuine card transaction.
        self.card_payment = (pd.DataFrame(self.card_payment)[pd.DataFrame(self.card_payment).subject == 'CARD TRANSACTION ALERT']).to_dict('records')
        # Second sender: pay-anyone / transfer notifications.
        UID2 = server.search(['FROM', '<EMAIL>', 'SINCE', date])
        for email in UID2:
            rawMSG = server.fetch([email], ['BODY[]'])
            msg = mailparser.parse_from_bytes(rawMSG[email][b'BODY[]'])
            soup = BeautifulSoup(msg.body, 'html.parser')
            eg = soup.get_text().strip()
            self.pay_anyone.append({'subject': msg.headers['Subject'], 'msg': ''.join([text.strip() for text in eg.split('\r')])})
        self.card_payment = pd.DataFrame(self.card_payment)
        # Rename 'desc' to 'msg' so both frames share a column layout.
        self.card_payment.columns = ['subject', 'msg']
        self.pay_anyone = pd.DataFrame(self.pay_anyone)
        self.all_payments = pd.concat([self.card_payment, self.pay_anyone])
        self.all_payments.index = np.arange(len(self.all_payments))

    def parse_key_values(self):
        """Run the subject-specific parsing.key_extractor_<i> extractors over
        self.all_payments and collect the results in self.parsed_data."""
        key_data = []
        subjects = ['CARD TRANSACTION ALERT', 'Successful NETS Payment', 'You have sent money via OCBC Pay Anyone', 'You have successfully sent money via OCBC Pay Anyone', 'You have sent money to', 'Successful eNETS payment', 'You have sent money via PayNow', 'OCBC Alert: Successful ATM QR Withdrawal', 'OCBC Alert: You have successfully sent money via PayNow', 'We have received your funds transfer request', 'OCBC Alert: Deposit on your account', 'OCBC Alert: Deposit in your account']
        for i, subject in enumerate(subjects):
            # Subject 4 is a prefix ('You have sent money to <payee>'), so it
            # needs a substring match rather than exact equality.
            if i == 4:
                subset = self.all_payments[self.all_payments.subject.str.findall(subject).str.join(',').str.len() != 0]
            else:
                subset = self.all_payments[self.all_payments.subject == subject]
            # FIX: replaces the original exec()-built variable names
            # ('type_<i>') with a direct getattr dispatch — same behaviour,
            # no dynamic code execution.
            key_data.append(getattr(parsing, 'key_extractor_' + str(i))(subset))
        self.parsed_data = pd.concat(key_data)
        self.parsed_data.index = np.arange(len(self.parsed_data))
        self.parsed_data.payee = self.parsed_data.payee.str.strip()
# data = dataExtraction(config('EMAIL'), config('PSSWD'))
# data.extract_emails() | 0.144903 | 0.065276 |
#
# # Intro
# In this tutorial we will execute a `dvmdostem` simulation from a Jupyter Notebook (IPython Notebook). The tutorial assumes:
# * You have built all the `dvmdostem` Docker images (see the comments and directions in the [Dockerfile](https://github.com/ua-snap/dvm-dos-tem/blob/master/Dockerfile) and [Dockerfile-mapping-support](https://github.com/ua-snap/dvm-dos-tem/blob/master/Dockerfile-mapping-support).
# * You can launch the stack using `docker compose` (see directions and examples in the [docker-compose.yml](https://github.com/ua-snap/dvm-dos-tem/blob/master/docker-compose.yml) file).
# * You have the volumes setup as specified in the project's `docker-compose.yml` file and you can access the files both from your host computer and from within the docker container(s).
# * You have installed the Jupyter package and can run jupyter notebooks, specifically an `IPython` notebook.
#
# > Note about packages and environments: the demo here shows a mixture of running dvm-dos-tem's supporting Python scripts "natively" (i.e. on your host computer, not in a Docker container) and running dvm-dos-tem's supporting Python scripts inside the Docker container. For the most consistent Python environment, it is best to run everyting through (inside) the Docker containers. However sometimes the extra typing required is onerous (`docker compose exec dvmdostem-run ...`), and there is a little extra overhead involved in interacting with the containers. So if you have the appropriate packages installed on your host machine you can run some of the scripts on your host as it shown here. This takes a little care to keep the paths straight for each command, i.e. whether you are referncing the path inside the guest/container, or on your host machine.
#
# > Note that while `dmvdostem`'s supporting scripts are largely written in Python, for the most part there is not an exposed Python API. Instead the scripts are generally provided with a command line interface. So much of this tutorial, while running inside an IPython notebook, could easily be run directly from your shell.
#
# For convenience we will add the `dvm-dos-tem` repo directory to our `PATH` on the host **before** launching the notebook (this should already be done for you inside the Docker container):
#
# ```bash
# $ cd ~/path/to/your/dvm-dos-tem
# $ export PATH=$(PWD)/scripts:$(PWD):$PATH
# ```
#
# Now start the notebook server:
#
# ```bash
# $ jupyter notebook
# ```
#
# and open this notebook in a browser tab.
#
# **Note that when you open the notebook, the current working directory of the notebook will be the dvm-dos-tem scripts directory. In order for the docker commands to work correctly, you must change your working directory for this notebook to the main dvm-dos-tem repo directory, which contains the Dockerfile(s).** You can do this a variety of ways, but the easiest is the builtin IPython magic, `%cd </path/to/your/dvm-dos-tem>`. There is also the useful `%pwd` function to see where you are.
#
#
# # Setup
#
# As noted in [this wiki page](https://github.com/ua-snap/dvm-dos-tem/wiki/How-To:-Run-dvmdostem-and-plot-output-via-Docker) there are 6 general steps for any modeling task:
#
# 1. Decide where on your computer you want to store your model run(s).
# 2. Decide what spatial (geographic) area you want to run.
# 3. Decide what variables you want to have output
# 4. Decide on all other run settings/parameters:
# * Which stages to run and for how many years.
# * Is the community type (CMT) fixed or driven by input vegetation.nc map?
# * For which stages should the output files be generated and saved?
# * Calibration settings if necessary (`--cal-mode`).
# * Any other command line options or environment settings.
# 5. Launch the run
# 6. Make plots or other analysis.
#
# ## Setup data location and geographic location.
#
# Start by setting up a directory for this example run. There are several ways you could do this. For this example we will run the script inside the docker container. Hence the paths will be from the point of view of inside the docker container.
# In[8]:
# Cleanup:
get_ipython().system('docker compose exec dvmdostem-run rm -r /data/workflows/testcase_0001')
# In[9]:
get_ipython().system('docker compose exec dvmdostem-run scripts/setup_working_directory.py --input-data-path /data/input-catalog/cru-ts40_ar5_rcp85_ncar-ccsm4_TOOLIK_FIELD_STATION_10x10/ /data/workflows/testcase_0001')
# Now note that if you investigate **from your host** (i.e. not inside the docker container) you can see the new directory you just created (in my case, I keep the workflows up two directories from my dvm-dos-tem repo; your paths might be different):
# In[10]:
get_ipython().system('ls ../dvmdostem-workflows/testcase_0001/')
# Notice that you can see the same folders and files by running `ls` from within the docker container. Notice the paths are different because you are referencing the locations **inside** the container as opposed to from your host computer:
# In[11]:
get_ipython().system('docker compose exec dvmdostem-run ls /data/workflows/testcase_0001')
# ## Adjust spatial mask
# Now adjust your run mask so as to only run one pixel:
# In[12]:
get_ipython().system('docker compose exec dvmdostem-run runmask-util.py --reset --yx 0 0 --show /data/workflows/testcase_0001/run-mask.nc')
# ## Choose output variables
# OK, now its time to turn on some outputs. In this case, lets keep it simple and just turn on monthly GPP for PFTs. The output from this command needs a wider display for easier reading. First check on what is already on:
# In[13]:
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py -s /data/workflows/testcase_0001/config/output_spec.csv')
# This is super annoying because it needs a wider screen to display this table nicely. But we can use `pandas` to display nicely in this notebook.
# In[14]:
import pandas as pd
outspec = pd.read_csv('../dvmdostem-workflows/testcase_0001/config/output_spec.csv')
outspec.head(15)
# Now use the utility helper script to change the file:
# In[15]:
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py /data/workflows/testcase_0001/config/output_spec.csv --on GPP p m')
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py /data/workflows/testcase_0001/config/output_spec.csv --on CMTNUM y')
# ## Adjust other settings
#
# Now modify the config file to turn on equilibrium stage outputs. Again there are about 100 ways to do this (on the host, in the container, with a text editor, programmatically, etc). For these purposes, we will work from the host, mostly so we can record (and run) the Python snippet here in the notebook:
# In[16]:
import json

# Path to the run configuration, as seen from the host machine.
CONFIG_FILE = '../dvmdostem-workflows/testcase_0001/config/config.js'

# Load the config, enable equilibrium-stage NetCDF output, and write it back.
with open(CONFIG_FILE, 'r') as cfg_in:
    config = json.load(cfg_in)
config['IO']['output_nc_eq'] = 1
with open(CONFIG_FILE, 'w') as cfg_out:
    json.dump(config, cfg_out, indent=2)
# Note you can check on the file with `!cat {CONFIG_FILE}`.
# # RUN
#
# Now finally we will launch our `dvmdostem` run. For these purposes, we are just doing a 200 year equilibrium run. We are going to force this to run as CMT 4 (Tussock Tundra).
# In[17]:
get_ipython().system('docker compose exec --workdir /data/workflows/testcase_0001 dvmdostem-run dvmdostem -p 50 -e 200 -s 0 -t 0 -n 0 -l err --force-cmt 4')
# That's it! If we look in the output directory, we expect to see one output file for the equilibrium stage, GPP:
# In[18]:
get_ipython().system('docker compose exec dvmdostem-run ls /data/workflows/testcase_0001/output')
# In[19]:
get_ipython().system('ncdump ../dvmdostem-workflows/testcase_0001/output/run_status.nc')
# # Plotting
#
# Again, this can be done a few ways (host or guest). For these purposes, we will run on the host.
# In[20]:
import netCDF4 as nc
import scripts.output_utils as ou
import matplotlib.pyplot as plt
# The output_utils script has a bunch of unfinished plotting tools in it and some useful functions for summarizing outputs (i.e. summing PFTs or converting monthly outputs to yearly).
#
# First just see what output files we have to work with:
# In[21]:
get_ipython().system('ls ../dvmdostem-workflows/testcase_0001/output/')
# ### Summarize to yearly, plot first 5 PFTs
# In[22]:
# Load the equilibrium-stage monthly GPP output, collapse it to yearly
# totals, and plot the first five PFTs for the single active pixel (0, 0).
ds = nc.Dataset("../dvmdostem-workflows/testcase_0001/output/GPP_monthly_eq.nc")
gpp = ds.variables['GPP'][:]
yrly_gpp = ou.sum_monthly_flux_to_yearly(gpp)
for pft in range(5):
    plt.plot(yrly_gpp[:, pft, 0, 0], label='pft{}'.format(pft))
plt.xlabel('run year')
plt.ylabel('{} ({})'.format('GPP', 'g/m2/year'))
_ = plt.legend()
# ### Summarize to yearly, sum PFTs
# In[23]:
plt.plot(ou.sum_across_pfts(yrly_gpp)[:,0,0], label='all pfts')
plt.xlabel('run year')
_ = plt.ylabel('GPP (g/m2/year)')
# ### Plot the last 10 years at monthly resolution, first 5 PFTs
# In[24]:
import netCDF4 as nc

# Plot the final 10 years (120 monthly records) of GPP for the first five
# PFTs at pixel (0, 0), labelling the y-axis with the file's own units.
ds = nc.Dataset("../dvmdostem-workflows/testcase_0001/output/GPP_monthly_eq.nc")
gpp = ds.variables['GPP'][:]
for pft in range(5):
    plt.plot(gpp[-120:, pft, 0, 0], label='pft{}'.format(pft))
plt.xlabel('month')
plt.ylabel('GPP ({})'.format(ds.variables['GPP'].units))
_ = plt.legend()
# ### Unfinished output_utils.py plot function
# In[25]:
ou.plot_basic_timeseries(['GPP'],0,0,'monthly',['eq'],'../dvmdostem-workflows/testcase_0001/output/')
# In[ ]: | scripts/docker-ipython-notebook-run.py |
#
# # Intro
# In this tutorial we will execute a `dvmdostem` simulation from a Jupyter Notebook (IPython Notebook). The tutorial assumes:
# * You have built all the `dvmdostem` Docker images (see the comments and directions in the [Dockerfile](https://github.com/ua-snap/dvm-dos-tem/blob/master/Dockerfile) and [Dockerfile-mapping-support](https://github.com/ua-snap/dvm-dos-tem/blob/master/Dockerfile-mapping-support).
# * You can launch the stack using `docker compose` (see directions and examples in the [docker-compose.yml](https://github.com/ua-snap/dvm-dos-tem/blob/master/docker-compose.yml) file).
# * You have the volumes setup as specified in the project's `docker-compose.yml` file and you can access the files both from your host computer and from within the docker container(s).
# * You have installed the Jupyter package and can run jupyter notebooks, specifically an `IPython` notebook.
#
# > Note about packages and environments: the demo here shows a mixture of running dvm-dos-tem's supporting Python scripts "natively" (i.e. on your host computer, not in a Docker container) and running dvm-dos-tem's supporting Python scripts inside the Docker container. For the most consistent Python environment, it is best to run everyting through (inside) the Docker containers. However sometimes the extra typing required is onerous (`docker compose exec dvmdostem-run ...`), and there is a little extra overhead involved in interacting with the containers. So if you have the appropriate packages installed on your host machine you can run some of the scripts on your host as it shown here. This takes a little care to keep the paths straight for each command, i.e. whether you are referncing the path inside the guest/container, or on your host machine.
#
# > Note that while `dmvdostem`'s supporting scripts are largely written in Python, for the most part there is not an exposed Python API. Instead the scripts are generally provided with a command line interface. So much of this tutorial, while running inside an IPython notebook, could easily be run directly from your shell.
#
# For convenience we will add the `dvm-dos-tem` repo directory to our `PATH` on the host **before** launching the notebook (this should already be done for you inside the Docker container):
#
# ```bash
# $ cd ~/path/to/your/dvm-dos-tem
# $ export PATH=$(PWD)/scripts:$(PWD):$PATH
# ```
#
# Now start the notebook server:
#
# ```bash
# $ jupyter notebook
# ```
#
# and open this notebook in a browser tab.
#
# **Note that when you open the notebook, the current working directory of the notebook will be the dvm-dos-tem scripts directory. In order for the docker commands to work correctly, you must change your working directory for this notebook to the main dvm-dos-tem repo directory, which contains the Dockerfile(s).** You can do this a variety of ways, but the easiest is the builtin IPython magic, `%cd </path/to/your/dvm-dos-tem>`. There is also the useful `%pwd` function to see where you are.
#
#
# # Setup
#
# As noted in [this wiki page](https://github.com/ua-snap/dvm-dos-tem/wiki/How-To:-Run-dvmdostem-and-plot-output-via-Docker) there are 6 general steps for any modeling task:
#
# 1. Decide where on your computer you want to store your model run(s).
# 2. Decide what spatial (geographic) area you want to run.
# 3. Decide what variables you want to have output
# 4. Decide on all other run settings/parameters:
# * Which stages to run and for how many years.
# * Is the community type (CMT) fixed or driven by input vegetation.nc map?
# * For which stages should the output files be generated and saved?
# * Calibration settings if necessary (`--cal-mode`).
# * Any other command line options or environment settings.
# 5. Launch the run
# 6. Make plots or other analysis.
#
# ## Setup data location and geographic location.
#
# Start by setting up a directory for this example run. There are several ways you could do this. For this example we will run the script inside the docker container. Hence the paths will be from the point of view of inside the docker container.
# In[8]:
# Cleanup:
get_ipython().system('docker compose exec dvmdostem-run rm -r /data/workflows/testcase_0001')
# In[9]:
get_ipython().system('docker compose exec dvmdostem-run scripts/setup_working_directory.py --input-data-path /data/input-catalog/cru-ts40_ar5_rcp85_ncar-ccsm4_TOOLIK_FIELD_STATION_10x10/ /data/workflows/testcase_0001')
# Now note that if you investigate **from your host** (i.e. not inside the docker container) you can see the new directory you just created (in my case, I keep the workflows up two directories from my dvm-dos-tem repo; your paths might be different):
# In[10]:
get_ipython().system('ls ../dvmdostem-workflows/testcase_0001/')
# Notice that you can see the same folders and files by running `ls` from within the docker container. Notice the paths are different because you are referencing the locations **inside** the container as opposed to from your host computer:
# In[11]:
get_ipython().system('docker compose exec dvmdostem-run ls /data/workflows/testcase_0001')
# ## Adjust spatial mask
# Now adjust your run mask so as to only run one pixel:
# In[12]:
get_ipython().system('docker compose exec dvmdostem-run runmask-util.py --reset --yx 0 0 --show /data/workflows/testcase_0001/run-mask.nc')
# ## Choose output variables
# OK, now its time to turn on some outputs. In this case, lets keep it simple and just turn on monthly GPP for PFTs. The output from this command needs a wider display for easier reading. First check on what is already on:
# In[13]:
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py -s /data/workflows/testcase_0001/config/output_spec.csv')
# This is super annoying because it needs a wider screen to display this table nicely. But we can use `pandas` to display nicely in this notebook.
# In[14]:
import pandas as pd
outspec = pd.read_csv('../dvmdostem-workflows/testcase_0001/config/output_spec.csv')
outspec.head(15)
# Now use the utility helper script to change the file:
# In[15]:
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py /data/workflows/testcase_0001/config/output_spec.csv --on GPP p m')
get_ipython().system('docker compose exec dvmdostem-run outspec_utils.py /data/workflows/testcase_0001/config/output_spec.csv --on CMTNUM y')
# ## Adjust other settings
#
# Now modify the config file to turn on equilibrium stage outputs. Again there are about 100 ways to do this (on the host, in the container, with a text editor, programmatically, etc). For these purposes, we will work from the host, mostly so we can record (and run) the Python snippet here in the notebook:
# In[16]:
import json

# Path to the run configuration, as seen from the host machine.
CONFIG_FILE = '../dvmdostem-workflows/testcase_0001/config/config.js'

# Load the config, enable equilibrium-stage NetCDF output, and write it back.
with open(CONFIG_FILE, 'r') as cfg_in:
    config = json.load(cfg_in)
config['IO']['output_nc_eq'] = 1
with open(CONFIG_FILE, 'w') as cfg_out:
    json.dump(config, cfg_out, indent=2)
# Note you can check on the file with `!cat {CONFIG_FILE}`.
# # RUN
#
# Now finally we will launch our `dvmdostem` run. For these purposes, we are just doing a 200 year equilibrium run. We are going to force this to run as CMT 4 (Tussock Tundra).
# In[17]:
get_ipython().system('docker compose exec --workdir /data/workflows/testcase_0001 dvmdostem-run dvmdostem -p 50 -e 200 -s 0 -t 0 -n 0 -l err --force-cmt 4')
# That's it! If we look in the output directory, we expect to see one output file for the equilibrium stage, GPP:
# In[18]:
get_ipython().system('docker compose exec dvmdostem-run ls /data/workflows/testcase_0001/output')
# In[19]:
get_ipython().system('ncdump ../dvmdostem-workflows/testcase_0001/output/run_status.nc')
# # Plotting
#
# Again, this can be done a few ways (host or guest). For these purposes, we will run on the host.
# In[20]:
import netCDF4 as nc
import scripts.output_utils as ou
import matplotlib.pyplot as plt
# The output_utils script has a bunch of unfinished plotting tools in it and some useful functions for summarizing outputs (i.e. summing PFTs or converting monthly outputs to yearly).
#
# First just see what output files we have to work with:
# In[21]:
get_ipython().system('ls ../dvmdostem-workflows/testcase_0001/output/')
# ### Summarize to yearly, plot first 5 PFTs
# In[22]:
# Load the equilibrium-stage monthly GPP output, collapse it to yearly
# totals, and plot the first five PFTs for the single active pixel (0, 0).
ds = nc.Dataset("../dvmdostem-workflows/testcase_0001/output/GPP_monthly_eq.nc")
gpp = ds.variables['GPP'][:]
yrly_gpp = ou.sum_monthly_flux_to_yearly(gpp)
for pft in range(5):
    plt.plot(yrly_gpp[:, pft, 0, 0], label='pft{}'.format(pft))
plt.xlabel('run year')
plt.ylabel('{} ({})'.format('GPP', 'g/m2/year'))
_ = plt.legend()
# ### Summarize to yearly, sum PFTs
# In[23]:
plt.plot(ou.sum_across_pfts(yrly_gpp)[:,0,0], label='all pfts')
plt.xlabel('run year')
_ = plt.ylabel('GPP (g/m2/year)')
# ### Plot the last 10 years at monthly resolution, first 5 PFTs
# In[24]:
import netCDF4 as nc

# Plot the final 10 years (120 monthly records) of GPP for the first five
# PFTs at pixel (0, 0), labelling the y-axis with the file's own units.
ds = nc.Dataset("../dvmdostem-workflows/testcase_0001/output/GPP_monthly_eq.nc")
gpp = ds.variables['GPP'][:]
for pft in range(5):
    plt.plot(gpp[-120:, pft, 0, 0], label='pft{}'.format(pft))
plt.xlabel('month')
plt.ylabel('GPP ({})'.format(ds.variables['GPP'].units))
_ = plt.legend()
# ### Unfinished output_utils.py plot function
# In[25]:
ou.plot_basic_timeseries(['GPP'],0,0,'monthly',['eq'],'../dvmdostem-workflows/testcase_0001/output/')
# In[ ]: | 0.800146 | 0.777088 |
import os, sys
import joblib
from copy import deepcopy
from typing import Optional, Dict, Tuple, List
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import wfdb
import torch
from torch import nn
from scipy.signal import resample, resample_poly
from easydict import EasyDict as ED
from get_12ECG_features import get_12ECG_features
from models.special_detectors import special_detectors
from models.ecg_crnn import ECG_CRNN
from model_configs.ecg_crnn import ECG_CRNN_CONFIG
from utils.misc import (
rdheader,
ensure_lead_fmt, ensure_siglen,
)
from utils.utils_nn import extend_predictions
from utils.utils_signal import butter_bandpass_filter
from utils.misc import dict_to_str
from cfg import ModelCfg, TrainCfg
if ModelCfg.torch_dtype.lower() == "double":
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"load_12ECG_model",
"run_12ECG_classifier",
]
def run_12ECG_classifier(data:np.ndarray, header_data:List[str], loaded_model:Dict[str, nn.Module], verbose:int=0) -> Tuple[List[int], List[float], List[str]]:
    """ finished, checked,

    Run the full classification pipeline on one 12-lead ECG record:
    rule-based special detectors combined with an ensemble of deep-learning
    models, merged by an element-wise maximum over the full class list.

    Parameters:
    -----------
    data: ndarray,
        the raw ECG signal, in any lead format accepted by `ensure_lead_fmt`
    header_data: list of str,
        lines read from header file
    loaded_model: dict,
        models loaded for making predictions (except for classes treated by special detectors)
    verbose: int, default 0,
        verbosity for printing intermediate results

    Returns:
    --------
    current_label: list,
        binary prediction
    current_score:
        scalar prediction
    classes:
        prediction classes, with ordering in accordance with `current_label` and `current_score`
    """
    dtype = np.float32 if ModelCfg.torch_dtype == "float" else np.float64
    # `rdheader` expects every header line to be newline-terminated
    _header_data = [l if l.endswith("\n") else l+"\n" for l in header_data]
    header = rdheader(_header_data)
    raw_data = ensure_lead_fmt(data.copy(), fmt="lead_first")
    # convert raw ADC counts to physical units: (value - baseline) / gain
    baseline = np.array(header.baseline).reshape(raw_data.shape[0], -1)
    adc_gain = np.array(header.adc_gain).reshape(raw_data.shape[0], -1)
    raw_data = (raw_data - baseline) / adc_gain
    freq = header.fs
    if freq != ModelCfg.fs:
        # resample to the sampling frequency the models were trained at
        raw_data = resample_poly(raw_data, ModelCfg.fs, freq, axis=1)
        freq = ModelCfg.fs

    final_scores, final_conclusions = [], []

    # ---- rule-based special detectors ----
    partial_conclusion = special_detectors(raw_data.copy(), freq, sig_fmt="lead_first")
    is_brady = partial_conclusion.is_brady
    is_tachy = partial_conclusion.is_tachy  # NOTE(review): currently unused below
    is_LAD = partial_conclusion.is_LAD
    is_RAD = partial_conclusion.is_RAD
    is_PR = partial_conclusion.is_PR
    is_LQRSV = partial_conclusion.is_LQRSV
    if verbose >= 1:
        print(f"results from special detectors: {dict_to_str(partial_conclusion)}")
    # translate the detector booleans into a 0/1 vector over the full class list
    tmp = np.zeros(shape=(len(ModelCfg.full_classes,)))
    tmp[ModelCfg.full_classes.index('Brady')] = int(is_brady)
    tmp[ModelCfg.full_classes.index('LAD')] = int(is_LAD)
    tmp[ModelCfg.full_classes.index('RAD')] = int(is_RAD)
    tmp[ModelCfg.full_classes.index('PR')] = int(is_PR)
    tmp[ModelCfg.full_classes.index('LQRSV')] = int(is_LQRSV)
    partial_conclusion = tmp

    final_scores.append(partial_conclusion)
    final_conclusions.append(partial_conclusion)

    # ---- deep-learning models ----
    dl_data = raw_data.copy()
    if TrainCfg.bandpass is not None:
        # bandpass
        dl_data = butter_bandpass_filter(
            dl_data,
            lowcut=TrainCfg.bandpass[0],
            highcut=TrainCfg.bandpass[1],
            order=TrainCfg.bandpass_order,
            fs=TrainCfg.fs,
        )
    if dl_data.shape[1] >= ModelCfg.dl_siglen:
        # long record: crop to the fixed model input length, then normalize
        dl_data = ensure_siglen(dl_data, siglen=ModelCfg.dl_siglen, fmt="lead_first")
        if TrainCfg.normalize_data:
            # normalize
            dl_data = ((dl_data - np.mean(dl_data)) / np.std(dl_data)).astype(dtype)
    else:
        # short record: normalize first, then pad up to the fixed length
        if TrainCfg.normalize_data:
            # normalize
            dl_data = ((dl_data - np.mean(dl_data)) / np.std(dl_data)).astype(dtype)
        dl_data = ensure_siglen(dl_data, siglen=ModelCfg.dl_siglen, fmt="lead_first")
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    # unsqueeze to add a batch dimension
    dl_data = (torch.from_numpy(dl_data)).unsqueeze(0).to(device=device)

    dl_scores = []
    for subset, model in loaded_model.items():
        model.eval()
        subset_scores, subset_bin = model.inference(dl_data)
        if verbose >= 1:
            print(f"for tranches `{subset}`")
            print(f"subset_scores = {subset_scores}")
            print(f"subset_bin = {subset_bin}")
        if subset in ModelCfg.tranche_classes.keys():
            # map tranche-specific class scores onto the common dl class list
            subset_scores = extend_predictions(
                subset_scores,
                ModelCfg.tranche_classes[subset],
                ModelCfg.dl_classes,
            )
        subset_scores = subset_scores[0]  # remove the batch dimension
        dl_scores.append(subset_scores)

    # locate the "normal sinus rhythm" class, by abbreviation or SNOMED code
    if "NSR" in ModelCfg.dl_classes:
        dl_nsr_cid = ModelCfg.dl_classes.index("NSR")
    elif "426783006" in ModelCfg.dl_classes:
        dl_nsr_cid = ModelCfg.dl_classes.index("426783006")
    else:
        dl_nsr_cid = None

    # TODO: make a classifier using the scores from the 4 different dl models
    dl_scores = np.max(np.array(dl_scores), axis=0)
    dl_conclusions = (dl_scores >= ModelCfg.bin_pred_thr).astype(int)

    # treat exceptional cases
    max_prob = dl_scores.max()
    if max_prob < ModelCfg.bin_pred_nsr_thr and dl_nsr_cid is not None:
        # every score is low -> fall back to NSR.  `dl_conclusions` is 1-D here
        # (batch dim removed above); fixed from
        # `dl_conclusions[row_idx, dl_nsr_cid] = 1`, where `row_idx` was never
        # defined and raised a NameError.
        dl_conclusions[dl_nsr_cid] = 1
    elif dl_conclusions.sum() == 0:
        # nothing crossed the threshold: accept scores within a tolerance of the max
        dl_conclusions = ((dl_scores+ModelCfg.bin_pred_look_again_tol) >= max_prob)
        dl_conclusions = (dl_conclusions & (dl_scores >= ModelCfg.bin_pred_nsr_thr))
        dl_conclusions = dl_conclusions.astype(int)

    # map dl classes onto the full class list
    dl_scores = extend_predictions(
        dl_scores,
        ModelCfg.dl_classes,
        ModelCfg.full_classes,
    )
    dl_conclusions = extend_predictions(
        dl_conclusions,
        ModelCfg.dl_classes,
        ModelCfg.full_classes,
    )

    final_scores.append(dl_scores)
    final_conclusions.append(dl_conclusions)
    # element-wise union of the detector vector and the dl predictions
    final_scores = np.max(final_scores, axis=0)
    final_conclusions = np.max(final_conclusions, axis=0)

    current_label = final_conclusions
    current_score = final_scores
    classes = ModelCfg.full_classes

    return current_label, current_score, classes
def load_12ECG_model(input_directory:Optional[str]=None):
    """
    Load the ensemble of ECG_CRNN models used by `run_12ECG_classifier`:
    one model per tranche ("AB", "E", "F") plus one model ("all") trained
    on the common dl class list.

    Parameters:
    -----------
    input_directory: str, optional,
        currently unused; weight paths come from `ModelCfg.tranche_model`

    Returns:
    --------
    loaded_model: dict,
        mapping from tranche key to an `ECG_CRNN` in eval mode
    """
    # NOTE: the original final line was fused with a dataset-extraction
    # artifact ("... | official_phase_legacy/... |"), which broke the syntax;
    # the clean `return loaded_model` is restored below.
    loaded_model = ED()
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    for k in ["AB", "E", "F"]:
        # per-tranche model: shared architecture, tranche-specific class list
        model_config = deepcopy(ECG_CRNN_CONFIG)
        model_config.cnn.name = ModelCfg.cnn_name
        model_config.rnn.name = ModelCfg.rnn_name
        classes = ModelCfg.tranche_classes[k]
        loaded_model[k] = ECG_CRNN(
            classes=classes,
            config=model_config,
        )
        model_weight_path = ModelCfg.tranche_model[k]
        loaded_model[k].load_state_dict(torch.load(model_weight_path, map_location=device))
        loaded_model[k].eval()
    # model trained on all tranches, over the common dl class list
    loaded_model["all"] = ECG_CRNN(
        classes=ModelCfg.dl_classes,
        config=deepcopy(ECG_CRNN_CONFIG),
    )
    loaded_model["all"].load_state_dict(torch.load(ModelCfg.tranche_model["all"], map_location=device))
    loaded_model["all"].eval()
    return loaded_model
import os, sys
import joblib
from copy import deepcopy
from typing import Optional, Dict, Tuple, List
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import wfdb
import torch
from torch import nn
from scipy.signal import resample, resample_poly
from easydict import EasyDict as ED
from get_12ECG_features import get_12ECG_features
from models.special_detectors import special_detectors
from models.ecg_crnn import ECG_CRNN
from model_configs.ecg_crnn import ECG_CRNN_CONFIG
from utils.misc import (
rdheader,
ensure_lead_fmt, ensure_siglen,
)
from utils.utils_nn import extend_predictions
from utils.utils_signal import butter_bandpass_filter
from utils.misc import dict_to_str
from cfg import ModelCfg, TrainCfg
if ModelCfg.torch_dtype.lower() == "double":
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"load_12ECG_model",
"run_12ECG_classifier",
]
def run_12ECG_classifier(data:np.ndarray, header_data:List[str], loaded_model:Dict[str, nn.Module], verbose:int=0) -> Tuple[List[int], List[float], List[str]]:
""" finished, checked,
Parameters:
-----------
data: ndarray,
header_data: list of str,
lines read from header file
loaded_model: dict,
models loaded for making predictions (except for classes treated by special detectors)
verbose: int, default 0,
Returns:
--------
current_label: list,
binary prediction
current_score:
scalar prediction
classes:
prediction classes, with ordering in accordance with `current_label` and `current_score`
"""
dtype = np.float32 if ModelCfg.torch_dtype == "float" else np.float64
_header_data = [l if l.endswith("\n") else l+"\n" for l in header_data]
header = rdheader(_header_data)
raw_data = ensure_lead_fmt(data.copy(), fmt="lead_first")
baseline = np.array(header.baseline).reshape(raw_data.shape[0], -1)
adc_gain = np.array(header.adc_gain).reshape(raw_data.shape[0], -1)
raw_data = (raw_data - baseline) / adc_gain
freq = header.fs
if freq != ModelCfg.fs:
raw_data = resample_poly(raw_data, ModelCfg.fs, freq, axis=1)
freq = ModelCfg.fs
final_scores, final_conclusions = [], []
partial_conclusion = special_detectors(raw_data.copy(), freq, sig_fmt="lead_first")
is_brady = partial_conclusion.is_brady
is_tachy = partial_conclusion.is_tachy
is_LAD = partial_conclusion.is_LAD
is_RAD = partial_conclusion.is_RAD
is_PR = partial_conclusion.is_PR
is_LQRSV = partial_conclusion.is_LQRSV
if verbose >= 1:
print(f"results from special detectors: {dict_to_str(partial_conclusion)}")
tmp = np.zeros(shape=(len(ModelCfg.full_classes,)))
tmp[ModelCfg.full_classes.index('Brady')] = int(is_brady)
tmp[ModelCfg.full_classes.index('LAD')] = int(is_LAD)
tmp[ModelCfg.full_classes.index('RAD')] = int(is_RAD)
tmp[ModelCfg.full_classes.index('PR')] = int(is_PR)
tmp[ModelCfg.full_classes.index('LQRSV')] = int(is_LQRSV)
partial_conclusion = tmp
final_scores.append(partial_conclusion)
final_conclusions.append(partial_conclusion)
# DL part
dl_data = raw_data.copy()
if TrainCfg.bandpass is not None:
# bandpass
dl_data = butter_bandpass_filter(
dl_data,
lowcut=TrainCfg.bandpass[0],
highcut=TrainCfg.bandpass[1],
order=TrainCfg.bandpass_order,
fs=TrainCfg.fs,
)
if dl_data.shape[1] >= ModelCfg.dl_siglen:
dl_data = ensure_siglen(dl_data, siglen=ModelCfg.dl_siglen, fmt="lead_first")
if TrainCfg.normalize_data:
# normalize
dl_data = ((dl_data - np.mean(dl_data)) / np.std(dl_data)).astype(dtype)
else:
if TrainCfg.normalize_data:
# normalize
dl_data = ((dl_data - np.mean(dl_data)) / np.std(dl_data)).astype(dtype)
dl_data = ensure_siglen(dl_data, siglen=ModelCfg.dl_siglen, fmt="lead_first")
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
# unsqueeze to add a batch dimention
dl_data = (torch.from_numpy(dl_data)).unsqueeze(0).to(device=device)
dl_scores = []
for subset, model in loaded_model.items():
model.eval()
subset_scores, subset_bin = model.inference(dl_data)
if verbose >= 1:
print(f"for tranches `{subset}`")
print(f"subset_scores = {subset_scores}")
print(f"subset_bin = {subset_bin}")
if subset in ModelCfg.tranche_classes.keys():
subset_scores = extend_predictions(
subset_scores,
ModelCfg.tranche_classes[subset],
ModelCfg.dl_classes,
)
subset_scores = subset_scores[0] # remove the batch dimension
dl_scores.append(subset_scores)
if "NSR" in ModelCfg.dl_classes:
dl_nsr_cid = ModelCfg.dl_classes.index("NSR")
elif "426783006" in ModelCfg.dl_classes:
dl_nsr_cid = ModelCfg.dl_classes.index("426783006")
else:
dl_nsr_cid = None
# TODO: make a classifier using the scores from the 4 different dl models
dl_scores = np.max(np.array(dl_scores), axis=0)
dl_conclusions = (dl_scores >= ModelCfg.bin_pred_thr).astype(int)
# treat exceptional cases
max_prob = dl_scores.max()
if max_prob < ModelCfg.bin_pred_nsr_thr and dl_nsr_cid is not None:
dl_conclusions[row_idx, dl_nsr_cid] = 1
elif dl_conclusions.sum() == 0:
dl_conclusions = ((dl_scores+ModelCfg.bin_pred_look_again_tol) >= max_prob)
dl_conclusions = (dl_conclusions & (dl_scores >= ModelCfg.bin_pred_nsr_thr))
dl_conclusions = dl_conclusions.astype(int)
dl_scores = extend_predictions(
dl_scores,
ModelCfg.dl_classes,
ModelCfg.full_classes,
)
dl_conclusions = extend_predictions(
dl_conclusions,
ModelCfg.dl_classes,
ModelCfg.full_classes,
)
final_scores.append(dl_scores)
final_conclusions.append(dl_conclusions)
final_scores = np.max(final_scores, axis=0)
final_conclusions = np.max(final_conclusions, axis=0)
current_label = final_conclusions
current_score = final_scores
classes = ModelCfg.full_classes
return current_label, current_score, classes
def load_12ECG_model(input_directory:Optional[str]=None):
"""
"""
loaded_model = ED()
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
for k in ["AB", "E", "F"]:
model_config = deepcopy(ECG_CRNN_CONFIG)
model_config.cnn.name = ModelCfg.cnn_name
model_config.rnn.name = ModelCfg.rnn_name
classes = ModelCfg.tranche_classes[k]
loaded_model[k] = ECG_CRNN(
classes=classes,
config=model_config,
)
model_weight_path = ModelCfg.tranche_model[k]
loaded_model[k].load_state_dict(torch.load(model_weight_path, map_location=device))
loaded_model[k].eval()
loaded_model["all"] = ECG_CRNN(
classes=ModelCfg.dl_classes,
config=deepcopy(ECG_CRNN_CONFIG),
)
loaded_model["all"].load_state_dict(torch.load(ModelCfg.tranche_model["all"], map_location=device))
loaded_model["all"].eval()
return loaded_model | 0.6137 | 0.266378 |
import asyncio
from cleo import Command
from cleo.helpers import option
from netaudio.dante.browser import DanteBrowser
class SubscriptionAddCommand(Command):
    """Cleo command that subscribes an Rx channel to a Tx channel on Dante devices."""

    name = "add"
    description = "Add a subscription"

    options = [
        option("rx-channel-name", None, "Specify Rx channel by name", flag=False),
        option("rx-channel-number", None, "Specify Rx channel by number", flag=False),
        # help text fixed: these two options select the *Rx* device but their
        # descriptions said "Tx" (copy-paste error)
        option("rx-device-host", None, "Specify Rx device by host", flag=False),
        option("rx-device-name", None, "Specify Rx device by name", flag=False),
        option("tx-channel-name", None, "Specify Tx channel by name", flag=False),
        option("tx-channel-number", None, "Specify Tx channel by number", flag=False),
        option("tx-device-host", None, "Specify Tx device by host", flag=False),
        option("tx-device-name", None, "Specify Tx device by name", flag=False),
    ]

    async def subscription_add(self):
        """Discover devices via mDNS, resolve the requested Rx/Tx endpoints from
        the command options, and add the subscription on the Rx device."""
        dante_browser = DanteBrowser(mdns_timeout=1.5)
        dante_devices = await dante_browser.get_devices()
        for _, device in dante_devices.items():
            await device.get_controls()

        rx_channel = None
        rx_device = None
        tx_channel = None
        tx_device = None

        # resolve the Tx device, by name or by host
        if self.option("tx-device-name"):
            tx_device = next(
                filter(
                    lambda d: d[1].name == self.option("tx-device-name"),
                    dante_devices.items(),
                )
            )[1]
        elif self.option("tx-device-host"):
            tx_device = next(
                filter(
                    lambda d: d[1].ipv4 == self.option("tx-device-host"),
                    dante_devices.items(),
                )
            )[1]

        # resolve the Tx channel; matches the friendly name, or the raw name
        # when no friendly name is set (note: `and` binds tighter than `or`)
        if self.option("tx-channel-name"):
            tx_channel = next(
                filter(
                    lambda c: self.option("tx-channel-name") == c[1].friendly_name
                    or self.option("tx-channel-name") == c[1].name
                    and not c[1].friendly_name,
                    tx_device.tx_channels.items(),
                )
            )[1]
        elif self.option("tx-channel-number"):
            tx_channel = next(
                filter(
                    lambda c: c[1].number == self.option("tx-channel-number"),
                    tx_device.tx_channels.items(),
                )
            )[1]

        # resolve the Rx device, by name or by host
        if self.option("rx-device-name"):
            rx_device = next(
                filter(
                    lambda d: d[1].name == self.option("rx-device-name"),
                    dante_devices.items(),
                )
            )[1]
        elif self.option("rx-device-host"):
            rx_device = next(
                filter(
                    lambda d: d[1].ipv4 == self.option("rx-device-host"),
                    dante_devices.items(),
                )
            )[1]

        # resolve the Rx channel, by name or by number
        if self.option("rx-channel-name"):
            rx_channel = next(
                filter(
                    lambda c: c[1].name == self.option("rx-channel-name"),
                    rx_device.rx_channels.items(),
                )
            )[1]
        elif self.option("rx-channel-number"):
            rx_channel = next(
                filter(
                    lambda c: c[1].number == self.option("rx-channel-number"),
                    rx_device.rx_channels.items(),
                )
            )[1]

        # no Tx device given: assume a loopback subscription on the Rx device
        if rx_device and not tx_device:
            tx_device = rx_device

        # fixed: the guard previously read `... and tx_channel and tx_channel`,
        # so a missing Tx *device* was never checked here
        if rx_channel and rx_device and tx_channel and tx_device:
            self.line(
                f"{rx_channel.name}@{rx_device.name} <- {tx_channel.name}@{tx_device.name}"
            )
            await rx_device.add_subscription(rx_channel, tx_channel, tx_device)

    def handle(self):
        """Synchronous entry point required by cleo."""
        asyncio.run(self.subscription_add())
from cleo import Command
from cleo.helpers import option
from netaudio.dante.browser import DanteBrowser
class SubscriptionAddCommand(Command):
name = "add"
description = "Add a subscription"
options = [
option("rx-channel-name", None, "Specify Rx channel by name", flag=False),
option("rx-channel-number", None, "Specify Rx channel by number", flag=False),
option("rx-device-host", None, "Specify Tx device by host", flag=False),
option("rx-device-name", None, "Specify Tx device by name", flag=False),
option("tx-channel-name", None, "Specify Tx channel by name", flag=False),
option("tx-channel-number", None, "Specify Tx channel by number", flag=False),
option("tx-device-host", None, "Specify Tx device by host", flag=False),
option("tx-device-name", None, "Specify Tx device by name", flag=False),
]
async def subscription_add(self):
dante_browser = DanteBrowser(mdns_timeout=1.5)
dante_devices = await dante_browser.get_devices()
for _, device in dante_devices.items():
await device.get_controls()
rx_channel = None
rx_device = None
tx_channel = None
tx_device = None
if self.option("tx-device-name"):
tx_device = next(
filter(
lambda d: d[1].name == self.option("tx-device-name"),
dante_devices.items(),
)
)[1]
elif self.option("tx-device-host"):
tx_device = next(
filter(
lambda d: d[1].ipv4 == self.option("tx-device-host"),
dante_devices.items(),
)
)[1]
if self.option("tx-channel-name"):
tx_channel = next(
filter(
lambda c: self.option("tx-channel-name") == c[1].friendly_name
or self.option("tx-channel-name") == c[1].name
and not c[1].friendly_name,
tx_device.tx_channels.items(),
)
)[1]
elif self.option("tx-channel-number"):
tx_channel = next(
filter(
lambda c: c[1].number == self.option("tx-channel-number"),
tx_device.tx_channels.items(),
)
)[1]
if self.option("rx-device-name"):
rx_device = next(
filter(
lambda d: d[1].name == self.option("rx-device-name"),
dante_devices.items(),
)
)[1]
elif self.option("rx-device-host"):
rx_device = next(
filter(
lambda d: d[1].ipv4 == self.option("rx-device-host"),
dante_devices.items(),
)
)[1]
if self.option("rx-channel-name"):
rx_channel = next(
filter(
lambda c: c[1].name == self.option("rx-channel-name"),
rx_device.rx_channels.items(),
)
)[1]
elif self.option("rx-channel-number"):
rx_channel = next(
filter(
lambda c: c[1].number == self.option("rx-channel-number"),
rx_device.rx_channels.items(),
)
)[1]
if rx_device and not tx_device:
tx_device = rx_device
if rx_channel and rx_device and tx_channel and tx_channel:
self.line(
f"{rx_channel.name}@{rx_device.name} <- {tx_channel.name}@{tx_device.name}"
)
await rx_device.add_subscription(rx_channel, tx_channel, tx_device)
def handle(self):
asyncio.run(self.subscription_add()) | 0.41941 | 0.105763 |
from typing import List, Optional, Tuple, Type, overload
from ufdl.json.core.filter import FilterExpression
from ufdl.json.core.filter.field import Exact
from wai.json.object import OptionallyPresent, StrictJSONObject
from wai.json.object.property import (
BoolProperty,
NumberProperty,
StringProperty,
ConstantProperty,
OneOfProperty
)
from wai.json.raw import RawJSONElement, RawJSONObject
from wai.json.schema import JSONSchema
from ...base import NamedServerType, UFDLType
from ._Domain import Domain
from ._Framework import Framework, FrameworkInstance
from ...error import expect
class PretrainedModelInstance(StrictJSONObject['PretrainedModelInstance']):
    """
    Instance-representation of a pre-trained model.

    Fields declared with ``optional=True`` may be absent from the raw JSON
    the server sends.
    """
    # The primary-key of the model, if it was sent by the server
    pk: OptionallyPresent[int] = NumberProperty(integer_only=True, minimum=1, optional=True)
    url: str = StringProperty()
    name: OptionallyPresent[str] = StringProperty(optional=True)
    description: OptionallyPresent[str] = StringProperty(optional=True)
    # The framework of the model.  No property object is assigned here (or for
    # `domain`): the concrete properties are supplied by the specialised
    # subclass created in PretrainedModel.instance_class.
    framework: FrameworkInstance
    domain: str
    licence: OptionallyPresent[str] = StringProperty(optional=True)
    data: bool = BoolProperty(optional=True, default=False)
    metadata: OptionallyPresent[str] = StringProperty(optional=True)
    # Either a positive integer (presumably the creating user's pk -- confirm)
    # or an explicit null
    creator: OptionallyPresent[Optional[int]] = OneOfProperty(
        sub_properties=(
            NumberProperty(integer_only=True, minimum=1),
            ConstantProperty(value=None)
        ),
        optional=True
    )
    creation_time: OptionallyPresent[str] = StringProperty(optional=True)
    # Either a string or an explicit null (NOTE(review): a null presumably
    # means "not deleted" -- confirm against the server schema)
    deletion_time: OptionallyPresent[Optional[str]] = OneOfProperty(
        sub_properties=(
            StringProperty(),
            ConstantProperty(value=None)
        ),
        optional=True
    )
class PretrainedModel(
    NamedServerType[
        Tuple[Domain, Framework],
        PretrainedModelInstance,
        PretrainedModelInstance
    ]
):
    """
    Server type representing the pretrained-models table, parameterised by a
    (domain, framework) pair.

    NOTE: the original final line was fused with a dataset-extraction artifact
    ("return False | src/... |"), which broke the syntax; the clean
    ``return False`` is restored below.  No logic was changed.
    """

    @overload
    def __init__(self, domain_type: Domain, framework_type: Framework): ...
    @overload
    def __init__(self, type_args: Optional[Tuple[Domain, Framework]] = None): ...

    def __init__(self, *args):
        # Two positional arguments means the (domain, framework) calling
        # convention: pack them into the single tuple the base class expects.
        if len(args) == 2:
            args = args,
        super().__init__(*args)
        # Cache for the lazily-built specialised instance class.
        self._instance_class: Optional[Type[PretrainedModelInstance]] = None

    @property
    def instance_class(self):
        """Lazily build (and cache) a PretrainedModelInstance subclass whose
        `framework`/`domain` properties are specialised to this type's args."""
        if self._instance_class is None:
            domain_type = self.type_args[0].type_args[0].value()
            framework_type = self.type_args[1]

            class SpecialisedPretrainedModelInstance(PretrainedModelInstance):
                framework = framework_type.instance_class.as_property()
                # A concrete domain string pins the field to that constant;
                # otherwise any (length-limited) string is accepted.
                domain = (
                    ConstantProperty(value=domain_type)
                    if isinstance(domain_type, str) else
                    StringProperty(max_length=32)
                )

            self._instance_class = SpecialisedPretrainedModelInstance
        return self._instance_class

    def name_filter(self, name: str) -> FilterExpression:
        """Server-side filter selecting models whose name matches exactly."""
        return Exact(field="name", value=name)

    def extract_name_from_json(self, value: RawJSONObject) -> str:
        return value['name']

    def server_table_name(self) -> str:
        return "pretrained-models"

    def filter_rules(self) -> List[FilterExpression]:
        """Filters narrowing the server list to this type's concrete
        domain/framework arguments, where those are concrete."""
        rules = []
        domain_type, framework_type = self.type_args
        if isinstance(domain_type, Domain):
            description_type = domain_type.type_args[0].value()
            if isinstance(description_type, str):
                rules.append(Exact(field="domain.description", value=description_type))
        if isinstance(framework_type, Framework):
            name_type, version_type = framework_type.type_args
            if isinstance(name_type.value(), str):
                rules.append(Exact(field="framework.name", value=name_type.value()))
            if isinstance(version_type.value(), str):
                rules.append(Exact(field="framework.version", value=version_type.value()))
        return rules

    def parse_json_value(self, value: RawJSONElement) -> PretrainedModelInstance:
        return self.instance_class.from_raw_json(value)

    def format_python_value_to_json(self, value: PretrainedModelInstance) -> RawJSONElement:
        expect(self.instance_class, value)
        return value.to_raw_json()

    @property
    def json_schema(self) -> JSONSchema:
        return self.instance_class.get_json_validation_schema()

    @classmethod
    def type_params_expected_base_types(cls) -> Tuple[UFDLType, ...]:
        return Domain(), Framework()

    @property
    def is_abstract(self) -> bool:
        return False
from ufdl.json.core.filter import FilterExpression
from ufdl.json.core.filter.field import Exact
from wai.json.object import OptionallyPresent, StrictJSONObject
from wai.json.object.property import (
BoolProperty,
NumberProperty,
StringProperty,
ConstantProperty,
OneOfProperty
)
from wai.json.raw import RawJSONElement, RawJSONObject
from wai.json.schema import JSONSchema
from ...base import NamedServerType, UFDLType
from ._Domain import Domain
from ._Framework import Framework, FrameworkInstance
from ...error import expect
class PretrainedModelInstance(StrictJSONObject['PretrainedModelInstance']):
"""
Instance-representation of a pre-trained model.
"""
# The primary-key of the model, if it was sent by the server
pk: OptionallyPresent[int] = NumberProperty(integer_only=True, minimum=1, optional=True)
url: str = StringProperty()
name: OptionallyPresent[str] = StringProperty(optional=True)
description: OptionallyPresent[str] = StringProperty(optional=True)
# The framework of the model
framework: FrameworkInstance
domain: str
licence: OptionallyPresent[str] = StringProperty(optional=True)
data: bool = BoolProperty(optional=True, default=False)
metadata: OptionallyPresent[str] = StringProperty(optional=True)
creator: OptionallyPresent[Optional[int]] = OneOfProperty(
sub_properties=(
NumberProperty(integer_only=True, minimum=1),
ConstantProperty(value=None)
),
optional=True
)
creation_time: OptionallyPresent[str] = StringProperty(optional=True)
deletion_time: OptionallyPresent[Optional[str]] = OneOfProperty(
sub_properties=(
StringProperty(),
ConstantProperty(value=None)
),
optional=True
)
class PretrainedModel(
NamedServerType[
Tuple[Domain, Framework],
PretrainedModelInstance,
PretrainedModelInstance
]
):
@overload
def __init__(self, domain_type: Domain, framework_type: Framework): ...
@overload
def __init__(self, type_args: Optional[Tuple[Domain, Framework]] = None): ...
def __init__(self, *args):
if len(args) == 2:
args = args,
super().__init__(*args)
self._instance_class: Optional[Type[PretrainedModelInstance]] = None
@property
def instance_class(self):
if self._instance_class is None:
domain_type = self.type_args[0].type_args[0].value()
framework_type = self.type_args[1]
class SpecialisedPretrainedModelInstance(PretrainedModelInstance):
framework = framework_type.instance_class.as_property()
domain = (
ConstantProperty(value=domain_type)
if isinstance(domain_type, str) else
StringProperty(max_length=32)
)
self._instance_class = SpecialisedPretrainedModelInstance
return self._instance_class
def name_filter(self, name: str) -> FilterExpression:
return Exact(field="name", value=name)
def extract_name_from_json(self, value: RawJSONObject) -> str:
return value['name']
def server_table_name(self) -> str:
return "pretrained-models"
def filter_rules(self) -> List[FilterExpression]:
rules = []
domain_type, framework_type = self.type_args
if isinstance(domain_type, Domain):
description_type = domain_type.type_args[0].value()
if isinstance(description_type, str):
rules.append(Exact(field="domain.description", value=description_type))
if isinstance(framework_type, Framework):
name_type, version_type = framework_type.type_args
if isinstance(name_type.value(), str):
rules.append(Exact(field="framework.name", value=name_type.value()))
if isinstance(version_type.value(), str):
rules.append(Exact(field="framework.version", value=version_type.value()))
return rules
def parse_json_value(self, value: RawJSONElement) -> PretrainedModelInstance:
return self.instance_class.from_raw_json(value)
def format_python_value_to_json(self, value: PretrainedModelInstance) -> RawJSONElement:
expect(self.instance_class, value)
return value.to_raw_json()
@property
def json_schema(self) -> JSONSchema:
return self.instance_class.get_json_validation_schema()
@classmethod
def type_params_expected_base_types(cls) -> Tuple[UFDLType, ...]:
return Domain(), Framework()
@property
def is_abstract(self) -> bool:
return False | 0.91914 | 0.190573 |
{
"variables": {
"realm_node_build_as_library%": "0",
"realm_download_binaries%": "1"
},
"includes": [
"target_defaults.gypi",
"realm.gypi"
],
"targets": [
{
"target_name": "realm",
"dependencies": [
"object-store"
],
"sources": [
"src/node/platform.cpp",
"src/js_realm.cpp"
],
"include_dirs": [
"src"
],
"conditions": [
["realm_node_build_as_library", {
"type": "static_library",
"export_dependent_settings": [ "object-store" ]
}, {
"sources": [
"src/node/node_init.cpp"
]
}]
]
},
{
"target_name": "object-store",
"dependencies": [ "realm-core" ],
"type": "static_library",
"include_dirs": [
"src/object-store/src",
"src/object-store/src/impl",
"src/object-store/src/impl/apple",
"src/object-store/src/parser",
"src/object-store/external/pegtl"
],
"sources": [
"src/object-store/src/binding_callback_thread_observer.cpp",
"src/object-store/src/collection_notifications.cpp",
"src/object-store/src/index_set.cpp",
"src/object-store/src/list.cpp",
"src/object-store/src/object.cpp",
"src/object-store/src/object_schema.cpp",
"src/object-store/src/object_store.cpp",
"src/object-store/src/results.cpp",
"src/object-store/src/schema.cpp",
"src/object-store/src/shared_realm.cpp",
"src/object-store/src/thread_safe_reference.cpp",
"src/object-store/src/impl/collection_change_builder.cpp",
"src/object-store/src/impl/collection_notifier.cpp",
"src/object-store/src/impl/list_notifier.cpp",
"src/object-store/src/impl/object_notifier.cpp",
"src/object-store/src/impl/realm_coordinator.cpp",
"src/object-store/src/impl/results_notifier.cpp",
"src/object-store/src/impl/transact_log_handler.cpp",
"src/object-store/src/impl/weak_realm_notifier.cpp",
"src/object-store/src/parser/parser.cpp",
"src/object-store/src/parser/query_builder.cpp",
"src/object-store/src/util/format.cpp",
],
"conditions": [
["OS=='win'", {
"sources": [
"src/object-store/src/impl/windows/external_commit_helper.cpp",
]
}],
["OS=='linux'", {
"sources": [
"src/object-store/src/impl/epoll/external_commit_helper.cpp",
]
}],
["OS=='mac'", {
"sources": [
"src/object-store/src/impl/apple/external_commit_helper.cpp",
"src/object-store/src/impl/apple/keychain_helper.cpp",
"src/object-store/src/sync/impl/apple/network_reachability_observer.cpp",
"src/object-store/src/sync/impl/apple/system_configuration.cpp"
]
}],
["realm_enable_sync", {
"dependencies": [ "realm-sync" ],
"sources": [
"src/object-store/src/sync/sync_manager.cpp",
"src/object-store/src/sync/sync_user.cpp",
"src/object-store/src/sync/sync_session.cpp",
"src/object-store/src/sync/impl/sync_file.cpp",
"src/object-store/src/sync/impl/sync_metadata.cpp"
],
}]
],
"all_dependent_settings": {
"include_dirs": [
"src/object-store/src",
"src/object-store/src/impl",
"src/object-store/src/impl/apple",
"src/object-store/src/parser",
"src/object-store/external/pegtl"
]
},
"export_dependent_settings": [
"<@(_dependencies)" # re-export settings related to linking the realm binaries
]
}
],
"conditions": [
["not realm_node_build_as_library", {
"targets": [
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node" ],
"destination": "<(module_path)"
}
]
}
]
}]
]
} | binding.gyp | {
"variables": {
"realm_node_build_as_library%": "0",
"realm_download_binaries%": "1"
},
"includes": [
"target_defaults.gypi",
"realm.gypi"
],
"targets": [
{
"target_name": "realm",
"dependencies": [
"object-store"
],
"sources": [
"src/node/platform.cpp",
"src/js_realm.cpp"
],
"include_dirs": [
"src"
],
"conditions": [
["realm_node_build_as_library", {
"type": "static_library",
"export_dependent_settings": [ "object-store" ]
}, {
"sources": [
"src/node/node_init.cpp"
]
}]
]
},
{
"target_name": "object-store",
"dependencies": [ "realm-core" ],
"type": "static_library",
"include_dirs": [
"src/object-store/src",
"src/object-store/src/impl",
"src/object-store/src/impl/apple",
"src/object-store/src/parser",
"src/object-store/external/pegtl"
],
"sources": [
"src/object-store/src/binding_callback_thread_observer.cpp",
"src/object-store/src/collection_notifications.cpp",
"src/object-store/src/index_set.cpp",
"src/object-store/src/list.cpp",
"src/object-store/src/object.cpp",
"src/object-store/src/object_schema.cpp",
"src/object-store/src/object_store.cpp",
"src/object-store/src/results.cpp",
"src/object-store/src/schema.cpp",
"src/object-store/src/shared_realm.cpp",
"src/object-store/src/thread_safe_reference.cpp",
"src/object-store/src/impl/collection_change_builder.cpp",
"src/object-store/src/impl/collection_notifier.cpp",
"src/object-store/src/impl/list_notifier.cpp",
"src/object-store/src/impl/object_notifier.cpp",
"src/object-store/src/impl/realm_coordinator.cpp",
"src/object-store/src/impl/results_notifier.cpp",
"src/object-store/src/impl/transact_log_handler.cpp",
"src/object-store/src/impl/weak_realm_notifier.cpp",
"src/object-store/src/parser/parser.cpp",
"src/object-store/src/parser/query_builder.cpp",
"src/object-store/src/util/format.cpp",
],
"conditions": [
["OS=='win'", {
"sources": [
"src/object-store/src/impl/windows/external_commit_helper.cpp",
]
}],
["OS=='linux'", {
"sources": [
"src/object-store/src/impl/epoll/external_commit_helper.cpp",
]
}],
["OS=='mac'", {
"sources": [
"src/object-store/src/impl/apple/external_commit_helper.cpp",
"src/object-store/src/impl/apple/keychain_helper.cpp",
"src/object-store/src/sync/impl/apple/network_reachability_observer.cpp",
"src/object-store/src/sync/impl/apple/system_configuration.cpp"
]
}],
["realm_enable_sync", {
"dependencies": [ "realm-sync" ],
"sources": [
"src/object-store/src/sync/sync_manager.cpp",
"src/object-store/src/sync/sync_user.cpp",
"src/object-store/src/sync/sync_session.cpp",
"src/object-store/src/sync/impl/sync_file.cpp",
"src/object-store/src/sync/impl/sync_metadata.cpp"
],
}]
],
"all_dependent_settings": {
"include_dirs": [
"src/object-store/src",
"src/object-store/src/impl",
"src/object-store/src/impl/apple",
"src/object-store/src/parser",
"src/object-store/external/pegtl"
]
},
"export_dependent_settings": [
"<@(_dependencies)" # re-export settings related to linking the realm binaries
]
}
],
"conditions": [
["not realm_node_build_as_library", {
"targets": [
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node" ],
"destination": "<(module_path)"
}
]
}
]
}]
]
} | 0.476823 | 0.176352 |
from __main__ import qt, ctk, slicer
from GBMWizardStep import *
from Helper import *
""" PreprocessStep inherits from GBMWizardStep, with itself inherits
from a ctk workflow class.
"""
class PreprocessStep( GBMWizardStep ) :
def __init__( self, stepid ):
self.initialize( stepid )
self.setName( '2. Preprocessing' )
self.__parent = super( PreprocessStep, self )
self.volumeLabels = ['t1Pre', 't1Post', 't2', 'flair']
def createUserInterface( self ):
""" This method uses qt to create a user interface of radio buttons to select
a registration method. Note that BSpline registration is so slow and memory-consuming
as to at one point break Slicer. There is an option to run it with limited memory,
but this may take prohibitively long. <- NOTE this last comment was based on
expert automated registration - not sure about other modules.
"""
self.__layout = self.__parent.createUserInterface()
step_label = qt.QLabel( """This step allows you to pre-process your data as necessary for deep learning segmentation. Your data may already be preprocessed, in which case you can skip this step. Note that for proper deep learning segmentation, your data will need to be A) registered, and B) resampled into isotropic space.
""")
step_label.setWordWrap(True)
self.__informationGroupBox = qt.QGroupBox()
self.__informationGroupBox.setTitle('Information')
self.__informationGroupBoxLayout = qt.QFormLayout(self.__informationGroupBox)
self.__informationGroupBoxLayout.addRow(step_label)
self.__layout.addRow(self.__informationGroupBox)
self.__registrationCollapsibleButton = ctk.ctkCollapsibleButton()
self.__registrationCollapsibleButton.text = "Registration"
self.__layout.addWidget(self.__registrationCollapsibleButton)
self.__registrationLayout = qt.QFormLayout(self.__registrationCollapsibleButton)
# Moving/Fixed Image Registration Order Options
OrderGroupBox = qt.QGroupBox()
OrderGroupBox.setTitle('Registration Base Volume')
self.__registrationLayout.addRow(OrderGroupBox)
OrderGroupBoxLayout = qt.QFormLayout(OrderGroupBox)
self.__OrderRadio1 = qt.QRadioButton("Register to T2.")
self.__OrderRadio1.toolTip = "Your images will be registered to T2 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio1)
self.__OrderRadio1.setChecked(True)
self.__OrderRadio2 = qt.QRadioButton("Register to FLAIR")
self.__OrderRadio2.toolTip = "Your images will be registered to FLAIR space."
OrderGroupBoxLayout.addRow(self.__OrderRadio2)
self.__OrderRadio3 = qt.QRadioButton("Register to post-contrast T1")
self.__OrderRadio3.toolTip = "Your images will be registered to post-contrast T1 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio3)
self.__OrderRadio4 = qt.QRadioButton("Register to pre-contrast T1")
self.__OrderRadio4.toolTip = "Your images will be registered to pre-contrast T1 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio4)
self.__orderMapping = dict(zip(self.volumeLabels, [self.__OrderRadio1, self.__OrderRadio2, self.__OrderRadio3, self.__OrderRadio4]))
# Registration Method Options
RegistrationGroupBox = qt.QGroupBox()
RegistrationGroupBox.setTitle('Registration Method')
self.__registrationLayout.addRow(RegistrationGroupBox)
RegistrationGroupBoxLayout = qt.QFormLayout(RegistrationGroupBox)
self.__RegistrationRadio1 = qt.QRadioButton("Rigid Registration")
self.__RegistrationRadio1.toolTip = """Computes a rigid registration on the pre-contrast image with respect to the post-contrast image. This will likely be the fastest registration method"""
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio1)
self.__RegistrationRadio2 = qt.QRadioButton("Affine Registration")
self.__RegistrationRadio2.toolTip = "Computes a rigid and affine registration on the pre-contrast image with respect to the post-contrast image. This method may take longer than rigid registration, but has the ability to stretch or compress images in addition to rotation and translation."
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio2)
self.__RegistrationRadio2.setChecked(True)
self.__RegistrationRadio3 = qt.QRadioButton("Deformable Registration")
self.__RegistrationRadio3.toolTip = """Computes a BSpline Registration on the pre-contrast image with respect to the post-contrast image. This method is slowest and may be necessary for only severly distorted images."""
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio3)
# Output Volume Preference
OutputGroupBox = qt.QGroupBox()
OutputGroupBox.setTitle('Registration Output')
self.__registrationLayout.addRow(OutputGroupBox)
OutputGroupBoxLayout = qt.QFormLayout(OutputGroupBox)
self.__OutputRadio1 = qt.QRadioButton("Create new volume.")
self.__OutputRadio1.toolTip = "A new volume will be created with the naming convention \"[pre]_reg_[post]\"."
OutputGroupBoxLayout.addRow(self.__OutputRadio1)
self.__OutputRadio1.setChecked(True)
self.__OutputRadio2 = qt.QRadioButton("Replace existing volume.")
self.__OutputRadio2.toolTip = "Your registered volume will be overwritten at the end of this step."
OutputGroupBoxLayout.addRow(self.__OutputRadio2)
# Registration Button and Progress Indicator
RunGroupBox = qt.QGroupBox()
RunGroupBox.setTitle('Run Registration')
self.__registrationLayout.addRow(RunGroupBox)
RunGroupBoxLayout = qt.QFormLayout(RunGroupBox)
self.__registrationButton = qt.QPushButton('Run registration')
self.__registrationStatus = qt.QLabel('Register scans')
self.__registrationStatus.alignment = 4 # This codes for centered alignment, although I'm not sure why.
RunGroupBoxLayout.addRow(self.__registrationStatus)
RunGroupBoxLayout.addRow(self.__registrationButton)
self.__registrationButton.connect('clicked()', self.onRegistrationRequest)
def killButton(self):
# ctk creates an unwanted final page button. This method gets rid of it.
bl = slicer.util.findChildren(text='ReviewStep')
if len(bl):
bl[0].hide()
def validate(self, desiredBranchId):
""" This checks to make sure you are not currently registering an image, and
throws an exception if so.
"""
self.__parent.validate( desiredBranchId )
pNode = self.parameterNode()
# Temporary
self.__parent.validationSucceeded(desiredBranchId)
return
if pNode.GetParameter('followupVolumeID') == '' or pNode.GetParameter('followupVolumeID') == None:
self.__parent.validationSucceeded(desiredBranchId)
else:
if self.__status == 'Uncalled':
if self.__RegistrationRadio1.isChecked():
self.__parent.validationSucceeded(desiredBranchId)
else:
self.__parent.validationFailed(desiredBranchId, 'Error','Please click \"Run Registration\" or select the \"No Registration\" option to continue.')
elif self.__status == 'Completed':
self.__parent.validationSucceeded(desiredBranchId)
else:
self.__parent.validationFailed(desiredBranchId, 'Error','Please wait until registration is completed.')
def onEntry(self, comingFrom, transitionType):
super(PreprocessStep, self).onEntry(comingFrom, transitionType)
pNode = self.parameterNode()
pNode.SetParameter('currentStep', self.stepid)
# Helper.SetBgFgVolumes(pNode.GetParameter('baselineVolumeID'), pNode.GetParameter('followupVolumeID'))
# A different attempt to get rid of the extra workflow button.
qt.QTimer.singleShot(0, self.killButton)
def onExit(self, goingTo, transitionType):
super(GBMWizardStep, self).onExit(goingTo, transitionType)
def onRegistrationRequest(self, wait_for_completion=False):
""" This method makes a call to a different slicer module, BRAINSFIT.
Note that this registration method computes a transform, which is
then applied to the followup volume in processRegistrationCompletion.
TO-DO: Add a cancel button and a progress bar.
"""
pNode = self.parameterNode()
# Determine the Fixed Volume
for volumeLabel in self.volumeLabels:
if self.__orderMapping[volumeLabel].isChecked():
fixedLabel = volumeLabel
fixedVolumeID = pNode.GetParameter(volumeLabel + 'ID')
break
fixedVolume = Helper.getNodeByID(fixedVolumeID)
# TODO: Add Advanced Options Dropdown for these params.
parameters = {}
parameters["interpolationMode"] = 'Linear'
parameters["initializeTransformMode"] = 'useMomentsAlign'
parameters["samplingPercentage"] = .02
for volumeLabel in self.volumeLabels:
if volumeLabel == fixedLabel:
continue
movingVolume = Helper.getNodeByID(pNode.GetParameter(volumeLabel + 'ID'))
# Registration Type Options.
if self.__RegistrationRadio3.isChecked():
BSplineTransform = slicer.vtkMRMLBSplineTransformNode()
slicer.mrmlScene.AddNode(BSplineTransform)
pNode.SetParameter(volumeLabel + 'RegistrationTransformID', BSplineTransform.GetID())
else:
LinearTransform = slicer.vtkMRMLLinearTransformNode()
slicer.mrmlScene.AddNode(LinearTransform)
pNode.SetParameter(volumeLabel + 'RegistrationTransformID', LinearTransform.GetID())
if self.__RegistrationRadio1.isChecked():
parameters['transformType'] = 'Rigid'
elif self.__RegistrationRadio2.isChecked():
parameters['transformType'] = 'Rigid,ScaleVersor3D,ScaleSkewVersor3D,Affine'
elif self.__RegistrationRadio3.isChecked():
parameters['transformType'] = 'BSpline'
parameters["fixedVolume"] = fixedVolume
parameters["movingVolume"] = movingVolume
# Output options. TODO: Make this section a bit more logical.
if self.__OutputRadio2.isChecked():
parameters['outputVolume'] = movingVolume
pNode.SetParameter(volumeLabel + 'RegistrationVolumeID', movingVolume.GetID())
elif self.__OutputRadio1.isChecked():
registrationID = pNode.GetParameter(volumeLabel + 'RegistrationVolumeID')
if registrationID == None or registrationID == '':
registrationVolume = slicer.vtkMRMLScalarVolumeNode()
registrationVolume.SetScene(slicer.mrmlScene)
registrationVolume.SetName(movingVolume.GetName() + '_reg_' + fixedVolume.GetName())
slicer.mrmlScene.AddNode(registrationVolume)
pNode.SetParameter(volumeLabel + 'RegistrationVolumeID', registrationVolume.GetID())
else:
registrationVolume = Helper.getNodeByID(registrationID)
parameters['outputVolume'] = registrationVolume
self.__cliNode = None
self.__cliNode = slicer.cli.run(slicer.modules.brainsfit, self.__cliNode, parameters, wait_for_completion=wait_for_completion)
# An event listener for the CLI. TODO: Add a progress bar.
self.__cliObserverTag = self.__cliNode.AddObserver('ModifiedEvent', self.processRegistrationCompletion)
self.__registrationStatus.setText('Wait ...')
self.__registrationButton.setEnabled(0)
def processRegistrationCompletion(self, node, event):
""" This updates the registration button with the CLI module's convenient status
indicator. Upon completion, it applies the transform to the followup node.
Furthermore, it sets the followup node to be the baseline node in the viewer.
"""
self.__status = node.GetStatusString()
self.__registrationStatus.setText('Registration ' + self.__status)
if self.__status == 'Completed':
self.__registrationButton.setEnabled(1)
pNode = self.parameterNode()
if self.__OrderRadio1.isChecked():
Helper.SetBgFgVolumes(pNode.GetParameter('followupVolumeID'), pNode.GetParameter('registrationVolumeID'))
else:
Helper.SetBgFgVolumes(pNode.GetParameter('registrationVolumeID'), pNode.GetParameter('baselineVolumeID')) | GBMWizard/GBMWizard_Lib/Preprocess.py | from __main__ import qt, ctk, slicer
from GBMWizardStep import *
from Helper import *
""" PreprocessStep inherits from GBMWizardStep, with itself inherits
from a ctk workflow class.
"""
class PreprocessStep( GBMWizardStep ) :
def __init__( self, stepid ):
self.initialize( stepid )
self.setName( '2. Preprocessing' )
self.__parent = super( PreprocessStep, self )
self.volumeLabels = ['t1Pre', 't1Post', 't2', 'flair']
def createUserInterface( self ):
""" This method uses qt to create a user interface of radio buttons to select
a registration method. Note that BSpline registration is so slow and memory-consuming
as to at one point break Slicer. There is an option to run it with limited memory,
but this may take prohibitively long. <- NOTE this last comment was based on
expert automated registration - not sure about other modules.
"""
self.__layout = self.__parent.createUserInterface()
step_label = qt.QLabel( """This step allows you to pre-process your data as necessary for deep learning segmentation. Your data may already be preprocessed, in which case you can skip this step. Note that for proper deep learning segmentation, your data will need to be A) registered, and B) resampled into isotropic space.
""")
step_label.setWordWrap(True)
self.__informationGroupBox = qt.QGroupBox()
self.__informationGroupBox.setTitle('Information')
self.__informationGroupBoxLayout = qt.QFormLayout(self.__informationGroupBox)
self.__informationGroupBoxLayout.addRow(step_label)
self.__layout.addRow(self.__informationGroupBox)
self.__registrationCollapsibleButton = ctk.ctkCollapsibleButton()
self.__registrationCollapsibleButton.text = "Registration"
self.__layout.addWidget(self.__registrationCollapsibleButton)
self.__registrationLayout = qt.QFormLayout(self.__registrationCollapsibleButton)
# Moving/Fixed Image Registration Order Options
OrderGroupBox = qt.QGroupBox()
OrderGroupBox.setTitle('Registration Base Volume')
self.__registrationLayout.addRow(OrderGroupBox)
OrderGroupBoxLayout = qt.QFormLayout(OrderGroupBox)
self.__OrderRadio1 = qt.QRadioButton("Register to T2.")
self.__OrderRadio1.toolTip = "Your images will be registered to T2 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio1)
self.__OrderRadio1.setChecked(True)
self.__OrderRadio2 = qt.QRadioButton("Register to FLAIR")
self.__OrderRadio2.toolTip = "Your images will be registered to FLAIR space."
OrderGroupBoxLayout.addRow(self.__OrderRadio2)
self.__OrderRadio3 = qt.QRadioButton("Register to post-contrast T1")
self.__OrderRadio3.toolTip = "Your images will be registered to post-contrast T1 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio3)
self.__OrderRadio4 = qt.QRadioButton("Register to pre-contrast T1")
self.__OrderRadio4.toolTip = "Your images will be registered to pre-contrast T1 space."
OrderGroupBoxLayout.addRow(self.__OrderRadio4)
self.__orderMapping = dict(zip(self.volumeLabels, [self.__OrderRadio1, self.__OrderRadio2, self.__OrderRadio3, self.__OrderRadio4]))
# Registration Method Options
RegistrationGroupBox = qt.QGroupBox()
RegistrationGroupBox.setTitle('Registration Method')
self.__registrationLayout.addRow(RegistrationGroupBox)
RegistrationGroupBoxLayout = qt.QFormLayout(RegistrationGroupBox)
self.__RegistrationRadio1 = qt.QRadioButton("Rigid Registration")
self.__RegistrationRadio1.toolTip = """Computes a rigid registration on the pre-contrast image with respect to the post-contrast image. This will likely be the fastest registration method"""
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio1)
self.__RegistrationRadio2 = qt.QRadioButton("Affine Registration")
self.__RegistrationRadio2.toolTip = "Computes a rigid and affine registration on the pre-contrast image with respect to the post-contrast image. This method may take longer than rigid registration, but has the ability to stretch or compress images in addition to rotation and translation."
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio2)
self.__RegistrationRadio2.setChecked(True)
self.__RegistrationRadio3 = qt.QRadioButton("Deformable Registration")
self.__RegistrationRadio3.toolTip = """Computes a BSpline Registration on the pre-contrast image with respect to the post-contrast image. This method is slowest and may be necessary for only severly distorted images."""
RegistrationGroupBoxLayout.addRow(self.__RegistrationRadio3)
# Output Volume Preference
OutputGroupBox = qt.QGroupBox()
OutputGroupBox.setTitle('Registration Output')
self.__registrationLayout.addRow(OutputGroupBox)
OutputGroupBoxLayout = qt.QFormLayout(OutputGroupBox)
self.__OutputRadio1 = qt.QRadioButton("Create new volume.")
self.__OutputRadio1.toolTip = "A new volume will be created with the naming convention \"[pre]_reg_[post]\"."
OutputGroupBoxLayout.addRow(self.__OutputRadio1)
self.__OutputRadio1.setChecked(True)
self.__OutputRadio2 = qt.QRadioButton("Replace existing volume.")
self.__OutputRadio2.toolTip = "Your registered volume will be overwritten at the end of this step."
OutputGroupBoxLayout.addRow(self.__OutputRadio2)
# Registration Button and Progress Indicator
RunGroupBox = qt.QGroupBox()
RunGroupBox.setTitle('Run Registration')
self.__registrationLayout.addRow(RunGroupBox)
RunGroupBoxLayout = qt.QFormLayout(RunGroupBox)
self.__registrationButton = qt.QPushButton('Run registration')
self.__registrationStatus = qt.QLabel('Register scans')
self.__registrationStatus.alignment = 4 # This codes for centered alignment, although I'm not sure why.
RunGroupBoxLayout.addRow(self.__registrationStatus)
RunGroupBoxLayout.addRow(self.__registrationButton)
self.__registrationButton.connect('clicked()', self.onRegistrationRequest)
def killButton(self):
# ctk creates an unwanted final page button. This method gets rid of it.
bl = slicer.util.findChildren(text='ReviewStep')
if len(bl):
bl[0].hide()
def validate(self, desiredBranchId):
""" This checks to make sure you are not currently registering an image, and
throws an exception if so.
"""
self.__parent.validate( desiredBranchId )
pNode = self.parameterNode()
# Temporary
self.__parent.validationSucceeded(desiredBranchId)
return
if pNode.GetParameter('followupVolumeID') == '' or pNode.GetParameter('followupVolumeID') == None:
self.__parent.validationSucceeded(desiredBranchId)
else:
if self.__status == 'Uncalled':
if self.__RegistrationRadio1.isChecked():
self.__parent.validationSucceeded(desiredBranchId)
else:
self.__parent.validationFailed(desiredBranchId, 'Error','Please click \"Run Registration\" or select the \"No Registration\" option to continue.')
elif self.__status == 'Completed':
self.__parent.validationSucceeded(desiredBranchId)
else:
self.__parent.validationFailed(desiredBranchId, 'Error','Please wait until registration is completed.')
def onEntry(self, comingFrom, transitionType):
super(PreprocessStep, self).onEntry(comingFrom, transitionType)
pNode = self.parameterNode()
pNode.SetParameter('currentStep', self.stepid)
# Helper.SetBgFgVolumes(pNode.GetParameter('baselineVolumeID'), pNode.GetParameter('followupVolumeID'))
# A different attempt to get rid of the extra workflow button.
qt.QTimer.singleShot(0, self.killButton)
def onExit(self, goingTo, transitionType):
super(GBMWizardStep, self).onExit(goingTo, transitionType)
def onRegistrationRequest(self, wait_for_completion=False):
""" This method makes a call to a different slicer module, BRAINSFIT.
Note that this registration method computes a transform, which is
then applied to the followup volume in processRegistrationCompletion.
TO-DO: Add a cancel button and a progress bar.
"""
pNode = self.parameterNode()
# Determine the Fixed Volume
for volumeLabel in self.volumeLabels:
if self.__orderMapping[volumeLabel].isChecked():
fixedLabel = volumeLabel
fixedVolumeID = pNode.GetParameter(volumeLabel + 'ID')
break
fixedVolume = Helper.getNodeByID(fixedVolumeID)
# TODO: Add Advanced Options Dropdown for these params.
parameters = {}
parameters["interpolationMode"] = 'Linear'
parameters["initializeTransformMode"] = 'useMomentsAlign'
parameters["samplingPercentage"] = .02
for volumeLabel in self.volumeLabels:
if volumeLabel == fixedLabel:
continue
movingVolume = Helper.getNodeByID(pNode.GetParameter(volumeLabel + 'ID'))
# Registration Type Options.
if self.__RegistrationRadio3.isChecked():
BSplineTransform = slicer.vtkMRMLBSplineTransformNode()
slicer.mrmlScene.AddNode(BSplineTransform)
pNode.SetParameter(volumeLabel + 'RegistrationTransformID', BSplineTransform.GetID())
else:
LinearTransform = slicer.vtkMRMLLinearTransformNode()
slicer.mrmlScene.AddNode(LinearTransform)
pNode.SetParameter(volumeLabel + 'RegistrationTransformID', LinearTransform.GetID())
if self.__RegistrationRadio1.isChecked():
parameters['transformType'] = 'Rigid'
elif self.__RegistrationRadio2.isChecked():
parameters['transformType'] = 'Rigid,ScaleVersor3D,ScaleSkewVersor3D,Affine'
elif self.__RegistrationRadio3.isChecked():
parameters['transformType'] = 'BSpline'
parameters["fixedVolume"] = fixedVolume
parameters["movingVolume"] = movingVolume
# Output options. TODO: Make this section a bit more logical.
if self.__OutputRadio2.isChecked():
parameters['outputVolume'] = movingVolume
pNode.SetParameter(volumeLabel + 'RegistrationVolumeID', movingVolume.GetID())
elif self.__OutputRadio1.isChecked():
registrationID = pNode.GetParameter(volumeLabel + 'RegistrationVolumeID')
if registrationID == None or registrationID == '':
registrationVolume = slicer.vtkMRMLScalarVolumeNode()
registrationVolume.SetScene(slicer.mrmlScene)
registrationVolume.SetName(movingVolume.GetName() + '_reg_' + fixedVolume.GetName())
slicer.mrmlScene.AddNode(registrationVolume)
pNode.SetParameter(volumeLabel + 'RegistrationVolumeID', registrationVolume.GetID())
else:
registrationVolume = Helper.getNodeByID(registrationID)
parameters['outputVolume'] = registrationVolume
self.__cliNode = None
self.__cliNode = slicer.cli.run(slicer.modules.brainsfit, self.__cliNode, parameters, wait_for_completion=wait_for_completion)
# An event listener for the CLI. TODO: Add a progress bar.
self.__cliObserverTag = self.__cliNode.AddObserver('ModifiedEvent', self.processRegistrationCompletion)
self.__registrationStatus.setText('Wait ...')
self.__registrationButton.setEnabled(0)
def processRegistrationCompletion(self, node, event):
""" This updates the registration button with the CLI module's convenient status
indicator. Upon completion, it applies the transform to the followup node.
Furthermore, it sets the followup node to be the baseline node in the viewer.
"""
self.__status = node.GetStatusString()
self.__registrationStatus.setText('Registration ' + self.__status)
if self.__status == 'Completed':
self.__registrationButton.setEnabled(1)
pNode = self.parameterNode()
if self.__OrderRadio1.isChecked():
Helper.SetBgFgVolumes(pNode.GetParameter('followupVolumeID'), pNode.GetParameter('registrationVolumeID'))
else:
Helper.SetBgFgVolumes(pNode.GetParameter('registrationVolumeID'), pNode.GetParameter('baselineVolumeID')) | 0.79732 | 0.238218 |
import base64
import random
import re
import httpx
from nonebot import get_driver
from nonebot.adapters import Bot, Event
from nonebot.adapters.cqhttp import MessageSegment, GROUP_ADMIN, GROUP_OWNER
from nonebot.permission import SUPERUSER
from nonebot.plugin import on_regex, on_command
from nonebot.typing import T_State
from src.utils.database import Recipes
from src.utils.general import DailyNumberLimiter
global_config = get_driver().config
__PROJECT_ROOT__ = global_config.project_root
_day_limit = 3
_curse_limit = 1
_lmt = DailyNumberLimiter(_day_limit)
_curse_lmt = DailyNumberLimiter(_curse_limit)
rx = r"^(今天|[早中午晚][上饭餐午]|夜宵|睡前)吃(什么|啥|点啥)"
eat_program_on = on_regex(rx, priority=3)
@eat_program_on.handle()
async def live_subscription(bot: Bot, event: Event, state: T_State):
print("1" * 70)
user_id = str(event.get_user_id())
if not _lmt.check(user_id):
if not _curse_lmt.check(user_id):
return
else:
_curse_lmt.increase(user_id)
await eat_program_on.finish(MessageSegment.at(event.get_user_id()) + "盛饭的桶")
else:
_lmt.increase(user_id)
res = re.match(rx, str(event.get_message()))
time = res.group(1)
recipes = await Recipes(name="", content="".encode()).select_all()
if recipes.result:
food = recipes.result[random.randint(0, len(recipes.result) - 1)]
to_eat = f'{time}去吃{food.name}吧~\n'
message = MessageSegment.at(event.get_user_id()) + to_eat + MessageSegment.image(
f"base64://{food.content.decode()}")
await eat_program_on.finish(message)
recipes_program = on_command("加菜", priority=3, permission=SUPERUSER | GROUP_ADMIN | GROUP_OWNER)
@recipes_program.handle()
async def eat_program_receive(bot: Bot, event: Event, state: T_State):
args = str(event.get_message()).strip()
if args:
state["content"] = args
@recipes_program.got("content", prompt="输入:加菜 菜名 图片")
async def eat_program_got(bot: Bot, event: Event, state: T_State):
name = str(state["content"]).split(' ')[0]
message_image = re.findall(r'.*?file=(.*?\.image)', str(state["content"]).split(' ')[1])[0]
url = await bot.get_image(file=message_image)
content = httpx.get(url=url["url"]).content
image = base64.b64encode(content)
recipes = Recipes(name=name, content=image)
result = await recipes.insert()
if result.result == 1:
await recipes_program.finish("添加成功") | src/plugins/eat/__init__.py | import base64
import random
import re
import httpx
from nonebot import get_driver
from nonebot.adapters import Bot, Event
from nonebot.adapters.cqhttp import MessageSegment, GROUP_ADMIN, GROUP_OWNER
from nonebot.permission import SUPERUSER
from nonebot.plugin import on_regex, on_command
from nonebot.typing import T_State
from src.utils.database import Recipes
from src.utils.general import DailyNumberLimiter
global_config = get_driver().config
__PROJECT_ROOT__ = global_config.project_root
_day_limit = 3
_curse_limit = 1
_lmt = DailyNumberLimiter(_day_limit)
_curse_lmt = DailyNumberLimiter(_curse_limit)
rx = r"^(今天|[早中午晚][上饭餐午]|夜宵|睡前)吃(什么|啥|点啥)"
eat_program_on = on_regex(rx, priority=3)
@eat_program_on.handle()
async def live_subscription(bot: Bot, event: Event, state: T_State):
print("1" * 70)
user_id = str(event.get_user_id())
if not _lmt.check(user_id):
if not _curse_lmt.check(user_id):
return
else:
_curse_lmt.increase(user_id)
await eat_program_on.finish(MessageSegment.at(event.get_user_id()) + "盛饭的桶")
else:
_lmt.increase(user_id)
res = re.match(rx, str(event.get_message()))
time = res.group(1)
recipes = await Recipes(name="", content="".encode()).select_all()
if recipes.result:
food = recipes.result[random.randint(0, len(recipes.result) - 1)]
to_eat = f'{time}去吃{food.name}吧~\n'
message = MessageSegment.at(event.get_user_id()) + to_eat + MessageSegment.image(
f"base64://{food.content.decode()}")
await eat_program_on.finish(message)
recipes_program = on_command("加菜", priority=3, permission=SUPERUSER | GROUP_ADMIN | GROUP_OWNER)
@recipes_program.handle()
async def eat_program_receive(bot: Bot, event: Event, state: T_State):
args = str(event.get_message()).strip()
if args:
state["content"] = args
@recipes_program.got("content", prompt="输入:加菜 菜名 图片")
async def eat_program_got(bot: Bot, event: Event, state: T_State):
name = str(state["content"]).split(' ')[0]
message_image = re.findall(r'.*?file=(.*?\.image)', str(state["content"]).split(' ')[1])[0]
url = await bot.get_image(file=message_image)
content = httpx.get(url=url["url"]).content
image = base64.b64encode(content)
recipes = Recipes(name=name, content=image)
result = await recipes.insert()
if result.result == 1:
await recipes_program.finish("添加成功") | 0.175044 | 0.0686 |
import Vars
import Offsets
import math
class Vector2 ():
x = 0
y = 0
xDir = ""
yDir = ""
class Player ():
type = "Player"
id = 0
x = 0
y = 0
xVel = 0
yVel = 0
damageTaken = 0
grounded = False
inAnimation = False
inStun = False
canAttack = False
jumpCount = 2
isDodgingCurrently = False
name = "DefaultEntityName"
pointer = 0
def init (self):
self.updateInfo()
def printInfo (self):
print("[X] : " + str(self.x) + " [Y] : " + str(self.y))
print("Jumps : " + str(self.jumpCount))
def dist (self, targ, type = "est", rtnType = "vec"):
self.updateInfo()
if type == "real" and rtnType == "val":
return math.sqrt((self.x - targ.x * Vars.velMultiplier) * (self.x - targ.x * Vars.velMultiplier) + (self.y - targ.y * Vars.velMultiplier) * (self.y - targ.y * Vars.velMultiplier))
if type == "est" and rtnType == "val":
return math.sqrt(((self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)) * ((self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)) + ((self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)) * ((self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)))
if type == "est" and rtnType == "vec":
rX = (self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)
rY = (self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
return rtn
if type == "real" and rtnType == "vec":
rX = (self.x) - (targ.x)
rY = (self.y) - (targ.y)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
rtn.x = abs(rtn.x)
rtn.y = abs(rtn.y)
return rtn
if type == "realEst" and rtnType == "vec":
rX = (self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)
rY = (self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
rtn.x = abs(rtn.x)
rtn.y = abs(rtn.y)
return rtn
def update (self):
self.updateInfo()
def updateInfo (self):
self.x = Vars.mem.Address(self.pointer + Offsets.offsets["x"]).read(type='double')
self.y = -Vars.mem.Address(self.pointer + Offsets.offsets["y"]).read(type='double')
self.damageTaken = Vars.mem.Address(self.pointer + Offsets.offsets["damageTaken"]).read(type='double')
self.xVel = Vars.mem.Address(self.pointer + Offsets.offsets["xVel"]).read(type='double')
self.yVel = -Vars.mem.Address(self.pointer + Offsets.offsets["yVel"]).read(type='double')
self.jumpCount = 2 - Vars.mem.Address(self.pointer + Offsets.offsets["jumpCount"]).read(type='int')
if Vars.mem.Address(self.pointer + Offsets.offsets["inAnimation"]).read(type='int') != 0:
self.inAnimation = True
else:
self.inAnimation = False
if Vars.mem.Address(self.pointer + Offsets.offsets["inStun"]).read(type='int') == 0:
self.inStun = True
else:
self.inStun = False
if Vars.mem.Address(self.pointer + Offsets.offsets["grounded"]).read(type='int') == 0:
self.grounded = True
else:
self.grounded = False
if Vars.mem.Address(self.pointer + Offsets.offsets["canDodge"]).read(type='int') == 0:
self.canDodge = True
else:
self.canDodge = False
if Vars.mem.Address(self.pointer + Offsets.offsets["canAttack"]).read(type='int') == 0:
self.canAttack = True
else:
self.canAttack = False
if Vars.mem.Address(self.pointer + Offsets.offsets["isDodgingCurrently"]).read(type='double') == 0:
self.isDodgingCurrently = True
else:
self.isDodgingCurrently = False | Player.py | import Vars
import Offsets
import math
class Vector2 ():
x = 0
y = 0
xDir = ""
yDir = ""
class Player ():
type = "Player"
id = 0
x = 0
y = 0
xVel = 0
yVel = 0
damageTaken = 0
grounded = False
inAnimation = False
inStun = False
canAttack = False
jumpCount = 2
isDodgingCurrently = False
name = "DefaultEntityName"
pointer = 0
def init (self):
self.updateInfo()
def printInfo (self):
print("[X] : " + str(self.x) + " [Y] : " + str(self.y))
print("Jumps : " + str(self.jumpCount))
def dist (self, targ, type = "est", rtnType = "vec"):
self.updateInfo()
if type == "real" and rtnType == "val":
return math.sqrt((self.x - targ.x * Vars.velMultiplier) * (self.x - targ.x * Vars.velMultiplier) + (self.y - targ.y * Vars.velMultiplier) * (self.y - targ.y * Vars.velMultiplier))
if type == "est" and rtnType == "val":
return math.sqrt(((self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)) * ((self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)) + ((self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)) * ((self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)))
if type == "est" and rtnType == "vec":
rX = (self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)
rY = (self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
return rtn
if type == "real" and rtnType == "vec":
rX = (self.x) - (targ.x)
rY = (self.y) - (targ.y)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
rtn.x = abs(rtn.x)
rtn.y = abs(rtn.y)
return rtn
if type == "realEst" and rtnType == "vec":
rX = (self.x + self.xVel * Vars.velMultiplier) - (targ.x + targ.xVel * Vars.velMultiplier)
rY = (self.y + self.yVel * Vars.velMultiplier) - (targ.y + targ.yVel * Vars.velMultiplier)
rtn = Vector2()
rtn.x = rX
rtn.y = rY
if rtn.x < 0:
rtn.xDir = "right"
else:
rtn.xDir = "left"
if rtn.y < 0:
rtn.yDir = "down"
else:
rtn.yDir = "up"
rtn.x = abs(rtn.x)
rtn.y = abs(rtn.y)
return rtn
def update (self):
self.updateInfo()
def updateInfo (self):
self.x = Vars.mem.Address(self.pointer + Offsets.offsets["x"]).read(type='double')
self.y = -Vars.mem.Address(self.pointer + Offsets.offsets["y"]).read(type='double')
self.damageTaken = Vars.mem.Address(self.pointer + Offsets.offsets["damageTaken"]).read(type='double')
self.xVel = Vars.mem.Address(self.pointer + Offsets.offsets["xVel"]).read(type='double')
self.yVel = -Vars.mem.Address(self.pointer + Offsets.offsets["yVel"]).read(type='double')
self.jumpCount = 2 - Vars.mem.Address(self.pointer + Offsets.offsets["jumpCount"]).read(type='int')
if Vars.mem.Address(self.pointer + Offsets.offsets["inAnimation"]).read(type='int') != 0:
self.inAnimation = True
else:
self.inAnimation = False
if Vars.mem.Address(self.pointer + Offsets.offsets["inStun"]).read(type='int') == 0:
self.inStun = True
else:
self.inStun = False
if Vars.mem.Address(self.pointer + Offsets.offsets["grounded"]).read(type='int') == 0:
self.grounded = True
else:
self.grounded = False
if Vars.mem.Address(self.pointer + Offsets.offsets["canDodge"]).read(type='int') == 0:
self.canDodge = True
else:
self.canDodge = False
if Vars.mem.Address(self.pointer + Offsets.offsets["canAttack"]).read(type='int') == 0:
self.canAttack = True
else:
self.canAttack = False
if Vars.mem.Address(self.pointer + Offsets.offsets["isDodgingCurrently"]).read(type='double') == 0:
self.isDodgingCurrently = True
else:
self.isDodgingCurrently = False | 0.290377 | 0.241344 |
from .analyst.report import Report
from .analyst.draw_engine import DrawEngine
from .analyst.automated_suite import do_statistics
from .clockmaster import adjust_clock
from .datasource.log_engine import proceed as l_proceed
from .driver import Driver
from .utils import Report as ParserReport
from .workflow.engine import proceed
def _load_data(data_path, driver):
print("Load result from %s" % data_path)
assert isinstance(driver, Driver)
print("Load driver %s" % driver.name)
print
master = driver.graph
print("graph:")
print(str(master))
report_i = ParserReport()
requestinss = None
try:
# build logs
targets_byname = l_proceed(data_path, driver.services, driver, report_i)
# build states
requestinss = proceed(targets_byname, master, report_i)
except Exception:
print("\n%r\n" % report_i)
raise
print("%r" % report_i)
print()
# correct clocks
adjust_clock(requestinss)
return requestinss
def execute(driver):
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('folder',
default=".",
help="The logs are in that folder.")
parser.add_argument('--brief',
action="store_true",
help="Don't export report and draw figures.")
# parser.add_argument('--outfolder',
# help="Folder to put figures.",
# default="/root/container/out/")
# parser.add_argument('--csv-print-header', action="store_true",
# help="Write a row into the CSV file for the headers.")
# parser.add_argument('--outfile',
# help="The output file of report, "
# "valid only when --draw is set.")
args = parser.parse_args()
requestinss = _load_data(args.folder, driver)
if requestinss:
folders = args.folder.split("/")
name = folders[-1] or folders[-2]
draw_engine = None
out_file = None
if not args.brief:
outfolder = args.folder + ("/out-%s/" % driver.name)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
draw_engine = DrawEngine(outfolder)
out_file = outfolder+"/report.csv"
do_statistics(name, driver.graph, requestinss, draw_engine, out_file)
def load(data_path, driver):
requestinss = _load_data(data_path, driver)
folders = data_path.split("/")
name = folders[-1] or folders[-2]
from .analyst.notebook_display import Requests_D
return Requests_D(name, requestinss, driver.graph)
__all__ = ["load"] | workflow_parser/loader.py | from .analyst.report import Report
from .analyst.draw_engine import DrawEngine
from .analyst.automated_suite import do_statistics
from .clockmaster import adjust_clock
from .datasource.log_engine import proceed as l_proceed
from .driver import Driver
from .utils import Report as ParserReport
from .workflow.engine import proceed
def _load_data(data_path, driver):
print("Load result from %s" % data_path)
assert isinstance(driver, Driver)
print("Load driver %s" % driver.name)
print
master = driver.graph
print("graph:")
print(str(master))
report_i = ParserReport()
requestinss = None
try:
# build logs
targets_byname = l_proceed(data_path, driver.services, driver, report_i)
# build states
requestinss = proceed(targets_byname, master, report_i)
except Exception:
print("\n%r\n" % report_i)
raise
print("%r" % report_i)
print()
# correct clocks
adjust_clock(requestinss)
return requestinss
def execute(driver):
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('folder',
default=".",
help="The logs are in that folder.")
parser.add_argument('--brief',
action="store_true",
help="Don't export report and draw figures.")
# parser.add_argument('--outfolder',
# help="Folder to put figures.",
# default="/root/container/out/")
# parser.add_argument('--csv-print-header', action="store_true",
# help="Write a row into the CSV file for the headers.")
# parser.add_argument('--outfile',
# help="The output file of report, "
# "valid only when --draw is set.")
args = parser.parse_args()
requestinss = _load_data(args.folder, driver)
if requestinss:
folders = args.folder.split("/")
name = folders[-1] or folders[-2]
draw_engine = None
out_file = None
if not args.brief:
outfolder = args.folder + ("/out-%s/" % driver.name)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
draw_engine = DrawEngine(outfolder)
out_file = outfolder+"/report.csv"
do_statistics(name, driver.graph, requestinss, draw_engine, out_file)
def load(data_path, driver):
requestinss = _load_data(data_path, driver)
folders = data_path.split("/")
name = folders[-1] or folders[-2]
from .analyst.notebook_display import Requests_D
return Requests_D(name, requestinss, driver.graph)
__all__ = ["load"] | 0.41739 | 0.109706 |
import os
import yaml
import six
from pssh.exceptions import BadKeyNameError, MissingKeyError
if six.PY2:
from io import open
def get_user_configuration_path():
"""
Get the config path for the current user.
:rtype: Path to the config file (str)
"""
return os.path.normpath(os.path.expanduser("~/.pssh/config.yml"))
def load_configuration_file(path_to_file):
"""
Load a pssh configuration file.
:param path_to_file Path to file (str)
:return Configuration data (dict)
"""
if not os.path.isfile(path_to_file):
raise RuntimeError("File `{0}` does not exist.".format(path_to_file))
with open(path_to_file, mode="rt", encoding="utf-8") as handle:
file_content = handle.read()
return load_configuration_content(file_content)
def load_configuration_content(file_content):
"""
Load a pssh configuration content.
:param file_content File content (str)
:return Configuration data (dict)
"""
try:
dict_content = yaml.load(file_content)
except Exception:
raise RuntimeError("Bad YAML file.")
return dict_content
def extract_machine_hierarchy(config_dict):
"""
Extract machines from configuration dict.
:param config_dict Configuration dict (dict)
:return Machine hierarchy (dict)
"""
default_values = _extract_default_configurations(config_dict)
machines = _extract_machine_configurations(config_dict)
configured_machines = apply_default_configurations(machines, default_values)
return {
"default_values": default_values,
"machines": configured_machines
}
def fetch_default_values_for_name(default_values, config_name):
"""
Fetch the default values for a config name.
:param default_values Default values (dict)
:param config_name Configuration name (str)
"""
default_keys = default_values.keys()
split_parent_names = config_name.split(":")[:-1]
current_values = {}
current_parent = ""
# Fetch global defaults
if "" in default_keys:
current_values = default_values[""]
# Check for each namespaces
for parent in split_parent_names:
# Compute parent name
if current_parent == "":
current_parent = parent
else:
current_parent = ":".join((current_parent, parent))
if current_parent in default_keys:
values = default_values[current_parent]
current_values.update(values)
return current_values
def apply_default_configurations(machines, default_values):
"""
Apply default configurations to machines.
:param machines Machine configurations (dict)
:param default_values Default values (dict)
:return New machine configurations (dict)
"""
new_machines = {}
for config_name in machines:
new_machine_config = {}
default_config = fetch_default_values_for_name(default_values, config_name)
current_machine_config = machines[config_name]
new_machine_config.update(default_config)
new_machine_config.update(current_machine_config)
new_machines[config_name] = new_machine_config
return new_machines
def _extract_machine_configurations(config_dict):
if "machines" not in config_dict:
raise MissingKeyError("Missing 'machines' key in configuration.")
machines = config_dict["machines"]
return dict(_extract_definition_keys("", machines))
def _extract_default_configurations(config_dict):
if "defaults" not in config_dict:
return {}
defaults = config_dict["defaults"]
return dict(_extract_definition_keys("", defaults))
def _extract_definition_keys(parent_key, current_dict):
keys = current_dict.keys() if current_dict else []
extract = []
if len(keys) > 0:
# Values
if "_values" in keys:
values = current_dict["_values"]
extract.append((parent_key, values))
# Child keys
for key in [k for k in keys if k != "_values"]:
if ":" in key:
raise BadKeyNameError("Character ':' not allowed.")
current_key_name = key
if parent_key != "":
current_key_name = ":".join((parent_key, key))
results = _extract_definition_keys(current_key_name, current_dict[key])
for result in results:
extract.append(result)
return extract | pssh/config.py |
import os
import yaml
import six
from pssh.exceptions import BadKeyNameError, MissingKeyError
if six.PY2:
from io import open
def get_user_configuration_path():
"""
Get the config path for the current user.
:rtype: Path to the config file (str)
"""
return os.path.normpath(os.path.expanduser("~/.pssh/config.yml"))
def load_configuration_file(path_to_file):
"""
Load a pssh configuration file.
:param path_to_file Path to file (str)
:return Configuration data (dict)
"""
if not os.path.isfile(path_to_file):
raise RuntimeError("File `{0}` does not exist.".format(path_to_file))
with open(path_to_file, mode="rt", encoding="utf-8") as handle:
file_content = handle.read()
return load_configuration_content(file_content)
def load_configuration_content(file_content):
"""
Load a pssh configuration content.
:param file_content File content (str)
:return Configuration data (dict)
"""
try:
dict_content = yaml.load(file_content)
except Exception:
raise RuntimeError("Bad YAML file.")
return dict_content
def extract_machine_hierarchy(config_dict):
"""
Extract machines from configuration dict.
:param config_dict Configuration dict (dict)
:return Machine hierarchy (dict)
"""
default_values = _extract_default_configurations(config_dict)
machines = _extract_machine_configurations(config_dict)
configured_machines = apply_default_configurations(machines, default_values)
return {
"default_values": default_values,
"machines": configured_machines
}
def fetch_default_values_for_name(default_values, config_name):
"""
Fetch the default values for a config name.
:param default_values Default values (dict)
:param config_name Configuration name (str)
"""
default_keys = default_values.keys()
split_parent_names = config_name.split(":")[:-1]
current_values = {}
current_parent = ""
# Fetch global defaults
if "" in default_keys:
current_values = default_values[""]
# Check for each namespaces
for parent in split_parent_names:
# Compute parent name
if current_parent == "":
current_parent = parent
else:
current_parent = ":".join((current_parent, parent))
if current_parent in default_keys:
values = default_values[current_parent]
current_values.update(values)
return current_values
def apply_default_configurations(machines, default_values):
"""
Apply default configurations to machines.
:param machines Machine configurations (dict)
:param default_values Default values (dict)
:return New machine configurations (dict)
"""
new_machines = {}
for config_name in machines:
new_machine_config = {}
default_config = fetch_default_values_for_name(default_values, config_name)
current_machine_config = machines[config_name]
new_machine_config.update(default_config)
new_machine_config.update(current_machine_config)
new_machines[config_name] = new_machine_config
return new_machines
def _extract_machine_configurations(config_dict):
if "machines" not in config_dict:
raise MissingKeyError("Missing 'machines' key in configuration.")
machines = config_dict["machines"]
return dict(_extract_definition_keys("", machines))
def _extract_default_configurations(config_dict):
if "defaults" not in config_dict:
return {}
defaults = config_dict["defaults"]
return dict(_extract_definition_keys("", defaults))
def _extract_definition_keys(parent_key, current_dict):
keys = current_dict.keys() if current_dict else []
extract = []
if len(keys) > 0:
# Values
if "_values" in keys:
values = current_dict["_values"]
extract.append((parent_key, values))
# Child keys
for key in [k for k in keys if k != "_values"]:
if ":" in key:
raise BadKeyNameError("Character ':' not allowed.")
current_key_name = key
if parent_key != "":
current_key_name = ":".join((parent_key, key))
results = _extract_definition_keys(current_key_name, current_dict[key])
for result in results:
extract.append(result)
return extract | 0.581184 | 0.31384 |
from email.parser import Parser
from fabric.api import local, require, settings, task
from fabric.state import env
from jinja2 import Environment, FileSystemLoader
from termcolor import colored
from datetime import datetime
import app_config
# Other fabfiles
import assets
import boto.ses
import data
import flat
import issues
import os
import pytumblr
import render
import smtplib
import text
import utils
if app_config.DEPLOY_TO_SERVERS:
import servers
if app_config.DEPLOY_CRONTAB:
import cron_jobs
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
import bootstrap
"""
Base configuration
"""
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
env.settings = None
env.tumblr_blog_name = 'stage-lookatthis'
env.twitter_handle = 'lookatthisstory'
env.twitter_timeframe = '7' # days
env.from_email_address = 'NPR Visuals Linklater <<EMAIL>>'
env.to_email_addresses = ['<EMAIL>', '<EMAIL>', '<EMAIL>']
env.email_subject_template = 'Richard Linklater\'s links for %s'
# Jinja env
fab_path = os.path.realpath(os.path.dirname(__file__))
templates_path = os.path.join(fab_path, '../templates')
env.jinja_env = Environment(loader=FileSystemLoader(templates_path))
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
"""
Run as though on production.
"""
env.settings = 'production'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
env.tumblr_blog_name = 'lookatthis'
@task
def staging():
"""
Run as though on staging.
"""
env.settings = 'staging'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
"""
Work on stable branch.
"""
env.branch = 'stable'
@task
def master():
"""
Work on development branch.
"""
env.branch = 'master'
@task
def branch(branch_name):
"""
Work on any specified branch.
"""
env.branch = branch_name
"""
Running the app
"""
@task
def app(port='8000'):
"""
Serve app.py.
"""
local('gunicorn -b 0.0.0.0:%s --debug --reload app:wsgi_app' % port)
@task
def public_app(port='8001'):
"""
Serve public_app.py.
"""
local('gunicorn -b 0.0.0.0:%s --debug --reload public_app:wsgi_app' % port)
@task
def tests():
"""
Run Python unit tests.
"""
local('nosetests')
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
@task
def update():
"""
Update all application data not in repository (copy, assets, etc).
"""
text.update()
assets.sync()
data.update()
@task
def deploy(remote='origin'):
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging])
if app_config.DEPLOY_TO_SERVERS:
require('branch', provided_by=[stable, master, branch])
if (app_config.DEPLOYMENT_TARGET == 'production' and env.branch != 'stable'):
utils.confirm(
colored("You are trying to deploy the '%s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env.branch, "red")
)
servers.checkout_latest(remote)
servers.fabcast('assets.sync')
servers.install_crontab()
@task
def linklater():
"""
Alerts recipients when Tumblr draft with links scraped from Twitter via fetch_tweets() is available.
"""
now = datetime.now()
print "%s: Running linklater" % now.isoformat()
response = deploy_to_tumblr()
template = env.jinja_env.get_template('notification_email.html')
context = {
'blog_name': env.tumblr_blog_name,
'tumblr_post_id': response['id'],
'day_range': env.twitter_timeframe,
'twitter_handle': env.twitter_handle,
'richard_picture': 'http://assets.apps.npr.org/linklater/hippie_linklater.jpg'
}
output = template.render(**context)
subject = env.email_subject_template % now.strftime('%a, %b %d %Y')
connection = boto.ses.connect_to_region('us-east-1')
try:
connection.send_email(
source=env.from_email_address,
subject=subject,
body=None,
html_body=output,
to_addresses=env.to_email_addresses
)
except boto.ses.exceptions.SESAddressNotVerifiedError as e:
print '%s: ERROR An email address has not been verified. Tried to send to %s' % (now.isoformat(), ', '.join(env.to_email_addresses))
@task
def deploy_to_tumblr():
now = datetime.now()
secrets = app_config.get_secrets()
tumblr_api = pytumblr.TumblrRestClient(
secrets['TUMBLR_CONSUMER_KEY'],
secrets['TUMBLR_CONSUMER_SECRET'],
secrets['TUMBLR_TOKEN'],
secrets['TUMBLR_TOKEN_SECRET']
)
body = data.make_tumblr_draft_html()
response = tumblr_api.create_text(env.tumblr_blog_name, state='draft', format='html', body=body.encode('utf8'))
print "%s: Created tumblr draft (id: %s)" % (now.isoformat(), response['id'])
return response
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
"""
Deletes the app from s3
"""
require('settings', provided_by=[production, staging])
utils.confirm(
colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?')" % app_config.DEPLOYMENT_TARGET, "red")
)
with settings(warn_only=True):
flat.delete_folder(app_config.PROJECT_SLUG)
if app_config.DEPLOY_TO_SERVERS:
servers.delete_project()
if app_config.DEPLOY_CRONTAB:
servers.uninstall_crontab()
if app_config.DEPLOY_SERVICES:
servers.nuke_confs() | fabfile/__init__.py |
from email.parser import Parser
from fabric.api import local, require, settings, task
from fabric.state import env
from jinja2 import Environment, FileSystemLoader
from termcolor import colored
from datetime import datetime
import app_config
# Other fabfiles
import assets
import boto.ses
import data
import flat
import issues
import os
import pytumblr
import render
import smtplib
import text
import utils
if app_config.DEPLOY_TO_SERVERS:
import servers
if app_config.DEPLOY_CRONTAB:
import cron_jobs
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
import bootstrap
"""
Base configuration
"""
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
env.settings = None
env.tumblr_blog_name = 'stage-lookatthis'
env.twitter_handle = 'lookatthisstory'
env.twitter_timeframe = '7' # days
env.from_email_address = 'NPR Visuals Linklater <<EMAIL>>'
env.to_email_addresses = ['<EMAIL>', '<EMAIL>', '<EMAIL>']
env.email_subject_template = 'Richard Linklater\'s links for %s'
# Jinja env
fab_path = os.path.realpath(os.path.dirname(__file__))
templates_path = os.path.join(fab_path, '../templates')
env.jinja_env = Environment(loader=FileSystemLoader(templates_path))
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
"""
Run as though on production.
"""
env.settings = 'production'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
env.tumblr_blog_name = 'lookatthis'
@task
def staging():
"""
Run as though on staging.
"""
env.settings = 'staging'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
"""
Work on stable branch.
"""
env.branch = 'stable'
@task
def master():
"""
Work on development branch.
"""
env.branch = 'master'
@task
def branch(branch_name):
"""
Work on any specified branch.
"""
env.branch = branch_name
"""
Running the app
"""
@task
def app(port='8000'):
"""
Serve app.py.
"""
local('gunicorn -b 0.0.0.0:%s --debug --reload app:wsgi_app' % port)
@task
def public_app(port='8001'):
"""
Serve public_app.py.
"""
local('gunicorn -b 0.0.0.0:%s --debug --reload public_app:wsgi_app' % port)
@task
def tests():
"""
Run Python unit tests.
"""
local('nosetests')
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
@task
def update():
"""
Update all application data not in repository (copy, assets, etc).
"""
text.update()
assets.sync()
data.update()
@task
def deploy(remote='origin'):
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging])
if app_config.DEPLOY_TO_SERVERS:
require('branch', provided_by=[stable, master, branch])
if (app_config.DEPLOYMENT_TARGET == 'production' and env.branch != 'stable'):
utils.confirm(
colored("You are trying to deploy the '%s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env.branch, "red")
)
servers.checkout_latest(remote)
servers.fabcast('assets.sync')
servers.install_crontab()
@task
def linklater():
"""
Alerts recipients when Tumblr draft with links scraped from Twitter via fetch_tweets() is available.
"""
now = datetime.now()
print "%s: Running linklater" % now.isoformat()
response = deploy_to_tumblr()
template = env.jinja_env.get_template('notification_email.html')
context = {
'blog_name': env.tumblr_blog_name,
'tumblr_post_id': response['id'],
'day_range': env.twitter_timeframe,
'twitter_handle': env.twitter_handle,
'richard_picture': 'http://assets.apps.npr.org/linklater/hippie_linklater.jpg'
}
output = template.render(**context)
subject = env.email_subject_template % now.strftime('%a, %b %d %Y')
connection = boto.ses.connect_to_region('us-east-1')
try:
connection.send_email(
source=env.from_email_address,
subject=subject,
body=None,
html_body=output,
to_addresses=env.to_email_addresses
)
except boto.ses.exceptions.SESAddressNotVerifiedError as e:
print '%s: ERROR An email address has not been verified. Tried to send to %s' % (now.isoformat(), ', '.join(env.to_email_addresses))
@task
def deploy_to_tumblr():
now = datetime.now()
secrets = app_config.get_secrets()
tumblr_api = pytumblr.TumblrRestClient(
secrets['TUMBLR_CONSUMER_KEY'],
secrets['TUMBLR_CONSUMER_SECRET'],
secrets['TUMBLR_TOKEN'],
secrets['TUMBLR_TOKEN_SECRET']
)
body = data.make_tumblr_draft_html()
response = tumblr_api.create_text(env.tumblr_blog_name, state='draft', format='html', body=body.encode('utf8'))
print "%s: Created tumblr draft (id: %s)" % (now.isoformat(), response['id'])
return response
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
"""
Deletes the app from s3
"""
require('settings', provided_by=[production, staging])
utils.confirm(
colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?')" % app_config.DEPLOYMENT_TARGET, "red")
)
with settings(warn_only=True):
flat.delete_folder(app_config.PROJECT_SLUG)
if app_config.DEPLOY_TO_SERVERS:
servers.delete_project()
if app_config.DEPLOY_CRONTAB:
servers.uninstall_crontab()
if app_config.DEPLOY_SERVICES:
servers.nuke_confs() | 0.376279 | 0.079961 |
from collections.abc import MutableMapping
import json
import os.path
import pathlib
from typing import Any, Optional, Type, Union
import warnings
from weakref import proxy
class ProjectEncoder(json.JSONEncoder):
"""Make project parts serialisable on json dump"""
def default(self, obj):
if isinstance(obj, Sources):
serialisable = obj._sources
return serialisable
if isinstance(obj, Content):
serialisable = {
"alias": obj.alias,
"filename": obj.filename,
"cpath": f"{obj.cpath!s}",
"keyp": obj.keyp,
"source": obj.source,
"exists": obj.exists,
"desc": obj.desc,
"kind": obj.kind,
"tags": obj.tags,
"ignore_keyp": obj.ignore_keyp,
"_type": "indirect.Content"
}
return serialisable
if isinstance(obj, pathlib.Path):
return f"{obj!s}"
if isinstance(obj, Abstraction):
serialisable = {
"alias": obj.alias,
"path": obj.path,
"_type": "indirect.Abstraction"
}
return serialisable
return super().default(obj)
class ProjectDecoder:
def __init__(self, project=None):
self._project = project
def __call__(self, dct):
try:
_type = dct.pop("_type")
except KeyError:
_type = None
if _type is None:
return dct
if _type == "indirect.Content":
decoded = Content(
alias=dct["alias"],
filename=dct["filename"],
cpath=dct["cpath"],
keyp=dct["keyp"],
source=dct["source"],
exists=dct["exists"],
desc=dct["desc"],
kind=dct["kind"],
tags=dct["tags"],
project=self._project,
ignore_keyp=dct["ingore_keyp"]
)
return decoded
return dct
class Abstraction:
__slots__ = ["alias", "path", "previous", "next", "content", '__weakref__']
def __init__(self, alias=None, /, *, path=None):
if alias is None:
alias = ""
self.alias = alias
if path is None:
path = ""
self.path = pathlib.Path(path)
self.previous = None
self.next = None
self.content = None
def __str__(self):
n_next = len(self.next) if self.next is not None else None
n_content = len(self.content) if self.content is not None else None
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" path: {str(self.path)!r}\n"
f" next: {n_next!r}\n"
f" content: {n_content!r}"
)
return str_repr
def __repr__(self):
next_str_reprs = (
[str(x) for x in self.next]
if self.next is not None else None
)
content_str_reprs = (
[str(x) for x in self.content]
if self.content is not None else None
)
obj_repr = {
"alias": self.alias,
"path": self.path,
"next": next_str_reprs,
"content": content_str_reprs
}
return str(obj_repr)
def __contains__(self, alias):
if (self.next is not None) and (alias in self.next):
return True
if (self.content is not None) and (alias in self.content):
return True
return False
def __iadd__(self, other):
if isinstance(other, Abstraction):
if self.next is None:
self.next = {}
self.next[other.alias] = other
elif isinstance(other, Content):
if self.content is None:
self.content = {}
self.content[other.alias] = other
else:
raise TypeError(
"unsupported operand type(s) for +: "
f"'{type(self).__name__}' and '{type(other).__name__}'"
)
return self
def to_dict(self, depth=None):
def make_dct(dct, a, depth, current_depth=0):
dct[a.alias] = {
"path": a.path,
}
if hasattr(a, "content") and (a.content is not None):
dct[a.alias]["content"] = a.content
if depth is None or depth > current_depth:
if hasattr(a, "next"):
if (a.next is None) or (len(a.next) == 0):
return
dct[a.alias]["next"] = {}
for a_ in a.next.values():
make_dct(
dct[a.alias]["next"], a_,
depth, current_depth=current_depth + 1
)
dct = {}
make_dct(dct, self, depth)
return dct
@property
def fullpath(self):
def retrace(a):
if a.previous is not None:
yield from retrace(a.previous)
yield os.path.expandvars(a.path)
return pathlib.Path(*retrace(self))
class KeyPath(list):
def __init__(self, iterable=(), /):
if isinstance(iterable, str):
super().__init__(self.from_string(iterable))
else:
super().__init__(str(k) for k in iterable)
@classmethod
def from_string(cls, s):
as_list = s.split(".")
if len(as_list) == 1 and as_list[0] == "":
as_list = []
return cls(as_list)
def to_string(self):
return ".".join(self)
class View(list):
def __init__(self, iterable=(), /):
if isinstance(iterable, dict):
super().__init__(self.from_dict(iterable))
elif isinstance(iterable, str):
super().__init__([iterable])
else:
super().__init__(KeyPath(x) for x in iterable)
@classmethod
def from_dict(cls, dct):
def decent(d, keyp=None):
if keyp is None:
keyp = []
for key in d:
keyp_ = keyp + [key]
if (d[key] is None) or (len(d[key]) == 0):
yield keyp_
else:
yield from decent(d[key], keyp=keyp_)
return cls(decent(dct))
def to_dict(self):
dct = {}
for keyp in self:
d_ = dct
for key in keyp:
if key not in d_:
d_[key] = {}
d_ = d_[key]
return dct
class Sources(MutableMapping):
def __init__(self) -> None:
self._sources = {}
def __getitem__(self, key: str):
try:
return self._sources[key]
except KeyError as error:
if key == "home":
return pathlib.Path()
raise error
def __setitem__(self, key: str, value: Union[str, pathlib.Path]) -> None:
if isinstance(value, str):
value = pathlib.Path(value)
self._sources[key] = value
def __delitem__(self, key: str) -> None:
del self._sources[key]
def __iter__(self):
return iter(self._sources)
def __len__(self):
return len(self._sources)
def __repr__(self):
return f"{type(self).__name__}({self._sources})"
class Views(MutableMapping):
def __init__(self):
self._views = {}
def __getitem__(self, key: str) -> Type["View"]:
return self._views[key]
def __setitem__(self, key: str, value: Any):
self._views[key] = View(value)
def __delitem__(self, key: str):
del self._views[key]
def __iter__(self):
return iter(self._views)
def __len__(self):
return len(self._views)
def __repr__(self):
return f"{type(self).__name__}({self._views})"
class Project:
def __init__(self, alias=None, /, *, file=None):
self.abstractions = Abstraction("root")
self.views = Views()
self.sources = Sources()
if file is None:
self.file = file
else:
self.load(file)
self.alias = alias
@property
def a(self):
return self.abstractions
@property
def v(self):
return self.views
@property
def s(self):
return self.sources
def load(self, file, reinit=False):
"""Load project from file"""
if reinit:
self.__init__()
file = pathlib.Path(file)
with open(os.path.expandvars(file)) as file_:
details = json.load(file_, object_hook=ProjectDecoder())
# self.abstractions.update(details["abstractions"])
self.sources.update(details["sources"])
self.views.update(details["views"])
self.file = file
self.sources["home"] = self.file.parent
def save(self, file):
file = pathlib.Path(file)
save_obj = {
"sources": self.sources,
"views": self.views,
"abstractions": self.abstractions.to_dict()
}
with open(os.path.expandvars(file), "w") as fp:
json.dump(save_obj, fp, indent=4, cls=ProjectEncoder)
self.file = file
self.sources["home"] = self.file.parent
def add_abstraction(self, alias, *, path=None, view=None):
"""Add abstraction to abstractions
Args:
alias: Short identifier for the new abstraction.
Keyword args:
path: Actual path fragment represented by this abstraction.
If `None`, will use ``alias``.
view: List of KeyPath instances (or equivalents) under which
the new abstraction should be added.
"""
if path is None:
path = alias
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
a = Abstraction(alias, path=path)
a.previous = proxy(last_a)
if last_a.next is None:
last_a.next = {}
last_a.next[alias] = a
def check_view(self, view):
if view is None:
view = View([[]])
elif isinstance(view, str):
try:
view = self.views[view]
except KeyError:
raise LookupError("Could not find view")
elif not isinstance(view, View):
view = View(view)
return view
def rm_abstraction(self, alias, view):
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
_ = last_a.next.pop(alias)
def add_content(
self, alias, filename, *, cpath='',
source="home", check=False, desc=None, kind=None, hash=None,
tags=None, ignore_keyp=False, view=None):
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
c = Content(
alias,
filename=filename.format(*keyp),
cpath=cpath,
keyp=keyp,
source=source,
exists=None, # Implement existence check
desc=desc,
kind=kind,
tags=tags,
project=proxy(self),
ignore_keyp=ignore_keyp
)
if last_a.content is None:
last_a.content = {}
last_a.content[alias] = c
def rm_content(self, alias, view=None):
if view is None:
view = [[]]
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
_ = last_a.content.pop(alias)
def decent_keyp(
self, keyp: Type["KeyPath"]
) -> Union[Type["Abstraction"], Type["Content"]]:
a = self.abstractions
for key in keyp:
try:
a = a.next[key]
except (KeyError, TypeError):
try:
return a.content[key]
except (KeyError, TypeError):
raise LookupError("Invalid KeyPath")
return a
def eval_keyp(self, keyp: Type[KeyPath]) -> Type[pathlib.Path]:
a = self.abstractions
keyp_eval = pathlib.Path()
for key in keyp:
try:
a = a.next[key]
except (KeyError, TypeError):
try:
a = a.content[key]
except (KeyError, TypeError):
raise LookupError("Invalid KeyPath")
finally:
if isinstance(a, Abstraction):
keyp_eval = keyp_eval / os.path.expandvars(a.path)
elif isinstance(a, Content):
keyp_eval = keyp_eval / (
f"{os.path.expandvars(a.cpath)}/"
f"{a.filename}"
)
return keyp_eval
def __getitem__(self, keyp):
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
return self.decent_keyp(keyp)
def __setitem__(self, keyp, item):
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
if len(keyp) == 0:
# Setting root
assert isinstance(item, Abstraction), "Root must be of type Abstraction"
self.abstractions = item
return
a = self.decent_keyp(keyp[:-1])
alias = keyp[-1]
if isinstance(item, Content):
if a.content is None:
a.content = {}
if alias != item.alias:
warnings.warn(
f"Alias mismatch ({alias} != {item.alias})", UserWarning
)
a.content[item.alias] = item
elif isinstance(item, Abstraction):
if a.next is None:
a.next = {}
if alias != item.alias:
warnings.warn(
f"Alias mismatch ({alias} != {item.alias})", UserWarning
)
a.next[item.alias] = item
else:
raise TypeError("Item must be of type Content or Abstraction")
def __repr__(self):
obj_repr = (
f"{type(self).__name__}"
f"(alias={self.alias!r}, file={self.file!r})"
)
return obj_repr
def __str__(self):
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" file: {self.file!r}"
)
return str_repr
class Content:
"""
Args:
alias: Name as stored in project.
filename: Actual name of the file.
cpath: Path snippet inserted between keyp and filename.
keyp: Key trajectory through Abstractions dict.
source: Root of the tree in which this file lies.
exists: Existence indicator.
desc: Description.
kind: Binary, txt? Used if filename has no extension.
_hash: Hash to track file modification.
tags: List of keyword identifiers.
project: Associated project.
ignore_keyp Do not consider keyp for fullpath.
"""
def __init__(
self,
alias: Optional[str] = None, /, *,
filename: Optional[str] = None,
cpath: Optional[str] = None,
keyp: Type["KeyPath"] = None,
source: Optional[str] = None,
exists: Optional[bool] = None,
desc: Optional[str] = None,
kind: Optional[str] = None,
# _hash: ,
tags: Optional[list] = None,
project: Type["Project"] = None,
ignore_keyp: bool = False
):
if alias is None:
alias = ""
self.alias = alias
if filename is None:
filename = ""
self.filename = filename
if cpath is None:
cpath = ""
self.cpath = pathlib.Path(cpath)
if keyp is None:
keyp = []
self.keyp = KeyPath(keyp)
if source is None:
source = "home"
self.source = source
self.exists = exists
if desc is None:
desc = ""
self.desc = desc
self.kind = kind
if tags is None:
tags = []
self.tags = tags
self.project = project
self.ignore_keyp = ignore_keyp
@property
def fullpath(self):
if self.project is None:
fullpath_ = pathlib.Path(
f"{os.path.expandvars(self.cpath)}/"
f"{self.filename}"
)
else:
if not self.ignore_keyp:
keyp_eval = self.project.eval_keyp(self.keyp)
else:
keyp_eval = ""
fullpath_ = pathlib.Path(
f"{os.path.expandvars(self.project.sources[self.source])}/"
f"{keyp_eval}/"
f"{os.path.expandvars(self.cpath)}/"
f"{self.filename}"
)
return fullpath_
def __repr__(self):
obj_repr = (
f"{type(self).__name__}("
f"alias={self.alias!r}, "
f"filename={self.filename!r}, "
f"cpath={self.cpath!r}, "
f"keyp={self.keyp!r}, "
f"source={self.source!r}, "
f"exists={self.exists!r}, "
f"desc={self.desc!r}, "
f"kind={self.kind!r}', "
f"tags={self.tags!r}, "
f"project={self.project.__repr__()}, "
f"ignore_keyp={self.ignore_keyp!r})"
)
return obj_repr
def __str__(self):
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" filename: {self.filename!r}\n"
f" cpath: {str(self.cpath)!r}\n"
f" keyp: {self.keyp!r}\n"
f" source: {self.source!r}\n"
f" exists: {self.exists!r}\n"
f" desc: {self.desc!r}\n"
f" kind: {self.kind!r}\n"
f" tags: {self.tags!r}\n"
f" project: {self.project!s}\n"
f" ignore keyp: {self.ignore_keyp!r}"
)
return str_repr | indirect/indirect.py | from collections.abc import MutableMapping
import json
import os.path
import pathlib
from typing import Any, Optional, Type, Union
import warnings
from weakref import proxy
class ProjectEncoder(json.JSONEncoder):
"""Make project parts serialisable on json dump"""
def default(self, obj):
if isinstance(obj, Sources):
serialisable = obj._sources
return serialisable
if isinstance(obj, Content):
serialisable = {
"alias": obj.alias,
"filename": obj.filename,
"cpath": f"{obj.cpath!s}",
"keyp": obj.keyp,
"source": obj.source,
"exists": obj.exists,
"desc": obj.desc,
"kind": obj.kind,
"tags": obj.tags,
"ignore_keyp": obj.ignore_keyp,
"_type": "indirect.Content"
}
return serialisable
if isinstance(obj, pathlib.Path):
return f"{obj!s}"
if isinstance(obj, Abstraction):
serialisable = {
"alias": obj.alias,
"path": obj.path,
"_type": "indirect.Abstraction"
}
return serialisable
return super().default(obj)
class ProjectDecoder:
def __init__(self, project=None):
self._project = project
def __call__(self, dct):
try:
_type = dct.pop("_type")
except KeyError:
_type = None
if _type is None:
return dct
if _type == "indirect.Content":
decoded = Content(
alias=dct["alias"],
filename=dct["filename"],
cpath=dct["cpath"],
keyp=dct["keyp"],
source=dct["source"],
exists=dct["exists"],
desc=dct["desc"],
kind=dct["kind"],
tags=dct["tags"],
project=self._project,
ignore_keyp=dct["ingore_keyp"]
)
return decoded
return dct
class Abstraction:
__slots__ = ["alias", "path", "previous", "next", "content", '__weakref__']
def __init__(self, alias=None, /, *, path=None):
if alias is None:
alias = ""
self.alias = alias
if path is None:
path = ""
self.path = pathlib.Path(path)
self.previous = None
self.next = None
self.content = None
def __str__(self):
n_next = len(self.next) if self.next is not None else None
n_content = len(self.content) if self.content is not None else None
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" path: {str(self.path)!r}\n"
f" next: {n_next!r}\n"
f" content: {n_content!r}"
)
return str_repr
def __repr__(self):
next_str_reprs = (
[str(x) for x in self.next]
if self.next is not None else None
)
content_str_reprs = (
[str(x) for x in self.content]
if self.content is not None else None
)
obj_repr = {
"alias": self.alias,
"path": self.path,
"next": next_str_reprs,
"content": content_str_reprs
}
return str(obj_repr)
def __contains__(self, alias):
if (self.next is not None) and (alias in self.next):
return True
if (self.content is not None) and (alias in self.content):
return True
return False
def __iadd__(self, other):
if isinstance(other, Abstraction):
if self.next is None:
self.next = {}
self.next[other.alias] = other
elif isinstance(other, Content):
if self.content is None:
self.content = {}
self.content[other.alias] = other
else:
raise TypeError(
"unsupported operand type(s) for +: "
f"'{type(self).__name__}' and '{type(other).__name__}'"
)
return self
def to_dict(self, depth=None):
def make_dct(dct, a, depth, current_depth=0):
dct[a.alias] = {
"path": a.path,
}
if hasattr(a, "content") and (a.content is not None):
dct[a.alias]["content"] = a.content
if depth is None or depth > current_depth:
if hasattr(a, "next"):
if (a.next is None) or (len(a.next) == 0):
return
dct[a.alias]["next"] = {}
for a_ in a.next.values():
make_dct(
dct[a.alias]["next"], a_,
depth, current_depth=current_depth + 1
)
dct = {}
make_dct(dct, self, depth)
return dct
@property
def fullpath(self):
def retrace(a):
if a.previous is not None:
yield from retrace(a.previous)
yield os.path.expandvars(a.path)
return pathlib.Path(*retrace(self))
class KeyPath(list):
def __init__(self, iterable=(), /):
if isinstance(iterable, str):
super().__init__(self.from_string(iterable))
else:
super().__init__(str(k) for k in iterable)
@classmethod
def from_string(cls, s):
as_list = s.split(".")
if len(as_list) == 1 and as_list[0] == "":
as_list = []
return cls(as_list)
def to_string(self):
return ".".join(self)
class View(list):
def __init__(self, iterable=(), /):
if isinstance(iterable, dict):
super().__init__(self.from_dict(iterable))
elif isinstance(iterable, str):
super().__init__([iterable])
else:
super().__init__(KeyPath(x) for x in iterable)
@classmethod
def from_dict(cls, dct):
def decent(d, keyp=None):
if keyp is None:
keyp = []
for key in d:
keyp_ = keyp + [key]
if (d[key] is None) or (len(d[key]) == 0):
yield keyp_
else:
yield from decent(d[key], keyp=keyp_)
return cls(decent(dct))
def to_dict(self):
dct = {}
for keyp in self:
d_ = dct
for key in keyp:
if key not in d_:
d_[key] = {}
d_ = d_[key]
return dct
class Sources(MutableMapping):
def __init__(self) -> None:
self._sources = {}
def __getitem__(self, key: str):
try:
return self._sources[key]
except KeyError as error:
if key == "home":
return pathlib.Path()
raise error
def __setitem__(self, key: str, value: Union[str, pathlib.Path]) -> None:
if isinstance(value, str):
value = pathlib.Path(value)
self._sources[key] = value
def __delitem__(self, key: str) -> None:
del self._sources[key]
def __iter__(self):
return iter(self._sources)
def __len__(self):
return len(self._sources)
def __repr__(self):
return f"{type(self).__name__}({self._sources})"
class Views(MutableMapping):
def __init__(self):
self._views = {}
def __getitem__(self, key: str) -> Type["View"]:
return self._views[key]
def __setitem__(self, key: str, value: Any):
self._views[key] = View(value)
def __delitem__(self, key: str):
del self._views[key]
def __iter__(self):
return iter(self._views)
def __len__(self):
return len(self._views)
def __repr__(self):
return f"{type(self).__name__}({self._views})"
class Project:
def __init__(self, alias=None, /, *, file=None):
self.abstractions = Abstraction("root")
self.views = Views()
self.sources = Sources()
if file is None:
self.file = file
else:
self.load(file)
self.alias = alias
@property
def a(self):
return self.abstractions
@property
def v(self):
return self.views
@property
def s(self):
return self.sources
def load(self, file, reinit=False):
"""Load project from file"""
if reinit:
self.__init__()
file = pathlib.Path(file)
with open(os.path.expandvars(file)) as file_:
details = json.load(file_, object_hook=ProjectDecoder())
# self.abstractions.update(details["abstractions"])
self.sources.update(details["sources"])
self.views.update(details["views"])
self.file = file
self.sources["home"] = self.file.parent
def save(self, file):
file = pathlib.Path(file)
save_obj = {
"sources": self.sources,
"views": self.views,
"abstractions": self.abstractions.to_dict()
}
with open(os.path.expandvars(file), "w") as fp:
json.dump(save_obj, fp, indent=4, cls=ProjectEncoder)
self.file = file
self.sources["home"] = self.file.parent
def add_abstraction(self, alias, *, path=None, view=None):
"""Add abstraction to abstractions
Args:
alias: Short identifier for the new abstraction.
Keyword args:
path: Actual path fragment represented by this abstraction.
If `None`, will use ``alias``.
view: List of KeyPath instances (or equivalents) under which
the new abstraction should be added.
"""
if path is None:
path = alias
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
a = Abstraction(alias, path=path)
a.previous = proxy(last_a)
if last_a.next is None:
last_a.next = {}
last_a.next[alias] = a
def check_view(self, view):
if view is None:
view = View([[]])
elif isinstance(view, str):
try:
view = self.views[view]
except KeyError:
raise LookupError("Could not find view")
elif not isinstance(view, View):
view = View(view)
return view
def rm_abstraction(self, alias, view):
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
_ = last_a.next.pop(alias)
def add_content(
self, alias, filename, *, cpath='',
source="home", check=False, desc=None, kind=None, hash=None,
tags=None, ignore_keyp=False, view=None):
view = self.check_view(view)
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
c = Content(
alias,
filename=filename.format(*keyp),
cpath=cpath,
keyp=keyp,
source=source,
exists=None, # Implement existence check
desc=desc,
kind=kind,
tags=tags,
project=proxy(self),
ignore_keyp=ignore_keyp
)
if last_a.content is None:
last_a.content = {}
last_a.content[alias] = c
def rm_content(self, alias, view=None):
if view is None:
view = [[]]
for keyp in view:
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
last_a = self.decent_keyp(keyp)
assert isinstance(last_a, Abstraction)
_ = last_a.content.pop(alias)
def decent_keyp(
self, keyp: Type["KeyPath"]
) -> Union[Type["Abstraction"], Type["Content"]]:
a = self.abstractions
for key in keyp:
try:
a = a.next[key]
except (KeyError, TypeError):
try:
return a.content[key]
except (KeyError, TypeError):
raise LookupError("Invalid KeyPath")
return a
def eval_keyp(self, keyp: Type[KeyPath]) -> Type[pathlib.Path]:
a = self.abstractions
keyp_eval = pathlib.Path()
for key in keyp:
try:
a = a.next[key]
except (KeyError, TypeError):
try:
a = a.content[key]
except (KeyError, TypeError):
raise LookupError("Invalid KeyPath")
finally:
if isinstance(a, Abstraction):
keyp_eval = keyp_eval / os.path.expandvars(a.path)
elif isinstance(a, Content):
keyp_eval = keyp_eval / (
f"{os.path.expandvars(a.cpath)}/"
f"{a.filename}"
)
return keyp_eval
def __getitem__(self, keyp):
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
return self.decent_keyp(keyp)
def __setitem__(self, keyp, item):
if isinstance(keyp, str):
keyp = KeyPath.from_string(keyp)
if len(keyp) == 0:
# Setting root
assert isinstance(item, Abstraction), "Root must be of type Abstraction"
self.abstractions = item
return
a = self.decent_keyp(keyp[:-1])
alias = keyp[-1]
if isinstance(item, Content):
if a.content is None:
a.content = {}
if alias != item.alias:
warnings.warn(
f"Alias mismatch ({alias} != {item.alias})", UserWarning
)
a.content[item.alias] = item
elif isinstance(item, Abstraction):
if a.next is None:
a.next = {}
if alias != item.alias:
warnings.warn(
f"Alias mismatch ({alias} != {item.alias})", UserWarning
)
a.next[item.alias] = item
else:
raise TypeError("Item must be of type Content or Abstraction")
def __repr__(self):
obj_repr = (
f"{type(self).__name__}"
f"(alias={self.alias!r}, file={self.file!r})"
)
return obj_repr
def __str__(self):
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" file: {self.file!r}"
)
return str_repr
class Content:
"""
Args:
alias: Name as stored in project.
filename: Actual name of the file.
cpath: Path snippet inserted between keyp and filename.
keyp: Key trajectory through Abstractions dict.
source: Root of the tree in which this file lies.
exists: Existence indicator.
desc: Description.
kind: Binary, txt? Used if filename has no extension.
_hash: Hash to track file modification.
tags: List of keyword identifiers.
project: Associated project.
ignore_keyp Do not consider keyp for fullpath.
"""
def __init__(
self,
alias: Optional[str] = None, /, *,
filename: Optional[str] = None,
cpath: Optional[str] = None,
keyp: Type["KeyPath"] = None,
source: Optional[str] = None,
exists: Optional[bool] = None,
desc: Optional[str] = None,
kind: Optional[str] = None,
# _hash: ,
tags: Optional[list] = None,
project: Type["Project"] = None,
ignore_keyp: bool = False
):
if alias is None:
alias = ""
self.alias = alias
if filename is None:
filename = ""
self.filename = filename
if cpath is None:
cpath = ""
self.cpath = pathlib.Path(cpath)
if keyp is None:
keyp = []
self.keyp = KeyPath(keyp)
if source is None:
source = "home"
self.source = source
self.exists = exists
if desc is None:
desc = ""
self.desc = desc
self.kind = kind
if tags is None:
tags = []
self.tags = tags
self.project = project
self.ignore_keyp = ignore_keyp
@property
def fullpath(self):
if self.project is None:
fullpath_ = pathlib.Path(
f"{os.path.expandvars(self.cpath)}/"
f"{self.filename}"
)
else:
if not self.ignore_keyp:
keyp_eval = self.project.eval_keyp(self.keyp)
else:
keyp_eval = ""
fullpath_ = pathlib.Path(
f"{os.path.expandvars(self.project.sources[self.source])}/"
f"{keyp_eval}/"
f"{os.path.expandvars(self.cpath)}/"
f"{self.filename}"
)
return fullpath_
def __repr__(self):
obj_repr = (
f"{type(self).__name__}("
f"alias={self.alias!r}, "
f"filename={self.filename!r}, "
f"cpath={self.cpath!r}, "
f"keyp={self.keyp!r}, "
f"source={self.source!r}, "
f"exists={self.exists!r}, "
f"desc={self.desc!r}, "
f"kind={self.kind!r}', "
f"tags={self.tags!r}, "
f"project={self.project.__repr__()}, "
f"ignore_keyp={self.ignore_keyp!r})"
)
return obj_repr
def __str__(self):
str_repr = (
f"{type(self).__name__}\n"
f" alias: {self.alias!r}\n"
f" filename: {self.filename!r}\n"
f" cpath: {str(self.cpath)!r}\n"
f" keyp: {self.keyp!r}\n"
f" source: {self.source!r}\n"
f" exists: {self.exists!r}\n"
f" desc: {self.desc!r}\n"
f" kind: {self.kind!r}\n"
f" tags: {self.tags!r}\n"
f" project: {self.project!s}\n"
f" ignore keyp: {self.ignore_keyp!r}"
)
return str_repr | 0.782829 | 0.136695 |
import json
from unittest import TestCase
from unittest.mock import Mock
from main import AtonCore
class TestDrawingCards(TestCase):
def test_draw_cards_from_deck(self):
aton = AtonCore()
aton.red.deck = [1, 2, 3, 4, 4, 3, 2, 1]
aton.start()
self.assertEqual(aton.red.hand, [1, 2, 3, 4])
self.assertEqual(aton.red.deck, [4, 3, 2, 1])
def test_use_discard_as_deck_if_deck_is_too_small(self):
aton = AtonCore()
aton.red.deck = [2, 3, 4]
aton.red.discard = [1, 1, 1, 1, 1]
aton.start()
self.assertEqual(aton.red.hand, [2, 3, 4, 1])
self.assertEqual(aton.red.discard, [])
self.assertEqual(aton.red.deck, [1, 1, 1, 1])
def test_sends_cards_to_users(self):
notifiers = [Mock(), Mock()]
aton = AtonCore(notifiers)
aton.red.deck = [1, 1, 4, 3]
aton.blue.deck = [4, 2, 3, 4]
aton.start()
notifiers[0].assert_called_with(json.dumps({
'message': 'cards_drawn',
'cards': [1, 1, 4, 3]
}))
notifiers[1].assert_called_with(json.dumps({
'message': 'cards_drawn',
'cards': [4, 2, 3, 4]
}))
def test_player_can_exchange_cards_by_default(self):
aton = AtonCore()
self.assertTrue(aton.red.can_exchange_cards)
self.assertTrue(aton.blue.can_exchange_cards)
def test_exchanges_cards(self):
aton = AtonCore()
red = aton.red
red.deck = [1, 3, 2, 2, 4, 1, 3, 1, 2, 2, 4, 4]
aton.start()
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
self.assertFalse(red.can_exchange_cards)
self.assertEqual(red.discard, [1, 3, 2, 2])
self.assertEqual(red.hand, [4, 1, 3, 1])
self.assertEqual(red.deck, [2, 2, 4, 4])
def test_player_can_exchange_cards_only_once(self):
aton = AtonCore()
red = aton.red
red.deck = [1, 2, 3, 4, 4, 3, 1, 2, 1, 1, 1, 1]
aton.start()
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
self.assertEqual(red.discard, [1, 2, 3, 4])
self.assertEqual(red.hand, [4, 3, 1, 2])
self.assertEqual(red.deck, [1, 1, 1, 1])
def test_notifies_when_opponent_exchanges_cards(self):
notifier = Mock()
aton = AtonCore([notifier, None])
aton.start()
aton.execute(json.dumps({
'player': 'blue',
'message': 'exchange_cards',
}))
notifier.assert_called_with(json.dumps({
'message': 'opponent_exchanged_cards',
})) | tests/tests_drawing_cards.py | import json
from unittest import TestCase
from unittest.mock import Mock
from main import AtonCore
class TestDrawingCards(TestCase):
def test_draw_cards_from_deck(self):
aton = AtonCore()
aton.red.deck = [1, 2, 3, 4, 4, 3, 2, 1]
aton.start()
self.assertEqual(aton.red.hand, [1, 2, 3, 4])
self.assertEqual(aton.red.deck, [4, 3, 2, 1])
def test_use_discard_as_deck_if_deck_is_too_small(self):
aton = AtonCore()
aton.red.deck = [2, 3, 4]
aton.red.discard = [1, 1, 1, 1, 1]
aton.start()
self.assertEqual(aton.red.hand, [2, 3, 4, 1])
self.assertEqual(aton.red.discard, [])
self.assertEqual(aton.red.deck, [1, 1, 1, 1])
def test_sends_cards_to_users(self):
notifiers = [Mock(), Mock()]
aton = AtonCore(notifiers)
aton.red.deck = [1, 1, 4, 3]
aton.blue.deck = [4, 2, 3, 4]
aton.start()
notifiers[0].assert_called_with(json.dumps({
'message': 'cards_drawn',
'cards': [1, 1, 4, 3]
}))
notifiers[1].assert_called_with(json.dumps({
'message': 'cards_drawn',
'cards': [4, 2, 3, 4]
}))
def test_player_can_exchange_cards_by_default(self):
aton = AtonCore()
self.assertTrue(aton.red.can_exchange_cards)
self.assertTrue(aton.blue.can_exchange_cards)
def test_exchanges_cards(self):
aton = AtonCore()
red = aton.red
red.deck = [1, 3, 2, 2, 4, 1, 3, 1, 2, 2, 4, 4]
aton.start()
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
self.assertFalse(red.can_exchange_cards)
self.assertEqual(red.discard, [1, 3, 2, 2])
self.assertEqual(red.hand, [4, 1, 3, 1])
self.assertEqual(red.deck, [2, 2, 4, 4])
def test_player_can_exchange_cards_only_once(self):
aton = AtonCore()
red = aton.red
red.deck = [1, 2, 3, 4, 4, 3, 1, 2, 1, 1, 1, 1]
aton.start()
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
aton.execute(json.dumps({
'player': 'red',
'message': 'exchange_cards',
}))
self.assertEqual(red.discard, [1, 2, 3, 4])
self.assertEqual(red.hand, [4, 3, 1, 2])
self.assertEqual(red.deck, [1, 1, 1, 1])
def test_notifies_when_opponent_exchanges_cards(self):
notifier = Mock()
aton = AtonCore([notifier, None])
aton.start()
aton.execute(json.dumps({
'player': 'blue',
'message': 'exchange_cards',
}))
notifier.assert_called_with(json.dumps({
'message': 'opponent_exchanged_cards',
})) | 0.558929 | 0.594728 |
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Infobox
import json
from .utils import *
default_fields = [ 'born', 'died', 'nationality', 'known for' ]
def render_search_result(info, fields=None, keywords=None):
person_id = info.id
person_name = info.title
person = json.loads(info.json_str)
sub_person = {}
if fields:
for field in fields:
val = search_key(field, person)
if val: sub_person[field] = val
if keywords:
sub_key = search_matched_value(keywords, person)
for k, v in sub_key.items():
if k not in sub_person:
sub_person[k] = v
return { 'person_id': int(person_id),
'person_name': person_name,
'person': { 'not_display': sub_person }
}
def person(request, person_id):
person_info = get_object_or_404(Infobox, pk=person_id)
person = json.loads(person_info.json_str)
return render(request, 'person.html',
{ 'person_id': int(person_id),
'person': person,
'person_name': person_info.title,
'highlight': request.GET.getlist('highlight') })
def index(request):
return render(request, 'index.html')
def search(request):
keywords = split_keywords(request.GET.getlist('q'))
if keywords:
matched = search_by_keywords(keywords)
else: matched = None
fields = request.GET.getlist('f')
for key in request.GET:
if not key.startswith('f_'):
continue
field = key[2:].replace('_', ' ')
f_keys = split_keywords(request.GET.getlist(key))
if not f_keys: continue
if field not in fields:
fields.append(field)
if field != 'name':
matched_f = search_by_field(field, f_keys)
else: matched_f = search_by_name(f_keys)
mismatch = []
if matched is not None:
for key, val in matched.items():
if key not in matched_f:
mismatch.append(key)
else: val['keywords'].extend(matched_f[key]['keywords'])
for key in mismatch:
del matched[key]
else: matched = matched_f
keywords = merge_list(keywords, f_keys)
matched = [ (val['keywords'], val['object']) for val in matched.values() ]
matched.sort(key=lambda x: len(x[0]), reverse=True)
if fields:
fields = merge_list(fields, default_fields)
results = [ render_search_result(obj, fields=fields) for _, obj in matched ]
else:
results = [ render_search_result(obj, fields=default_fields, keywords=keywords) for _, obj in matched ]
page = int(request.GET['page']) if 'page' in request.GET else 1
page_size = int(request.GET['page_size']) if 'page_size' in request.GET else 10
paginator = Paginator(results, page_size)
try:
contents = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
contents = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
contents = paginator.page(paginator.num_pages)
show_page_range_min = contents.number - 5
show_page_range_max = contents.number + 5
if show_page_range_max > paginator.num_pages:
show_page_range_max = paginator.num_pages
show_page_range_min = show_page_range_max - 10
elif show_page_range_min <= 1:
show_page_range_min = 1
show_page_range_max = show_page_range_min + 10
show_page_range_max = min(show_page_range_max, paginator.num_pages)
show_page_range_min = max(show_page_range_min, 1)
show_page_range = range(show_page_range_min, show_page_range_max + 1)
return render(request, 'search.html',
{ 'keywords': keywords,
'results': contents,
'show_page_range': show_page_range }) | wiki-index/server/infobox/views.py | from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Infobox
import json
from .utils import *
default_fields = [ 'born', 'died', 'nationality', 'known for' ]
def render_search_result(info, fields=None, keywords=None):
person_id = info.id
person_name = info.title
person = json.loads(info.json_str)
sub_person = {}
if fields:
for field in fields:
val = search_key(field, person)
if val: sub_person[field] = val
if keywords:
sub_key = search_matched_value(keywords, person)
for k, v in sub_key.items():
if k not in sub_person:
sub_person[k] = v
return { 'person_id': int(person_id),
'person_name': person_name,
'person': { 'not_display': sub_person }
}
def person(request, person_id):
person_info = get_object_or_404(Infobox, pk=person_id)
person = json.loads(person_info.json_str)
return render(request, 'person.html',
{ 'person_id': int(person_id),
'person': person,
'person_name': person_info.title,
'highlight': request.GET.getlist('highlight') })
def index(request):
return render(request, 'index.html')
def search(request):
keywords = split_keywords(request.GET.getlist('q'))
if keywords:
matched = search_by_keywords(keywords)
else: matched = None
fields = request.GET.getlist('f')
for key in request.GET:
if not key.startswith('f_'):
continue
field = key[2:].replace('_', ' ')
f_keys = split_keywords(request.GET.getlist(key))
if not f_keys: continue
if field not in fields:
fields.append(field)
if field != 'name':
matched_f = search_by_field(field, f_keys)
else: matched_f = search_by_name(f_keys)
mismatch = []
if matched is not None:
for key, val in matched.items():
if key not in matched_f:
mismatch.append(key)
else: val['keywords'].extend(matched_f[key]['keywords'])
for key in mismatch:
del matched[key]
else: matched = matched_f
keywords = merge_list(keywords, f_keys)
matched = [ (val['keywords'], val['object']) for val in matched.values() ]
matched.sort(key=lambda x: len(x[0]), reverse=True)
if fields:
fields = merge_list(fields, default_fields)
results = [ render_search_result(obj, fields=fields) for _, obj in matched ]
else:
results = [ render_search_result(obj, fields=default_fields, keywords=keywords) for _, obj in matched ]
page = int(request.GET['page']) if 'page' in request.GET else 1
page_size = int(request.GET['page_size']) if 'page_size' in request.GET else 10
paginator = Paginator(results, page_size)
try:
contents = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
contents = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
contents = paginator.page(paginator.num_pages)
show_page_range_min = contents.number - 5
show_page_range_max = contents.number + 5
if show_page_range_max > paginator.num_pages:
show_page_range_max = paginator.num_pages
show_page_range_min = show_page_range_max - 10
elif show_page_range_min <= 1:
show_page_range_min = 1
show_page_range_max = show_page_range_min + 10
show_page_range_max = min(show_page_range_max, paginator.num_pages)
show_page_range_min = max(show_page_range_min, 1)
show_page_range = range(show_page_range_min, show_page_range_max + 1)
return render(request, 'search.html',
{ 'keywords': keywords,
'results': contents,
'show_page_range': show_page_range }) | 0.375936 | 0.088505 |
import os
def test_001(settings, inspector):
"""
Sample doing depth imports
"""
sources = [
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
]
inspector.inspect(*sources)
# First depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-1.scss')
)
assert children == set([
os.path.join(settings.sample_path, "_empty.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
])
parents = inspector.parents(os.path.join(
settings.sample_path, 'main_depth_import-1.scss')
)
assert parents == set([
os.path.join(settings.sample_path, "main_depth_import-2.scss"),
os.path.join(settings.sample_path, "main_depth_import-3.scss"),
])
# Second depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-2.scss')
)
assert children == set([
os.path.join(settings.sample_path, "main_depth_import-1.scss"),
os.path.join(settings.sample_path, "_empty.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
])
parents = inspector.parents(os.path.join(
settings.sample_path, 'main_depth_import-2.scss')
)
assert parents == set([
os.path.join(settings.sample_path, "main_depth_import-3.scss"),
])
# Third depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-3.scss')
)
assert children == set([
os.path.join(settings.sample_path, "main_depth_import-1.scss"),
os.path.join(settings.sample_path, "main_depth_import-2.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
os.path.join(settings.sample_path, "_empty.scss"),
])
parents = inspector.parents(
os.path.join(settings.sample_path, 'main_depth_import-3.scss')
)
assert parents == set([]) | tests/010_inspector/004_depth.py | import os
def test_001(settings, inspector):
"""
Sample doing depth imports
"""
sources = [
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
]
inspector.inspect(*sources)
# First depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-1.scss')
)
assert children == set([
os.path.join(settings.sample_path, "_empty.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
])
parents = inspector.parents(os.path.join(
settings.sample_path, 'main_depth_import-1.scss')
)
assert parents == set([
os.path.join(settings.sample_path, "main_depth_import-2.scss"),
os.path.join(settings.sample_path, "main_depth_import-3.scss"),
])
# Second depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-2.scss')
)
assert children == set([
os.path.join(settings.sample_path, "main_depth_import-1.scss"),
os.path.join(settings.sample_path, "_empty.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
])
parents = inspector.parents(os.path.join(
settings.sample_path, 'main_depth_import-2.scss')
)
assert parents == set([
os.path.join(settings.sample_path, "main_depth_import-3.scss"),
])
# Third depth level
children = inspector.children(
os.path.join(settings.sample_path, 'main_depth_import-3.scss')
)
assert children == set([
os.path.join(settings.sample_path, "main_depth_import-1.scss"),
os.path.join(settings.sample_path, "main_depth_import-2.scss"),
os.path.join(settings.sample_path, "_vendor.scss"),
os.path.join(settings.sample_path, "main_basic.scss"),
os.path.join(settings.sample_path, "_empty.scss"),
])
parents = inspector.parents(
os.path.join(settings.sample_path, 'main_depth_import-3.scss')
)
assert parents == set([]) | 0.554229 | 0.336726 |
# Stdlib
from abc import ABCMeta, abstractmethod
# External
import capnp
# SCION
from lib.errors import SCIONParseError, SCIONTypeError
from lib.util import hex_str
class Serializable(object, metaclass=ABCMeta): # pragma: no cover
"""
Base class for all objects which serialize into raw bytes.
"""
def __init__(self, raw=None):
if raw:
self._parse(raw)
@abstractmethod
def _parse(self, raw):
raise NotImplementedError
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def pack(self):
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __str__(self):
raise NotImplementedError
class Cerealizable(object, metaclass=ABCMeta):
"""
Base class for all objects which serialize to Cap'n Proto.
Each subclass needs to specify a class attribute for the corresponding
proto file (P) and the proto message name (P_CLS), e.g.,
P = capnp.load("proto/foo.capnp")
P_CLS = P.Foo
"""
def __init__(self, p):
assert not isinstance(p, bytes), type(p)
self.p = p
self._packed = False
@classmethod
def from_raw(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
return cls(cls.P_CLS.from_bytes_packed(raw).as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@classmethod
def from_raw_multiple(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
for p in cls.P_CLS.read_multiple_bytes_packed(raw):
yield cls(p.as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def from_proto(cls, p): # pragma: no cover
return cls(p)
def proto(self):
return self.p
@classmethod
def from_dict(cls, d):
return cls(cls.P_CLS.new_message(**d))
def to_dict(self):
return self.proto().to_dict()
def pack(self, *args, **kwargs):
assert not self._packed, "May only be packed once"
self._packed = True
return self._pack(*args, **kwargs)
def _pack(self):
return self.proto().to_bytes_packed()
def __bool__(self):
return True
def __len__(self):
return self.proto().total_size.word_count * 8
def copy(self):
return type(self)(self.proto().copy())
def __copy__(self):
return type(self)(self.proto().copy())
def __deepcopy__(self, memo):
# http://stackoverflow.com/a/15774013
inst = type(self)(self.p.copy())
memo[id(self)] = inst
return inst
def __eq__(self, other): # pragma: no cover
raise NotImplementedError
def short_desc(self):
return str(self.proto())
def __str__(self):
return "%s: %s" % (self.NAME, self.short_desc())
class L4HeaderBase(Serializable, metaclass=ABCMeta): # pragma: no cover
"""
Base class for L4 headers.
"""
TYPE = None
def pack(self, payload, checksum=None):
self.total_len = self.LEN + len(payload)
if checksum is None:
checksum = self._calc_checksum(payload)
return self._pack(checksum)
@abstractmethod
def validate(self, payload):
raise NotImplementedError
class PacketBase(Serializable): # pragma: no cover
"""
Base class for packets.
"""
def __init__(self, raw=None):
"""
Initialize an instance of the class PacketBase.
"""
self._payload = b""
super().__init__(raw)
def get_payload(self):
return self._payload
def set_payload(self, new_payload):
assert isinstance(new_payload, (Serializable, CerealBox)), type(new_payload)
self._payload = new_payload
class CerealBox(object, metaclass=ABCMeta):
"""
CerealBox represents capnp structs that have a unnamed union. In the simplest case, a CerealBox
object contains a Cerealizable object, but CerealBoxes can also be nested
(e.g. CtrlPayload(PathMgmt(RevInfo.from_values(...)))).
All child classes must define the NAME, P_CLS, and CLASS_FIELD_MAP attributes.
"""
def __init__(self, union):
self.union = union
@classmethod
def from_proto(cls, p): # pragma: no cover
"""
Internal constructor, used by sub-classes to create the corresponding python object from a
capnp object. The appropriate python class is selected by looking up the union field name in
CLASS_FIELD_MAP.
"""
type_ = p.which()
for cls_, field in cls.CLASS_FIELD_MAP.items():
if type_ == field:
return cls._from_union(p, cls_.from_proto(getattr(p, type_)))
raise SCIONParseError("Unsupported %s proto type: %s" % (cls.NAME, type_))
@classmethod
def _from_union(cls, p, union): # pragma: no cover
"""
Internal constructor, overridden by sub-classes which have more fields than just a single
unnamed union.
p is passed in to be available to subclasses which override this.
"""
return cls(union)
def proto(self):
"""
Return the corresponding capnp object.
"""
return self.P_CLS.new_message(**{self.type(): self.union.proto()})
def type(self):
"""
Return the type of the union, represented by the union field name.
"""
c = self.CLASS_FIELD_MAP.get(self.union.__class__)
if c is not None:
return c
raise SCIONTypeError("Unsupported %s proto class %s (%s)" %
(self.NAME, self.union.__class__, type(self.union)))
def inner_type(self):
"""
Return the type of the innermost Cerealizable object, represented by the union field name in
the innermost CerealBox object.
"""
if isinstance(self.union, CerealBox):
return self.union.inner_type()
return self.type()
def pack(self):
return self.proto().to_bytes_packed()
def copy(self):
return self.__class__(self.union.copy())
def __len__(self):
return self.proto().total_size.word_count * 8
def __str__(self):
return "%s(%dB): %s" % (self.NAME, len(self), self.union)
class PayloadRaw(Serializable): # pragma: no cover
NAME = "PayloadRaw"
SNIPPET_LEN = 32
def __init__(self, raw=None):
self._raw = b""
super().__init__(raw)
def _parse(self, raw):
self._raw = raw or b""
def from_values(cls, raw):
assert isinstance(raw, bytes), type(raw)
inst = cls()
inst._raw = raw
return inst
def pack(self):
return self._raw
def __eq__(self, other):
return self._raw == other._raw
def __len__(self):
return len(self._raw)
def __str__(self):
s = "%s(%dB): %s" % (
self.NAME, len(self._raw), hex_str(self._raw[:self.SNIPPET_LEN]))
if len(self._raw) > self.SNIPPET_LEN:
s += "[...]"
return s | python/lib/packet/packet_base.py | # Stdlib
from abc import ABCMeta, abstractmethod
# External
import capnp
# SCION
from lib.errors import SCIONParseError, SCIONTypeError
from lib.util import hex_str
class Serializable(object, metaclass=ABCMeta): # pragma: no cover
"""
Base class for all objects which serialize into raw bytes.
"""
def __init__(self, raw=None):
if raw:
self._parse(raw)
@abstractmethod
def _parse(self, raw):
raise NotImplementedError
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def pack(self):
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __str__(self):
raise NotImplementedError
class Cerealizable(object, metaclass=ABCMeta):
"""
Base class for all objects which serialize to Cap'n Proto.
Each subclass needs to specify a class attribute for the corresponding
proto file (P) and the proto message name (P_CLS), e.g.,
P = capnp.load("proto/foo.capnp")
P_CLS = P.Foo
"""
def __init__(self, p):
assert not isinstance(p, bytes), type(p)
self.p = p
self._packed = False
@classmethod
def from_raw(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
return cls(cls.P_CLS.from_bytes_packed(raw).as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@classmethod
def from_raw_multiple(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
for p in cls.P_CLS.read_multiple_bytes_packed(raw):
yield cls(p.as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def from_proto(cls, p): # pragma: no cover
return cls(p)
def proto(self):
return self.p
@classmethod
def from_dict(cls, d):
return cls(cls.P_CLS.new_message(**d))
def to_dict(self):
return self.proto().to_dict()
def pack(self, *args, **kwargs):
assert not self._packed, "May only be packed once"
self._packed = True
return self._pack(*args, **kwargs)
def _pack(self):
return self.proto().to_bytes_packed()
def __bool__(self):
return True
def __len__(self):
return self.proto().total_size.word_count * 8
def copy(self):
return type(self)(self.proto().copy())
def __copy__(self):
return type(self)(self.proto().copy())
def __deepcopy__(self, memo):
# http://stackoverflow.com/a/15774013
inst = type(self)(self.p.copy())
memo[id(self)] = inst
return inst
def __eq__(self, other): # pragma: no cover
raise NotImplementedError
def short_desc(self):
return str(self.proto())
def __str__(self):
return "%s: %s" % (self.NAME, self.short_desc())
class L4HeaderBase(Serializable, metaclass=ABCMeta): # pragma: no cover
"""
Base class for L4 headers.
"""
TYPE = None
def pack(self, payload, checksum=None):
self.total_len = self.LEN + len(payload)
if checksum is None:
checksum = self._calc_checksum(payload)
return self._pack(checksum)
@abstractmethod
def validate(self, payload):
raise NotImplementedError
class PacketBase(Serializable): # pragma: no cover
"""
Base class for packets.
"""
def __init__(self, raw=None):
"""
Initialize an instance of the class PacketBase.
"""
self._payload = b""
super().__init__(raw)
def get_payload(self):
return self._payload
def set_payload(self, new_payload):
assert isinstance(new_payload, (Serializable, CerealBox)), type(new_payload)
self._payload = new_payload
class CerealBox(object, metaclass=ABCMeta):
"""
CerealBox represents capnp structs that have a unnamed union. In the simplest case, a CerealBox
object contains a Cerealizable object, but CerealBoxes can also be nested
(e.g. CtrlPayload(PathMgmt(RevInfo.from_values(...)))).
All child classes must define the NAME, P_CLS, and CLASS_FIELD_MAP attributes.
"""
def __init__(self, union):
self.union = union
@classmethod
def from_proto(cls, p): # pragma: no cover
"""
Internal constructor, used by sub-classes to create the corresponding python object from a
capnp object. The appropriate python class is selected by looking up the union field name in
CLASS_FIELD_MAP.
"""
type_ = p.which()
for cls_, field in cls.CLASS_FIELD_MAP.items():
if type_ == field:
return cls._from_union(p, cls_.from_proto(getattr(p, type_)))
raise SCIONParseError("Unsupported %s proto type: %s" % (cls.NAME, type_))
@classmethod
def _from_union(cls, p, union): # pragma: no cover
"""
Internal constructor, overridden by sub-classes which have more fields than just a single
unnamed union.
p is passed in to be available to subclasses which override this.
"""
return cls(union)
def proto(self):
"""
Return the corresponding capnp object.
"""
return self.P_CLS.new_message(**{self.type(): self.union.proto()})
def type(self):
"""
Return the type of the union, represented by the union field name.
"""
c = self.CLASS_FIELD_MAP.get(self.union.__class__)
if c is not None:
return c
raise SCIONTypeError("Unsupported %s proto class %s (%s)" %
(self.NAME, self.union.__class__, type(self.union)))
def inner_type(self):
"""
Return the type of the innermost Cerealizable object, represented by the union field name in
the innermost CerealBox object.
"""
if isinstance(self.union, CerealBox):
return self.union.inner_type()
return self.type()
def pack(self):
return self.proto().to_bytes_packed()
def copy(self):
return self.__class__(self.union.copy())
def __len__(self):
return self.proto().total_size.word_count * 8
def __str__(self):
return "%s(%dB): %s" % (self.NAME, len(self), self.union)
class PayloadRaw(Serializable): # pragma: no cover
NAME = "PayloadRaw"
SNIPPET_LEN = 32
def __init__(self, raw=None):
self._raw = b""
super().__init__(raw)
def _parse(self, raw):
self._raw = raw or b""
def from_values(cls, raw):
assert isinstance(raw, bytes), type(raw)
inst = cls()
inst._raw = raw
return inst
def pack(self):
return self._raw
def __eq__(self, other):
return self._raw == other._raw
def __len__(self):
return len(self._raw)
def __str__(self):
s = "%s(%dB): %s" % (
self.NAME, len(self._raw), hex_str(self._raw[:self.SNIPPET_LEN]))
if len(self._raw) > self.SNIPPET_LEN:
s += "[...]"
return s | 0.729616 | 0.236153 |
import config as cfg
from micro_image_large import ScanLinesMicroImage, BitMapMicroImage
from parasite import Parasite
from simulate_data import Simulator
import os
import numpy as np
if __name__ == "__main__":
session_name = "lab_sess"
'''
First, simulate some data using the Simulator class, upholding all rules given by Dragonfruit AI (i.e. body makes
up >=25% of frame)
'''
# Simulator takes as argument:
# session name, number of parasites to render, the size of the render, a list of size multipliers
sim = Simulator(session_name, numSamples=2, size=(500, 500), resize_factors=[1, 2, 4])
sim.show_all_frames() # Show the generated frames in which to draw the parasite
sim.show_everything() # Show superimposed images of the parasite body and the corresponding veins
sim.save_all_data() # Saves the rendered body and veins images as uncompressed TIFFs
'''
Next, Using the Parasite class, process and losslessly compress the body and veins image data and calculate whether
the parasite has cancer; (Number of vein pixels within body makes up >-10% of the number of body pixels)
'''
# Parasite class takes an argument:
# session name (for saving), path of raw body img, path of raw veins img, MicroImage processing technique to use
# I implemented 2 techniques for processing BitMapMicroImage and ScanLinesMicroImage.
par1 = Parasite(session_name+"_0",
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_body.tiff"),
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_veins.tiff"),
ScanLinesMicroImage)
# Show the loaded images (body and veins) superimposed on top of each other
par1.show_image()
print("-----PAR 0-----")
# Perform validation routines that ensure raw image = inv_process(process(raw_image))
print("Body Process & Inverse Validity:", par1.body.validate_process())
print("Veins Process & Inverse Validity:", par1.veins.validate_process())
# Output the number of vein pixels within the body as a percentage of the total number of body pixels
print("Veins to Body %:", par1.veins_body_frac * 100)
# Output whether the current parasite has cancer
print("Has cancer:", par1.has_cancer())
# Save the processed data in compressed numpy arrays
par1.save_data()
'''
Lastly, let's compare the compression rate to the )
'''
# Parasite class takes an argument:
# session name (for saving), path of raw body img, path of raw veins img, MicroImage processing technique to use
# I implemented 2 techniques for processing BitMapMicroImage and ScanLinesMicroImage.
par2 = Parasite(session_name+"_1",
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_body.tiff"),
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_veins.tiff"),
BitMapMicroImage)
# Show the loaded images (body and veins) superimposed on top of each other
par2.show_image()
print("-----PAR 1-----")
# Perform validation routines that ensure raw image = inv_process(process(raw_image))
print("Body Process & Inverse Validity:", par2.body.validate_process())
print("Veins Process & Inverse Validity:", par2.veins.validate_process())
# Output the number of vein pixels within the body as a percentage of the total number of body pixels
print("Veins to Body %:", par2.veins_body_frac * 100)
# Output whether the current parasite has cancer
print("Has cancer:", par2.has_cancer())
# Save the processed data in compressed numpy arrays
par2.save_data() | main.py | import config as cfg
from micro_image_large import ScanLinesMicroImage, BitMapMicroImage
from parasite import Parasite
from simulate_data import Simulator
import os
import numpy as np
if __name__ == "__main__":
session_name = "lab_sess"
'''
First, simulate some data using the Simulator class, upholding all rules given by Dragonfruit AI (i.e. body makes
up >=25% of frame)
'''
# Simulator takes as argument:
# session name, number of parasites to render, the size of the render, a list of size multipliers
sim = Simulator(session_name, numSamples=2, size=(500, 500), resize_factors=[1, 2, 4])
sim.show_all_frames() # Show the generated frames in which to draw the parasite
sim.show_everything() # Show superimposed images of the parasite body and the corresponding veins
sim.save_all_data() # Saves the rendered body and veins images as uncompressed TIFFs
'''
Next, Using the Parasite class, process and losslessly compress the body and veins image data and calculate whether
the parasite has cancer; (Number of vein pixels within body makes up >-10% of the number of body pixels)
'''
# Parasite class takes an argument:
# session name (for saving), path of raw body img, path of raw veins img, MicroImage processing technique to use
# I implemented 2 techniques for processing BitMapMicroImage and ScanLinesMicroImage.
par1 = Parasite(session_name+"_0",
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_body.tiff"),
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_veins.tiff"),
ScanLinesMicroImage)
# Show the loaded images (body and veins) superimposed on top of each other
par1.show_image()
print("-----PAR 0-----")
# Perform validation routines that ensure raw image = inv_process(process(raw_image))
print("Body Process & Inverse Validity:", par1.body.validate_process())
print("Veins Process & Inverse Validity:", par1.veins.validate_process())
# Output the number of vein pixels within the body as a percentage of the total number of body pixels
print("Veins to Body %:", par1.veins_body_frac * 100)
# Output whether the current parasite has cancer
print("Has cancer:", par1.has_cancer())
# Save the processed data in compressed numpy arrays
par1.save_data()
'''
Lastly, let's compare the compression rate to the )
'''
# Parasite class takes an argument:
# session name (for saving), path of raw body img, path of raw veins img, MicroImage processing technique to use
# I implemented 2 techniques for processing BitMapMicroImage and ScanLinesMicroImage.
par2 = Parasite(session_name+"_1",
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_body.tiff"),
os.path.join(cfg.COLLECTED_DIR, session_name + "_1_rf4_veins.tiff"),
BitMapMicroImage)
# Show the loaded images (body and veins) superimposed on top of each other
par2.show_image()
print("-----PAR 1-----")
# Perform validation routines that ensure raw image = inv_process(process(raw_image))
print("Body Process & Inverse Validity:", par2.body.validate_process())
print("Veins Process & Inverse Validity:", par2.veins.validate_process())
# Output the number of vein pixels within the body as a percentage of the total number of body pixels
print("Veins to Body %:", par2.veins_body_frac * 100)
# Output whether the current parasite has cancer
print("Has cancer:", par2.has_cancer())
# Save the processed data in compressed numpy arrays
par2.save_data() | 0.425128 | 0.584805 |
import pandas as pd
import os
from os.path import join
import logging
from datetime import datetime
import sys
# TODO: Quota management
# TODO: Fix file not fond error
# TODO: Make filecount check - if 0 - do not continue
def get_export_files(directory):
"""
Return a list of UPCC export files from given directory
"""
db_export_files = []
for root, folders, files in os.walk(directory):
for f in files:
if f[-3:] == 'txt':
logging.info(u'Found %s', f)
db_export_files.append(join(root,f))
else:
logging.info(u'Scipping %s', f)
logging.info(u'Found %s txt files', len(db_export_files))
return db_export_files
def find_subscribers_by_service(service, is_service, db_file):
"""
"""
# used_columns=[0, 1, 13,38, 20]
# columns = ['userid', 'msisdn', 'service_id', 'quota_id', 'service_package_id']
columns = ['userid', 'service_id', 'subscription_date', 'expiry_date', 'quota_id', 'initial', 'balance', 'consumption', 'service_package_id']
used_columns = [0, 13, 16, 18, 20, 21, 22, 23, 38]
converters = {'subscription_date': str, 'expiry_date': str}
df = pd.read_csv(db_file, usecols=used_columns, names = columns)
df = df.fillna('Not_Found')
if is_service:
sel1 = df['service_id'].str.contains(service) # Change to exact match!!!
# sel1 = df['service_id'].str.match(service) # Change to exact match!!!
elif not is_service:
sel1 = df['service_package_id'].str.contains(service)
# sel1 = df['service_package_id'].str.match(service)
subscriber_list = []
for index, row in df[sel1].iterrows():
subscriber = (row['userid'], row['subscription_date'], row['expiry_date'])
subscriber_list.append(subscriber)
return subscriber_list
def create_subscriber_list(db_export_files, service_list):
"""
Looks for subscribers in db_export_files list,
which have services defined in service_list list
"""
results = {}
for service_id, is_service in service_list:
intermediate_results = []
for db_file in db_export_files:
subscriber_list = find_subscribers_by_service(service_id, is_service, db_file)
logging.info(u'%s subscribers with service %s in %s',
len(subscriber_list),
service_id,
db_file)
intermediate_results.append(subscriber_list)
results[(service_id, is_service)] = intermediate_results
return results
def silent_remove(filename):
try:
logging.info(u'Trying to remove %s', filename)
os.remove(filename)
except FileNotFoundError as e:
logging.info(u'Something happened %s', e)
if e.errno != 'errno.ENOENT':
raise
else:
pass
'''
File "/home/egk/Scripts/Hua/Parse_UPCC_Export.py", line 98, in silent_remove
os.remove(filename)
FileNotFoundError: [Errno 2] No such file or directory: '1494_rmv.txt'
'''
return
class SubscriberList(object):
def __init__(self, subscriber_list):
self.subscriber_list = subscriber_list
def resubscribe_service_mml(self):
"""
Read a dict
{
(service_tuple):[[(msisdn,date), (msisdn,date)...],[...]],
(service_tuple):[[(msisdn,date), (msisdn,date)...],[...]]
}
Generates a list of MML commands to remove and add services
"""
for (service_id, is_service), msisdn_list_of_lists in self.subscriber_list.items():
if is_service:
add_filename = '%s_add.txt' % service_id
rmv_filename = '%s_rmv.txt' % service_id
silent_remove(add_filename)
silent_remove(rmv_filename)
logging.info(u'Processing service %s', service_id)
add_extra_args = '\
SRVUSAGESTATE=Normal, \
SRVROAMINGTYPE=NULL, \
SRVCONTACTMETHOD=None, \
SRVCREATESUBSCRIBER=No, \
PAYMENTFLAG=Yes, \
SRVEXATTR1=255;'
rmv_extra_args = '\
TERMIND=Immediate termination, \
SRVDELETESUBSCRIBER=No;'
for msisdn_list in msisdn_list_of_lists:
for (msisdn, subscription_date) in msisdn_list:
_subscription_date = datetime.strptime(subscription_date, '%Y%m%d%H%M%S')
subscription_date_h = _subscription_date.strftime('%Y&%m&%d&%H&%M&%S')
ADD_PSRV_CMD = 'ADD PSRV: \
USRIDENTIFIER="%s", \
SRVNAME="%s", \
SRVSUBSCRIBEDATE=%s, \
SRVSTARTDATETIME=%s, \
%s\n' % (msisdn, service_id, subscription_date_h, subscription_date_h, add_extra_args)
RMV_PSRV_CMD = 'RMV PSRV: \
USRIDENTIFIER="%s", \
SRVNAME="%s", %s\n' % (msisdn, service_id, rmv_extra_args)
with open(add_filename, 'a') as add_file:
add_file.write(ADD_PSRV_CMD)
with open(rmv_filename, 'a') as rmv_file:
rmv_file.write(RMV_PSRV_CMD)
elif not is_service:
logging.info(u'Processing service Package %s', service_id)
sys.exit('Please complete the program first!')
'''ADD PSRV: USRIDENTIFIER="msisdn",
SRVNAME="service_id",
SRVSUBSCRIBEDATE=2018&10&24&12&07&47,
SRVSTARTDATETIME=2018&10&24&12&07&52,
SRVUSAGESTATE=Normal,
SRVROAMINGTYPE=NULL,
SRVCONTACTMETHOD=None,
SRVCREATESUBSCRIBER=No,
PAYMENTFLAG=Yes,
SRVEXATTR1=255;
RMV PSRV: USRIDENTIFIER="msisdn",
SRVNAME="service_id",
TERMIND=Immediate termination,
SRVDELETESUBSCRIBER=No;
ADD PSRVPKG: USRIDENTIFIER="msisdn",
SRVPKGNAME="service_id",
SRVPKGSUBSCRIBEDATE=2018&10&24&12&09&40,
SRVPKGSTARTDATETIME=2018&10&24&12&09&41,
SRVPKGROAMINGTYPE=NULL,
SRVPKGCONTACTMETHOD=None;
RMV PSRVPKG: USRIDENTIFIER="msisdn",
SRVPKGNAME="service_id",
TERMIND=Immediate termination;
'''
def create_mml_fix_date(self):
"""
Read a dict
{
(service_tuple):[[(msisdn,start_date,expiry_date), (msisdn,start_date,expiry_date)...],[...]],
(service_tuple):[[(msisdn,start_date,expiry_date), (msisdn,start_date,expiry_date)...],[...]]
}
Generates a list of MML commands to modify expiry date
"""
for (service_id, is_service), msisdn_list_of_lists in self.subscriber_list.items():
if is_service:
raise NotImplementedError
raise NotImplementedError
def meta():
root_directory = '/home/egk/Pile/P3/DB_Export'
directory = root_directory + '/Files'
os.chdir(root_directory)
service_list = [('1485', True)]
# service_list = [
# ( '1494', True ),
# ( '1493', True ),
# ( '1486', True )
# ]
logging.info(service_list)
logging.info(u'Working directory %s', directory)
db_export_files = get_export_files(directory)
subscriber_list = create_subscriber_list(db_export_files, service_list)
# create_mml(subscriber_list)
pcrf_subscriber_list = SubscriberList(subscriber_list)
pcrf_subscriber_list.create_mml_fix_date()
if __name__ == '__main__':
logging.basicConfig(
format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO)
meta() | Work/Parse_UPCC_Export.py | import pandas as pd
import os
from os.path import join
import logging
from datetime import datetime
import sys
# TODO: Quota management
# TODO: Fix file not fond error
# TODO: Make filecount check - if 0 - do not continue
def get_export_files(directory):
"""
Return a list of UPCC export files from given directory
"""
db_export_files = []
for root, folders, files in os.walk(directory):
for f in files:
if f[-3:] == 'txt':
logging.info(u'Found %s', f)
db_export_files.append(join(root,f))
else:
logging.info(u'Scipping %s', f)
logging.info(u'Found %s txt files', len(db_export_files))
return db_export_files
def find_subscribers_by_service(service, is_service, db_file):
"""
"""
# used_columns=[0, 1, 13,38, 20]
# columns = ['userid', 'msisdn', 'service_id', 'quota_id', 'service_package_id']
columns = ['userid', 'service_id', 'subscription_date', 'expiry_date', 'quota_id', 'initial', 'balance', 'consumption', 'service_package_id']
used_columns = [0, 13, 16, 18, 20, 21, 22, 23, 38]
converters = {'subscription_date': str, 'expiry_date': str}
df = pd.read_csv(db_file, usecols=used_columns, names = columns)
df = df.fillna('Not_Found')
if is_service:
sel1 = df['service_id'].str.contains(service) # Change to exact match!!!
# sel1 = df['service_id'].str.match(service) # Change to exact match!!!
elif not is_service:
sel1 = df['service_package_id'].str.contains(service)
# sel1 = df['service_package_id'].str.match(service)
subscriber_list = []
for index, row in df[sel1].iterrows():
subscriber = (row['userid'], row['subscription_date'], row['expiry_date'])
subscriber_list.append(subscriber)
return subscriber_list
def create_subscriber_list(db_export_files, service_list):
"""
Looks for subscribers in db_export_files list,
which have services defined in service_list list
"""
results = {}
for service_id, is_service in service_list:
intermediate_results = []
for db_file in db_export_files:
subscriber_list = find_subscribers_by_service(service_id, is_service, db_file)
logging.info(u'%s subscribers with service %s in %s',
len(subscriber_list),
service_id,
db_file)
intermediate_results.append(subscriber_list)
results[(service_id, is_service)] = intermediate_results
return results
def silent_remove(filename):
try:
logging.info(u'Trying to remove %s', filename)
os.remove(filename)
except FileNotFoundError as e:
logging.info(u'Something happened %s', e)
if e.errno != 'errno.ENOENT':
raise
else:
pass
'''
File "/home/egk/Scripts/Hua/Parse_UPCC_Export.py", line 98, in silent_remove
os.remove(filename)
FileNotFoundError: [Errno 2] No such file or directory: '1494_rmv.txt'
'''
return
class SubscriberList(object):
def __init__(self, subscriber_list):
self.subscriber_list = subscriber_list
def resubscribe_service_mml(self):
"""
Read a dict
{
(service_tuple):[[(msisdn,date), (msisdn,date)...],[...]],
(service_tuple):[[(msisdn,date), (msisdn,date)...],[...]]
}
Generates a list of MML commands to remove and add services
"""
for (service_id, is_service), msisdn_list_of_lists in self.subscriber_list.items():
if is_service:
add_filename = '%s_add.txt' % service_id
rmv_filename = '%s_rmv.txt' % service_id
silent_remove(add_filename)
silent_remove(rmv_filename)
logging.info(u'Processing service %s', service_id)
add_extra_args = '\
SRVUSAGESTATE=Normal, \
SRVROAMINGTYPE=NULL, \
SRVCONTACTMETHOD=None, \
SRVCREATESUBSCRIBER=No, \
PAYMENTFLAG=Yes, \
SRVEXATTR1=255;'
rmv_extra_args = '\
TERMIND=Immediate termination, \
SRVDELETESUBSCRIBER=No;'
for msisdn_list in msisdn_list_of_lists:
for (msisdn, subscription_date) in msisdn_list:
_subscription_date = datetime.strptime(subscription_date, '%Y%m%d%H%M%S')
subscription_date_h = _subscription_date.strftime('%Y&%m&%d&%H&%M&%S')
ADD_PSRV_CMD = 'ADD PSRV: \
USRIDENTIFIER="%s", \
SRVNAME="%s", \
SRVSUBSCRIBEDATE=%s, \
SRVSTARTDATETIME=%s, \
%s\n' % (msisdn, service_id, subscription_date_h, subscription_date_h, add_extra_args)
RMV_PSRV_CMD = 'RMV PSRV: \
USRIDENTIFIER="%s", \
SRVNAME="%s", %s\n' % (msisdn, service_id, rmv_extra_args)
with open(add_filename, 'a') as add_file:
add_file.write(ADD_PSRV_CMD)
with open(rmv_filename, 'a') as rmv_file:
rmv_file.write(RMV_PSRV_CMD)
elif not is_service:
logging.info(u'Processing service Package %s', service_id)
sys.exit('Please complete the program first!')
'''ADD PSRV: USRIDENTIFIER="msisdn",
SRVNAME="service_id",
SRVSUBSCRIBEDATE=2018&10&24&12&07&47,
SRVSTARTDATETIME=2018&10&24&12&07&52,
SRVUSAGESTATE=Normal,
SRVROAMINGTYPE=NULL,
SRVCONTACTMETHOD=None,
SRVCREATESUBSCRIBER=No,
PAYMENTFLAG=Yes,
SRVEXATTR1=255;
RMV PSRV: USRIDENTIFIER="msisdn",
SRVNAME="service_id",
TERMIND=Immediate termination,
SRVDELETESUBSCRIBER=No;
ADD PSRVPKG: USRIDENTIFIER="msisdn",
SRVPKGNAME="service_id",
SRVPKGSUBSCRIBEDATE=2018&10&24&12&09&40,
SRVPKGSTARTDATETIME=2018&10&24&12&09&41,
SRVPKGROAMINGTYPE=NULL,
SRVPKGCONTACTMETHOD=None;
RMV PSRVPKG: USRIDENTIFIER="msisdn",
SRVPKGNAME="service_id",
TERMIND=Immediate termination;
'''
def create_mml_fix_date(self):
"""
Read a dict
{
(service_tuple):[[(msisdn,start_date,expiry_date), (msisdn,start_date,expiry_date)...],[...]],
(service_tuple):[[(msisdn,start_date,expiry_date), (msisdn,start_date,expiry_date)...],[...]]
}
Generates a list of MML commands to modify expiry date
"""
for (service_id, is_service), msisdn_list_of_lists in self.subscriber_list.items():
if is_service:
raise NotImplementedError
raise NotImplementedError
    def meta():
        """Entry point: build subscriber lists from DB export files and emit MML.

        NOTE(review): defined at method indentation but takes no ``self`` and
        is invoked as a plain function from the ``__main__`` guard -- confirm
        it is really meant to live at module level.
        """
        root_directory = '/home/egk/Pile/P3/DB_Export'
        directory = root_directory + '/Files'
        os.chdir(root_directory)
        # (service_id, is_service) tuples; True marks a plain service (the
        # package branch elsewhere is taken when the flag is False).
        service_list = [('1485', True)]
    # service_list = [
    #     ( '1494', True ),
    #     ( '1493', True ),
    #     ( '1486', True )
    # ]
        logging.info(service_list)
        logging.info(u'Working directory %s', directory)
        db_export_files = get_export_files(directory)
        subscriber_list = create_subscriber_list(db_export_files, service_list)
        # create_mml(subscriber_list)
        pcrf_subscriber_list = SubscriberList(subscriber_list)
        pcrf_subscriber_list.create_mml_fix_date()
if __name__ == '__main__':
    # Configure logging once for the whole program before doing any work.
    # (The original final line carried fused data-table residue after the
    # meta() call, which made it a syntax error.)
    logging.basicConfig(
        format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',
        level=logging.INFO)
    meta()
from django.http.response import HttpResponse
from cart.models import Cart
from django.shortcuts import render, get_object_or_404, redirect
from .models import Product, Category, Contact
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.http import JsonResponse
import json
# Create your views here.
def home(request):
    """Landing page: show the three highest-rated products."""
    top_rated = Product.objects.all().order_by('-rating')[:3]
    return render(request, 'ecom/home.html', {"products": top_rated})
def about(request):
    """Static "about us" page; no context needed."""
    return render(request, 'ecom/about.html')
def menu(request):
    """Menu page: every product plus per-user like/cart state.

    Builds ``context["products"][pk] = {"item": ..., "is_liked": ...}`` for
    the template, plus flat ``already_liked`` / ``in_cart`` lists.
    """
    products = Product.objects.all()
    categories = Category.objects.all()
    context = {"products": {}}
    already_liked = []
    in_cart = []
    # BUG FIX: the original referenced ``cart`` unconditionally inside the
    # loop, raising NameError for anonymous visitors -- only authenticated
    # users have a Cart.
    cart = None
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
    for p in products:
        context["products"][p.pk] = {
            "item": p,
            "is_liked": p.likes.filter(id=request.user.id).exists(),
        }
        if cart is not None and cart.filter(product=p):
            in_cart.append(p)
        if p.likes.filter(id=request.user.id).exists():
            already_liked.append(p)
    context["categories"] = categories
    context["already_liked"] = already_liked
    context["in_cart"] = in_cart
    # print(context)
    return render(request, 'ecom/menu.html', context)
""" Product Like """
@login_required
def LikeView(request):
    """Toggle the current user's like on a product (AJAX endpoint).

    Expects the product ``id`` in POST and returns the re-rendered like
    section as ``{'form': html}`` JSON.
    """
    product = get_object_or_404(Product, id=request.POST.get('id'))
    # Products the user likes *before* the toggle; adjusted below so the
    # rendered section reflects the new state.
    already_liked = [p for p in Product.objects.all()
                     if p.likes.filter(id=request.user.id).exists()]
    if product.likes.filter(id=request.user.id).exists():
        product.likes.remove(request.user)
        already_liked.remove(product)
    else:
        product.likes.add(request.user)
        already_liked.append(product)
    context = {
        "categories": Category.objects.all(),
        "already_liked": already_liked,
    }
    if request.is_ajax():
        html = render_to_string('ecom/like_section.html', context, request=request)
        return JsonResponse({'form': html})
    # BUG FIX: the original fell off the end (returned None) for non-AJAX
    # requests, which makes Django raise ValueError.
    return HttpResponse(status=204)
@login_required
def wishlist(request):
    """Show the products the logged-in user has liked, flagging cart items."""
    liked = request.user.product_like.all()
    cart_entries = get_object_or_404(Cart, user=request.user).cart_entry.all()
    # Liked products that already have a matching cart entry.
    in_cart = [p for p in liked if cart_entries.filter(product=p)]
    context = {
        "wishlist": liked,
        "in_cart": in_cart,
    }
    return render(request, 'ecom/wishlist.html', context)
@login_required
def remove_from_wishlist(request):
    """Remove a product from the user's likes (AJAX endpoint)."""
    user = request.user
    product = get_object_or_404(Product, id=request.POST.get('id'))
    product.likes.remove(user)
    # Re-query after the removal so the rendered section is up to date.
    context = {
        "wishlist": user.product_like.all()
    }
    if request.is_ajax():
        html = render_to_string('ecom/wishlist_section.html', context, request=request)
        return JsonResponse({'form': html})
    # BUG FIX: the original returned None for non-AJAX requests (Django
    # raises ValueError on a None response); also dropped the unused
    # ``likes`` local.
    return HttpResponse(status=204)
""" Search by product or category """
def search(request):
    """Search products by name, or by category title (the first matching
    category expands to all of its products)."""
    # BUG FIX: request.GET['query'] raised KeyError when the parameter was
    # absent; default to an empty string instead.
    query = request.GET.get('query', '')
    if len(query) >= 150 or len(query.strip()) < 1:
        # Reject empty, whitespace-only and absurdly long queries.
        res = Product.objects.none()
    else:
        by_name = Product.objects.filter(item__icontains=query)
        by_category = Category.objects.filter(title__icontains=query)
        if by_category:
            res = by_name.union(by_category[0].get_products.all())
        else:
            res = by_name
    # Per-user like/cart flags (anonymous users get empty lists).
    in_cart = []
    already_liked = []
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
        for p in res:
            if cart.filter(product=p):
                in_cart.append(p)
            if p.likes.filter(id=request.user.id).exists():
                already_liked.append(p)
    context = {
        'res': res,
        'in_cart': in_cart,
        "already_liked": already_liked
    }
    return render(request, 'ecom/search_results.html', context)
def contact_us(request):
    """Store a contact-form submission, then bounce back to the footer anchor."""
    # BUG FIX: only create a Contact row for actual form submissions; the
    # original also created all-None rows for GET requests.
    if request.method == 'POST':
        Contact.objects.create(
            name=request.POST.get('fname'),
            message=request.POST.get('message'),
            email=request.POST.get('email'),
        )
    # HTTP_REFERER is optional; fall back to the home page when absent
    # (the original raised KeyError).
    return redirect(request.META.get('HTTP_REFERER', '/') + '#footerCtf')
def product_details(request, id):
    """Product detail page with per-user like/cart state and parsed options."""
    # BUG FIX / consistency: use get_object_or_404 like the sibling views;
    # Product.objects.get raised an unhandled DoesNotExist (HTTP 500) for
    # unknown ids.
    product = get_object_or_404(Product, id=id)
    in_cart = []
    already_liked = []
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
        if cart.filter(product=product):
            in_cart.append(product)
        if product.likes.filter(id=request.user.id).exists():
            already_liked.append(product)
    # ``options`` holds JSON such as
    # { "size": ["S", "M", "L"], "crust": ["cheese burst", ...] };
    # treat empty (or null) values as "no options" -- the original only
    # guarded against "" and crashed on None.
    product_opts = json.loads(product.options) if product.options else None
    context = {
        "product": product,
        "already_liked": already_liked,
        "in_cart": in_cart,
        "product_opts": product_opts,
    }
    return render(request, 'ecom/product_details.html', context)
from django.http.response import HttpResponse
from cart.models import Cart
from django.shortcuts import render, get_object_or_404, redirect
from .models import Product, Category, Contact
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.http import JsonResponse
import json


# Create your views here.
def home(request):
    """Landing page: show the three highest-rated products."""
    top_rated = Product.objects.all().order_by('-rating')[:3]
    return render(request, 'ecom/home.html', {"products": top_rated})


def about(request):
    """Static "about us" page; no context needed."""
    return render(request, 'ecom/about.html')


def menu(request):
    """Menu page: every product plus per-user like/cart state."""
    products = Product.objects.all()
    categories = Category.objects.all()
    context = {"products": {}}
    already_liked = []
    in_cart = []
    # BUG FIX: the original referenced ``cart`` unconditionally inside the
    # loop, raising NameError for anonymous visitors.
    cart = None
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
    for p in products:
        context["products"][p.pk] = {
            "item": p,
            "is_liked": p.likes.filter(id=request.user.id).exists(),
        }
        if cart is not None and cart.filter(product=p):
            in_cart.append(p)
        if p.likes.filter(id=request.user.id).exists():
            already_liked.append(p)
    context["categories"] = categories
    context["already_liked"] = already_liked
    context["in_cart"] = in_cart
    return render(request, 'ecom/menu.html', context)


""" Product Like """


@login_required
def LikeView(request):
    """Toggle the current user's like on a product (AJAX endpoint)."""
    product = get_object_or_404(Product, id=request.POST.get('id'))
    # Products the user likes *before* the toggle; adjusted below so the
    # rendered section reflects the new state.
    already_liked = [p for p in Product.objects.all()
                     if p.likes.filter(id=request.user.id).exists()]
    if product.likes.filter(id=request.user.id).exists():
        product.likes.remove(request.user)
        already_liked.remove(product)
    else:
        product.likes.add(request.user)
        already_liked.append(product)
    context = {
        "categories": Category.objects.all(),
        "already_liked": already_liked,
    }
    if request.is_ajax():
        html = render_to_string('ecom/like_section.html', context, request=request)
        return JsonResponse({'form': html})
    # BUG FIX: the original returned None for non-AJAX requests.
    return HttpResponse(status=204)


@login_required
def wishlist(request):
    """Show the products the logged-in user has liked, flagging cart items."""
    liked = request.user.product_like.all()
    cart_entries = get_object_or_404(Cart, user=request.user).cart_entry.all()
    in_cart = [p for p in liked if cart_entries.filter(product=p)]
    context = {
        "wishlist": liked,
        "in_cart": in_cart,
    }
    return render(request, 'ecom/wishlist.html', context)


@login_required
def remove_from_wishlist(request):
    """Remove a product from the user's likes (AJAX endpoint)."""
    user = request.user
    product = get_object_or_404(Product, id=request.POST.get('id'))
    product.likes.remove(user)
    # Re-query after the removal so the rendered section is up to date.
    context = {
        "wishlist": user.product_like.all()
    }
    if request.is_ajax():
        html = render_to_string('ecom/wishlist_section.html', context, request=request)
        return JsonResponse({'form': html})
    # BUG FIX: the original returned None for non-AJAX requests.
    return HttpResponse(status=204)


""" Search by product or category """


def search(request):
    """Search products by name, or by category title (the first matching
    category expands to all of its products)."""
    # BUG FIX: request.GET['query'] raised KeyError when the parameter was
    # absent; default to an empty string instead.
    query = request.GET.get('query', '')
    if len(query) >= 150 or len(query.strip()) < 1:
        res = Product.objects.none()
    else:
        by_name = Product.objects.filter(item__icontains=query)
        by_category = Category.objects.filter(title__icontains=query)
        if by_category:
            res = by_name.union(by_category[0].get_products.all())
        else:
            res = by_name
    # Per-user like/cart flags (anonymous users get empty lists).
    in_cart = []
    already_liked = []
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
        for p in res:
            if cart.filter(product=p):
                in_cart.append(p)
            if p.likes.filter(id=request.user.id).exists():
                already_liked.append(p)
    context = {
        'res': res,
        'in_cart': in_cart,
        "already_liked": already_liked
    }
    return render(request, 'ecom/search_results.html', context)


def contact_us(request):
    """Store a contact-form submission, then bounce back to the footer anchor."""
    # BUG FIX: only create a Contact row for actual form submissions.
    if request.method == 'POST':
        Contact.objects.create(
            name=request.POST.get('fname'),
            message=request.POST.get('message'),
            email=request.POST.get('email'),
        )
    # HTTP_REFERER is optional; fall back to the home page when absent.
    return redirect(request.META.get('HTTP_REFERER', '/') + '#footerCtf')


def product_details(request, id):
    """Product detail page with per-user like/cart state and parsed options."""
    # BUG FIX / consistency: 404 instead of an unhandled DoesNotExist.
    product = get_object_or_404(Product, id=id)
    in_cart = []
    already_liked = []
    if request.user.is_authenticated:
        cart = get_object_or_404(Cart, user=request.user).cart_entry.all()
        if cart.filter(product=product):
            in_cart.append(product)
        if product.likes.filter(id=request.user.id).exists():
            already_liked.append(product)
    # ``options`` holds JSON such as {"size": ["S", "M", "L"], ...};
    # treat empty (or null) values as "no options".
    product_opts = json.loads(product.options) if product.options else None
    context = {
        "product": product,
        "already_liked": already_liked,
        "in_cart": in_cart,
        "product_opts": product_opts,
    }
    return render(request, 'ecom/product_details.html', context)
from .api import Api
from .types import *
class Client(object):
    """Byte API client.

    :param token: Authorization token
    :type token: str
    :param headers: Additional headers **except Authorization**
    :type headers: dict
    """

    def __init__(self, token: str, headers=None):
        """Initialize the underlying :class:`Api` transport."""
        self.api = Api(token, headers)

    def _wrap(self, raw, data_cls):
        """Normalize a raw API reply into a :class:`Response`.

        Deserializes ``raw`` and, when present, converts ``data`` with
        ``data_cls.de_json`` and ``error`` with :class:`Error`.  (Extracted
        from the identical boilerplate repeated in every data-returning
        method; also makes ``rebyte``/``get_colors`` normalize errors
        through :class:`Error` like the other methods did.)
        """
        response = Response.de_json(raw)
        data = None
        error = None
        if hasattr(response, 'data'):
            data = data_cls.de_json(response.data)
        if hasattr(response, 'error'):
            error = Error.de_json(response.error)
        return Response(response.success, data=data, error=error)

    def follow(self, id: str) -> Response:
        """Subscribe to a user.

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.put('account/id/{}/follow'.format(id)))

    def unfollow(self, id: str) -> Response:
        """Unsubscribe from a user.

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.delete('account/id/{}/follow'.format(id)))

    def get_user(self, id: str) -> Response:
        """Get a user profile.

        :param id: User id
        :type id: str
        :rtype: :class:`Response` carrying :class:`Account` data
        """
        return self._wrap(self.api.get('account/id/{}'.format(id)), Account)

    def like(self, id: str) -> Response:
        """Like a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.put('post/id/{}/feedback/like'.format(id)))

    def dislike(self, id: str) -> Response:
        """Remove a like from a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.delete('post/id/{}/feedback/like'.format(id)))

    def comment(self, id: str, text: str) -> Response:
        """Comment on a byte.

        :param id: Byte (post) id
        :type id: str
        :param text: Comment text
        :type text: str
        :rtype: :class:`Response` carrying :class:`Comment` data
        """
        raw = self.api.post('post/id/{}/feedback/comment'.format(id),
                            json_data={
                                'postID': id,
                                'body': text
                            })
        return self._wrap(raw, Comment)

    def delete_comment(self, id: str) -> Response:
        """Delete a comment.

        :param id: Comment id patterned by **{post id}-{comment id}**
        :type id: str
        :rtype: :class:`Response`
        """
        raw = self.api.post('feedback/comment/id/{}'.format(id),
                            json_data={
                                'commentID': id
                            })
        # BUG FIX: the original called Response.de_json twice, feeding an
        # already-parsed Response back into the parser.
        return Response.de_json(raw)

    def loop(self, id: str) -> Response:
        """Increment the loop counter of a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response` carrying :class:`LoopCounter` data
        """
        return self._wrap(self.api.post('post/id/{}/loop'.format(id)), LoopCounter)

    def rebyte(self, id: str) -> Response:
        """Rebyte (repost) a byte.

        (The original docstring said "Increments loop counter" -- a
        copy/paste error from :meth:`loop`.)

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response` carrying :class:`Rebyte` data
        """
        raw = self.api.post('rebyte',
                            json_data={
                                'postID': id
                            })
        return self._wrap(raw, Rebyte)

    def get_colors(self) -> Response:
        """Get the available color schemes.

        :rtype: :class:`Response` carrying :class:`Colors` data
        """
        return self._wrap(self.api.get('account/me/colors'), Colors)

    def set_info(self, bio: str = None, display_name: str = None,
                 username: str = None, color_scheme: int = None) -> Response:
        """Set profile info; only the supplied fields are sent.

        :param bio: New bio
        :type bio: str
        :param display_name: New name to display
        :type display_name: str
        :param username: New username
        :type username: str
        :param color_scheme: Id of new color scheme
        :type color_scheme: int
        :rtype: :class:`Response`
        """
        data = {}
        if bio:
            data['bio'] = bio
        if display_name:
            data['displayName'] = display_name
        if username:
            data['username'] = username
        if color_scheme:
            data['colorScheme'] = color_scheme
        return Response.de_json(self.api.put('account/me', data=data))

    def set_username(self, username: str) -> Response:
        """Set the username."""
        return self.set_info(username=username)

    def set_bio(self, bio: str) -> Response:
        """Set the bio."""
        return self.set_info(bio=bio)

    def set_display_name(self, display_name: str) -> Response:
        """Set the display name."""
        return self.set_info(display_name=display_name)

    def set_color_scheme(self, color_scheme: int) -> Response:
        """Set the color scheme."""
        return self.set_info(color_scheme=color_scheme)
from .api import Api
from .types import *


class Client(object):
    """Byte API client.

    :param token: Authorization token
    :type token: str
    :param headers: Additional headers **except Authorization**
    :type headers: dict
    """

    def __init__(self, token: str, headers=None):
        """Initialize the underlying :class:`Api` transport."""
        self.api = Api(token, headers)

    def _wrap(self, raw, data_cls):
        """Normalize a raw API reply into a :class:`Response`.

        Deserializes ``raw`` and, when present, converts ``data`` with
        ``data_cls.de_json`` and ``error`` with :class:`Error`.
        """
        response = Response.de_json(raw)
        data = None
        error = None
        if hasattr(response, 'data'):
            data = data_cls.de_json(response.data)
        if hasattr(response, 'error'):
            error = Error.de_json(response.error)
        return Response(response.success, data=data, error=error)

    def follow(self, id: str) -> Response:
        """Subscribe to a user.

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.put('account/id/{}/follow'.format(id)))

    def unfollow(self, id: str) -> Response:
        """Unsubscribe from a user.

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.delete('account/id/{}/follow'.format(id)))

    def get_user(self, id: str) -> Response:
        """Get a user profile.

        :param id: User id
        :type id: str
        :rtype: :class:`Response` carrying :class:`Account` data
        """
        return self._wrap(self.api.get('account/id/{}'.format(id)), Account)

    def like(self, id: str) -> Response:
        """Like a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.put('post/id/{}/feedback/like'.format(id)))

    def dislike(self, id: str) -> Response:
        """Remove a like from a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        return Response.de_json(self.api.delete('post/id/{}/feedback/like'.format(id)))

    def comment(self, id: str, text: str) -> Response:
        """Comment on a byte.

        :param id: Byte (post) id
        :type id: str
        :param text: Comment text
        :type text: str
        :rtype: :class:`Response` carrying :class:`Comment` data
        """
        raw = self.api.post('post/id/{}/feedback/comment'.format(id),
                            json_data={
                                'postID': id,
                                'body': text
                            })
        return self._wrap(raw, Comment)

    def delete_comment(self, id: str) -> Response:
        """Delete a comment.

        :param id: Comment id patterned by **{post id}-{comment id}**
        :type id: str
        :rtype: :class:`Response`
        """
        raw = self.api.post('feedback/comment/id/{}'.format(id),
                            json_data={
                                'commentID': id
                            })
        # BUG FIX: the original parsed the response twice with
        # Response.de_json.
        return Response.de_json(raw)

    def loop(self, id: str) -> Response:
        """Increment the loop counter of a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response` carrying :class:`LoopCounter` data
        """
        return self._wrap(self.api.post('post/id/{}/loop'.format(id)), LoopCounter)

    def rebyte(self, id: str) -> Response:
        """Rebyte (repost) a byte.

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response` carrying :class:`Rebyte` data
        """
        raw = self.api.post('rebyte',
                            json_data={
                                'postID': id
                            })
        return self._wrap(raw, Rebyte)

    def get_colors(self) -> Response:
        """Get the available color schemes.

        :rtype: :class:`Response` carrying :class:`Colors` data
        """
        return self._wrap(self.api.get('account/me/colors'), Colors)

    def set_info(self, bio: str = None, display_name: str = None,
                 username: str = None, color_scheme: int = None) -> Response:
        """Set profile info; only the supplied fields are sent.

        :param bio: New bio
        :type bio: str
        :param display_name: New name to display
        :type display_name: str
        :param username: New username
        :type username: str
        :param color_scheme: Id of new color scheme
        :type color_scheme: int
        :rtype: :class:`Response`
        """
        data = {}
        if bio:
            data['bio'] = bio
        if display_name:
            data['displayName'] = display_name
        if username:
            data['username'] = username
        if color_scheme:
            data['colorScheme'] = color_scheme
        return Response.de_json(self.api.put('account/me', data=data))

    def set_username(self, username: str) -> Response:
        """Set the username."""
        return self.set_info(username=username)

    def set_bio(self, bio: str) -> Response:
        """Set the bio."""
        return self.set_info(bio=bio)

    def set_display_name(self, display_name: str) -> Response:
        """Set the display name."""
        return self.set_info(display_name=display_name)

    def set_color_scheme(self, color_scheme: int) -> Response:
        """Set the color scheme."""
        return self.set_info(color_scheme=color_scheme)
import sys, math, argparse
import os.path
import ConfigParser

from pymonetdb.sql.connections import Connection as connect
from pymonetdb.exceptions import Error as DBError

from gsm import utils as gu
from gsm.exceptions import GSMException

# Command-line interface: both config files are optional and default to
# files in the current directory.
parser = argparse.ArgumentParser(prog='gsm')
parser.add_argument('--dbconf', '-d', help='config file of GSM database setting'\
                    ' (default ./dbconfig.cfg)')
parser.add_argument('--conf', "-c", help='config file of input params'\
                    ' (default ./config.cfg)')
parser.add_argument('--version', "-v", action='version', version='%(prog)s 2.1.8')
args = parser.parse_args()

dbconf = args.dbconf
conf = args.conf
if dbconf is None:
    dbconf = './dbconfig.cfg'
if conf is None:
    conf = './config.cfg'
if not os.path.isfile(dbconf):
    raise GSMException('No valid dbconfig file is specified.')
if not os.path.isfile(conf):
    raise GSMException('No valid config file is specified.')

dbcfg = ConfigParser.ConfigParser(allow_no_value=True)
dbcfg.read(dbconf)
cfg = ConfigParser.ConfigParser(allow_no_value=True)
cfg.read(conf)

basecat = cfg.get("gsmparams", "basecat")
cutoff = cfg.getfloat("gsmparams", "fluxCutoff")
# Per-catalogue default flux cutoff when none is configured.
# NOTE(review): this assumes getfloat() yields None for empty values
# (allow_no_value=True) -- confirm with the ConfigParser version in use.
if basecat == 'VLSS':
    if cutoff is None:
        cutoff = 4.0
elif basecat == 'TGSS':
    if cutoff is None:
        cutoff = 0.3
else:
    raise GSMException("Basecat '%s' is not valid." % (basecat))

theta = cfg.getfloat("gsmparams", "assocTheta")
if theta is None:
    theta = 0.00278
storespectraplots = cfg.getboolean("gsmparams", "storespectraplots")

try:
    conn = connect(hostname = dbcfg.get("database", "host")
                  ,database = dbcfg.get("database", "dbname")
                  ,username = dbcfg.get("database", "uname")
                  ,password = dbcfg.get("database", "pword")
                  ,port = dbcfg.get("database", "port")
                  )
    gu.expected_fluxes_in_fov(conn
                             ,basecat
                             ,cfg.getfloat("gsmparams", "RA")
                             ,cfg.getfloat("gsmparams", "DEC")
                             ,cfg.getfloat("gsmparams", "radius")
                             ,theta
                             ,cfg.get("gsmparams", "outfile")
                             ,cutoff
                             ,cfg.get("gsmparams", "patchname")
                             ,storespectraplots
                             ,cfg.getfloat("gsmparams", "deRuiter_radius")
                             )
    conn.close()
# BUG FIX: "except DBError, e:" is Python-2-only syntax and ``e`` was
# unused; a bare "except DBError:" is valid on both Python 2 and 3.
except DBError:
    raise
import sys, math, argparse
import os.path
import ConfigParser

from pymonetdb.sql.connections import Connection as connect
from pymonetdb.exceptions import Error as DBError

from gsm import utils as gu
from gsm.exceptions import GSMException

# Command-line interface: both config files are optional and default to
# files in the current directory.
parser = argparse.ArgumentParser(prog='gsm')
parser.add_argument('--dbconf', '-d', help='config file of GSM database setting'\
                    ' (default ./dbconfig.cfg)')
parser.add_argument('--conf', "-c", help='config file of input params'\
                    ' (default ./config.cfg)')
parser.add_argument('--version', "-v", action='version', version='%(prog)s 2.1.8')
args = parser.parse_args()

dbconf = args.dbconf
conf = args.conf
if dbconf is None:
    dbconf = './dbconfig.cfg'
if conf is None:
    conf = './config.cfg'
if not os.path.isfile(dbconf):
    raise GSMException('No valid dbconfig file is specified.')
if not os.path.isfile(conf):
    raise GSMException('No valid config file is specified.')

dbcfg = ConfigParser.ConfigParser(allow_no_value=True)
dbcfg.read(dbconf)
cfg = ConfigParser.ConfigParser(allow_no_value=True)
cfg.read(conf)

basecat = cfg.get("gsmparams", "basecat")
cutoff = cfg.getfloat("gsmparams", "fluxCutoff")
# Per-catalogue default flux cutoff when none is configured.
if basecat == 'VLSS':
    if cutoff is None:
        cutoff = 4.0
elif basecat == 'TGSS':
    if cutoff is None:
        cutoff = 0.3
else:
    raise GSMException("Basecat '%s' is not valid." % (basecat))

theta = cfg.getfloat("gsmparams", "assocTheta")
if theta is None:
    theta = 0.00278
storespectraplots = cfg.getboolean("gsmparams", "storespectraplots")

try:
    conn = connect(hostname = dbcfg.get("database", "host")
                  ,database = dbcfg.get("database", "dbname")
                  ,username = dbcfg.get("database", "uname")
                  ,password = dbcfg.get("database", "pword")
                  ,port = dbcfg.get("database", "port")
                  )
    gu.expected_fluxes_in_fov(conn
                             ,basecat
                             ,cfg.getfloat("gsmparams", "RA")
                             ,cfg.getfloat("gsmparams", "DEC")
                             ,cfg.getfloat("gsmparams", "radius")
                             ,theta
                             ,cfg.get("gsmparams", "outfile")
                             ,cutoff
                             ,cfg.get("gsmparams", "patchname")
                             ,storespectraplots
                             ,cfg.getfloat("gsmparams", "deRuiter_radius")
                             )
    conn.close()
# BUG FIX: "except DBError, e:" is Python-2-only syntax and ``e`` was
# unused; a bare "except DBError:" is valid on both Python 2 and 3.
except DBError:
    raise
import re

from jinja2 import Template
from praw.models import Comment

import bot_logger
import crypto
import lang
import models
import user_function


# Resend tips to previously unregistered users that are now registered
def replay_pending_tip(reddit, tx_queue, failover_time):
    """Replay tips whose receiver was unregistered when the tip was made.

    For each pending tip: if the receiver has registered in time, send the
    coins and reply to the originating comment; if the tip has expired,
    drop it.  Either way the tip's status is persisted to history storage.
    """
    list_tips = user_function.get_unregistered_tip()
    if not list_tips:
        bot_logger.logger.info("no pending tipping")
        return
    for arr_tip in list_tips:
        tip = models.Tip().create_from_array(arr_tip)
        bot_logger.logger.info("replay tipping check for %s" % str(tip.id))
        # check if it's not too old & replay tipping
        if not tip.is_expired():
            if tip.receiver.is_registered():
                bot_logger.logger.info(
                    "replay tipping %s - %s send %s to %s " % (
                        str(tip.id), tip.sender.username, tip.amount, tip.receiver.username))
                tip.tx_id = crypto.tip_user(tip.sender.address, tip.receiver.address, tip.amount, tx_queue,
                                            failover_time)
                if tip.tx_id:
                    tip.finish = True
                    user_function.remove_pending_tip(tip.id)
                    if tip.message_fullname is not None:
                        # Strip the Reddit fullname prefix (e.g. "t1_") to
                        # recover the bare comment id.
                        msg_id = re.sub(r't\d+_(?P<id>\w+)', r'\g<id>', tip.message_fullname)
                        msg = Comment(reddit, msg_id)
                        msg.reply(Template(lang.message_tip).render(
                            sender=tip.sender.username, receiver=tip.receiver.username, amount=str(tip.amount),
                            value_usd=str(tip.get_value_usd()), txid=tip.tx_id))
            else:
                tip.status = "waiting registration of receiver"
                bot_logger.logger.info(
                    "replay check for %s - user %s not registered " % (str(tip.id), tip.receiver.username))
        else:
            tip.status = "receiver not registered in time"
            # NOTE(review): ``finish`` is True in the success path but ""
            # here; confirm whether False was intended.
            tip.finish = ""
            bot_logger.logger.info(
                "delete old tipping - %s send %s to %s " % (
                    tip.sender.username, tip.amount, tip.receiver.username))
            user_function.remove_pending_tip(tip.id)
        # Persist the (possibly updated) tip for both parties.
        models.HistoryStorage.update_tip(tip.sender.username, tip)
        models.HistoryStorage.update_tip(tip.receiver.username, tip)
import re

from jinja2 import Template
from praw.models import Comment

import bot_logger
import crypto
import lang
import models
import user_function


# Resend tips to previously unregistered users that are now registered
def replay_pending_tip(reddit, tx_queue, failover_time):
    """Replay tips whose receiver was unregistered when the tip was made.

    For each pending tip: if the receiver has registered in time, send the
    coins and reply to the originating comment; if the tip has expired,
    drop it.  Either way the tip's status is persisted to history storage.
    """
    list_tips = user_function.get_unregistered_tip()
    if not list_tips:
        bot_logger.logger.info("no pending tipping")
        return
    for arr_tip in list_tips:
        tip = models.Tip().create_from_array(arr_tip)
        bot_logger.logger.info("replay tipping check for %s" % str(tip.id))
        # check if it's not too old & replay tipping
        if not tip.is_expired():
            if tip.receiver.is_registered():
                bot_logger.logger.info(
                    "replay tipping %s - %s send %s to %s " % (
                        str(tip.id), tip.sender.username, tip.amount, tip.receiver.username))
                tip.tx_id = crypto.tip_user(tip.sender.address, tip.receiver.address, tip.amount, tx_queue,
                                            failover_time)
                if tip.tx_id:
                    tip.finish = True
                    user_function.remove_pending_tip(tip.id)
                    if tip.message_fullname is not None:
                        # Strip the Reddit fullname prefix (e.g. "t1_") to
                        # recover the bare comment id.
                        msg_id = re.sub(r't\d+_(?P<id>\w+)', r'\g<id>', tip.message_fullname)
                        msg = Comment(reddit, msg_id)
                        msg.reply(Template(lang.message_tip).render(
                            sender=tip.sender.username, receiver=tip.receiver.username, amount=str(tip.amount),
                            value_usd=str(tip.get_value_usd()), txid=tip.tx_id))
            else:
                tip.status = "waiting registration of receiver"
                bot_logger.logger.info(
                    "replay check for %s - user %s not registered " % (str(tip.id), tip.receiver.username))
        else:
            tip.status = "receiver not registered in time"
            # NOTE(review): ``finish`` is True in the success path but ""
            # here; confirm whether False was intended.
            tip.finish = ""
            bot_logger.logger.info(
                "delete old tipping - %s send %s to %s " % (
                    tip.sender.username, tip.amount, tip.receiver.username))
            user_function.remove_pending_tip(tip.id)
        # Persist the (possibly updated) tip for both parties.
        models.HistoryStorage.update_tip(tip.sender.username, tip)
        models.HistoryStorage.update_tip(tip.receiver.username, tip)
import os, sys
from AnyQt.QtWidgets import QSizePolicy, QStyle, QMessageBox, QFileDialog
from AnyQt.QtCore import QTimer
from Orange.misc import DistMatrix
from Orange.widgets import widget, gui
from Orange.data import get_sample_datasets_dir
from Orange.widgets.utils.filedialogs import RecentPathsWComboMixin
from Orange.widgets.widget import Output
class OWDistanceFile(widget.OWWidget, RecentPathsWComboMixin):
name = "Distance File"
id = "orange.widgets.unsupervised.distancefile"
description = "Read distances from a file."
icon = "icons/DistanceFile.svg"
priority = 10
category = "Data"
keywords = ["data", "distances", "load", "read"]
class Outputs:
distances = Output("Distances", DistMatrix, dynamic=False)
want_main_area = False
resizing_enabled = False
def __init__(self):
super().__init__()
RecentPathsWComboMixin.__init__(self)
self.loaded_file = ""
vbox = gui.vBox(self.controlArea, "Distance File", addSpace=True)
box = gui.hBox(vbox)
self.file_combo.setMinimumWidth(300)
box.layout().addWidget(self.file_combo)
self.file_combo.activated[int].connect(self.select_file)
button = gui.button(box, self, "...", callback=self.browse_file)
button.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Fixed)
button = gui.button(box, self, "Reload", callback=self.reload, default=True)
button.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))
button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
box = gui.vBox(self.controlArea, "Info", addSpace=True)
self.infoa = gui.widgetLabel(box, "No data loaded.")
self.warnings = gui.widgetLabel(box, " ")
# Set word wrap, so long warnings won't expand the widget
self.warnings.setWordWrap(True)
self.warnings.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.MinimumExpanding)
box = gui.hBox(self.controlArea)
gui.button(
box,
self,
"Browse documentation datasets",
callback=lambda: self.browse_file(True),
autoDefault=False,
)
box.layout().addSpacing(200)
self.set_file_list()
QTimer.singleShot(0, self.open_file)
def set_file_list(self):
super().set_file_list()
def reload(self):
return self.open_file()
def select_file(self, n):
super().select_file(n)
self.set_file_list()
self.open_file()
def browse_file(self, in_demos=False):
if in_demos:
start_file = get_sample_datasets_dir()
if not os.path.exists(start_file):
QMessageBox.information(
None,
"File",
"Cannot find the directory with documentation datasets",
)
return
else:
start_file = self.last_path() or os.path.expanduser("~/")
filename, _ = QFileDialog.getOpenFileName(
self, "Open Distance File", start_file, "(*.dst)"
)
if not filename:
return
self.add_path(filename)
self.open_file()
# Open a file, create data from it and send it over the data channel
def open_file(self):
    """Load the currently selected distance file and emit it on the
    Distances output.

    Falls back to the bare file name in the current working directory if
    the remembered absolute path no longer exists; load errors are shown
    in the widget's info labels instead of being raised.
    """
    self.clear_messages()
    fn = self.last_path()
    if not fn:
        return
    if not os.path.exists(fn):
        # Remembered path is gone (e.g. workflow moved between machines);
        # try the bare file name relative to the cwd instead.
        dir_name, basename = os.path.split(fn)
        if os.path.exists(os.path.join(".", basename)):
            fn = os.path.join(".", basename)
            self.information(
                "Loading '{}' from the current directory.".format(basename)
            )
    if fn == "(none)":
        self.Outputs.distances.send(None)
        self.infoa.setText("No data loaded")
        # BUG FIX: the original also called self.infob.setText(""), but no
        # 'infob' label is ever created in __init__, so this branch raised
        # AttributeError.
        self.warnings.setText("")
        return
    self.loaded_file = ""
    try:
        distances = DistMatrix.from_file(fn)
        self.loaded_file = fn
    except Exception as exc:
        err_value = str(exc)
        self.error("Invalid file format")
        self.infoa.setText("Data was not loaded due to an error.")
        self.warnings.setText(err_value)
        distances = None
    if distances is not None:
        # Typo fix: "points(s)" -> "point(s)".
        self.infoa.setText(
            "{} point(s), ".format(len(distances))
            + (["unlabelled", "labelled"][distances.row_items is not None])
        )
        self.warnings.setText("")
        # Matrix name = file name without its last extension.
        file_name = os.path.split(fn)[1]
        if "." in file_name:
            distances.name = file_name[: file_name.rfind(".")]
        else:
            distances.name = file_name
        self.Outputs.distances.send(distances)
def send_report(self):
    """Add the loaded file name (or a no-data note) to the report."""
    if self.loaded_file:
        self.report_items([("File name", self.loaded_file)])
    else:
        self.report_paragraph("No data was loaded.")
if __name__ == "__main__":
from AnyQt.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWDistanceFile()
ow.show()
a.exec_()
ow.saveSettings() | orange3/Orange/widgets/unsupervised/owdistancefile.py | import os, sys
from AnyQt.QtWidgets import QSizePolicy, QStyle, QMessageBox, QFileDialog
from AnyQt.QtCore import QTimer
from Orange.misc import DistMatrix
from Orange.widgets import widget, gui
from Orange.data import get_sample_datasets_dir
from Orange.widgets.utils.filedialogs import RecentPathsWComboMixin
from Orange.widgets.widget import Output
class OWDistanceFile(widget.OWWidget, RecentPathsWComboMixin):
name = "Distance File"
id = "orange.widgets.unsupervised.distancefile"
description = "Read distances from a file."
icon = "icons/DistanceFile.svg"
priority = 10
category = "Data"
keywords = ["data", "distances", "load", "read"]
class Outputs:
distances = Output("Distances", DistMatrix, dynamic=False)
want_main_area = False
resizing_enabled = False
def __init__(self):
super().__init__()
RecentPathsWComboMixin.__init__(self)
self.loaded_file = ""
vbox = gui.vBox(self.controlArea, "Distance File", addSpace=True)
box = gui.hBox(vbox)
self.file_combo.setMinimumWidth(300)
box.layout().addWidget(self.file_combo)
self.file_combo.activated[int].connect(self.select_file)
button = gui.button(box, self, "...", callback=self.browse_file)
button.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Fixed)
button = gui.button(box, self, "Reload", callback=self.reload, default=True)
button.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))
button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
box = gui.vBox(self.controlArea, "Info", addSpace=True)
self.infoa = gui.widgetLabel(box, "No data loaded.")
self.warnings = gui.widgetLabel(box, " ")
# Set word wrap, so long warnings won't expand the widget
self.warnings.setWordWrap(True)
self.warnings.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.MinimumExpanding)
box = gui.hBox(self.controlArea)
gui.button(
box,
self,
"Browse documentation datasets",
callback=lambda: self.browse_file(True),
autoDefault=False,
)
box.layout().addSpacing(200)
self.set_file_list()
QTimer.singleShot(0, self.open_file)
def set_file_list(self):
super().set_file_list()
def reload(self):
return self.open_file()
def select_file(self, n):
super().select_file(n)
self.set_file_list()
self.open_file()
def browse_file(self, in_demos=False):
if in_demos:
start_file = get_sample_datasets_dir()
if not os.path.exists(start_file):
QMessageBox.information(
None,
"File",
"Cannot find the directory with documentation datasets",
)
return
else:
start_file = self.last_path() or os.path.expanduser("~/")
filename, _ = QFileDialog.getOpenFileName(
self, "Open Distance File", start_file, "(*.dst)"
)
if not filename:
return
self.add_path(filename)
self.open_file()
# Open a file, create data from it and send it over the data channel
def open_file(self):
    """Load the currently selected distance file and emit it on the
    Distances output.

    Falls back to the bare file name in the current working directory if
    the remembered absolute path no longer exists; load errors are shown
    in the widget's info labels instead of being raised.
    """
    self.clear_messages()
    fn = self.last_path()
    if not fn:
        return
    if not os.path.exists(fn):
        # Remembered path is gone (e.g. workflow moved between machines);
        # try the bare file name relative to the cwd instead.
        dir_name, basename = os.path.split(fn)
        if os.path.exists(os.path.join(".", basename)):
            fn = os.path.join(".", basename)
            self.information(
                "Loading '{}' from the current directory.".format(basename)
            )
    if fn == "(none)":
        self.Outputs.distances.send(None)
        self.infoa.setText("No data loaded")
        # BUG FIX: the original also called self.infob.setText(""), but no
        # 'infob' label is ever created in __init__, so this branch raised
        # AttributeError.
        self.warnings.setText("")
        return
    self.loaded_file = ""
    try:
        distances = DistMatrix.from_file(fn)
        self.loaded_file = fn
    except Exception as exc:
        err_value = str(exc)
        self.error("Invalid file format")
        self.infoa.setText("Data was not loaded due to an error.")
        self.warnings.setText(err_value)
        distances = None
    if distances is not None:
        # Typo fix: "points(s)" -> "point(s)".
        self.infoa.setText(
            "{} point(s), ".format(len(distances))
            + (["unlabelled", "labelled"][distances.row_items is not None])
        )
        self.warnings.setText("")
        # Matrix name = file name without its last extension.
        file_name = os.path.split(fn)[1]
        if "." in file_name:
            distances.name = file_name[: file_name.rfind(".")]
        else:
            distances.name = file_name
        self.Outputs.distances.send(distances)
def send_report(self):
if not self.loaded_file:
self.report_paragraph("No data was loaded.")
else:
self.report_items([("File name", self.loaded_file)])
if __name__ == "__main__":
from AnyQt.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWDistanceFile()
ow.show()
a.exec_()
ow.saveSettings() | 0.390476 | 0.101278 |
import json
import os
from aws_lambda_powertools import Logger, Tracer
from chalice import Chalice, ConvertToMiddleware, Cron
from chalicelib import Database
# Chalice application wired with Powertools structured logging and tracing.
app = Chalice(app_name="bobotinho", debug=os.environ.get("DEBUG"))
logger = Logger(service=app.app_name)
tracer = Tracer(service=app.app_name)
app.register_middleware(ConvertToMiddleware(logger.inject_lambda_context))
app.register_middleware(ConvertToMiddleware(tracer.capture_lambda_handler))
# Database connection parameters come from the environment.
# NOTE(review): the password expression was an invalid "<PASSWORD>"
# redaction placeholder in the original; restored here as an environment
# lookup — confirm the actual variable name is DB_PASSWORD.
db: Database = Database(
    name=os.environ["DB_NAME"],
    user=os.environ["DB_USER"],
    password=os.environ["DB_PASSWORD"],
    host=os.environ["DB_HOST"],
    port=os.environ["DB_PORT"],
)
def reset_users_daily(db: "Database") -> None:
    """Give every user their daily cookie back: set ``daily`` to 1
    wherever it has been spent (is 0).

    Args:
        db: database instance
    """
    db.execute("UPDATE public.cookie SET daily = 1 WHERE daily = 0")
def reset_sponsors_daily(db: "Database") -> None:
    """Give sponsors an extra daily cookie: set ``daily`` to 2 for every
    user flagged as a sponsor.

    Args:
        db: database instance
    """
    rows = db.fetch("SELECT id FROM public.user WHERE sponsor IS TRUE;")
    if rows:
        # BUG FIX: the original interpolated str(tuple(ids)); with a single
        # sponsor that renders "(1,)", whose trailing comma is invalid SQL.
        ids = ",".join(str(row[0]) for row in rows)  # [(1,), (2,)] -> "1,2"
        db.execute(f"UPDATE public.cookie SET daily = 2 WHERE id IN ({ids})")
@app.schedule(expression=Cron("0", "9", "*", "*", "?", "*"), name="reset-daily")
def reset_daily(event: dict, context: object = None) -> dict:
    """Reset daily cookies when the scheduled Lambda invocation fires
    (daily at 09:00, per the cron expression).

    Args:
        event (dict): information from the invoke
        context (object, optional): information about the invocation, function
            and execution env. Defaults to None.

    Returns:
        dict: Lambda-style response with ``StatusCode`` and a JSON ``body``
        (the original ``-> bool`` annotation was incorrect).
    """
    try:
        logger.info("Initiating...")
        db.init()
        reset_users_daily(db)
        reset_sponsors_daily(db)
        # NOTE(review): the success body carries {"FunctionError": "Unhandled"},
        # which looks copy-pasted from the error path — confirm intent.
        return {"StatusCode": 200, "body": json.dumps({"FunctionError": "Unhandled"})}
    except Exception as e:
        logger.exception(e)
        return {"StatusCode": 500, "body": json.dumps({"FunctionError": str(e)})}
    finally:
        # Always release the connection, on success and failure alike.
        logger.info("Closing...")
        db.close()
import os
from aws_lambda_powertools import Logger, Tracer
from chalice import Chalice, ConvertToMiddleware, Cron
from chalicelib import Database
app = Chalice(app_name="bobotinho", debug=os.environ.get("DEBUG"))
logger = Logger(service=app.app_name)
tracer = Tracer(service=app.app_name)
app.register_middleware(ConvertToMiddleware(logger.inject_lambda_context))
app.register_middleware(ConvertToMiddleware(tracer.capture_lambda_handler))
db: Database = Database(
name=os.environ["DB_NAME"],
user=os.environ["DB_USER"],
password=<PASSWORD>["<PASSWORD>"],
host=os.environ["DB_HOST"],
port=os.environ["DB_PORT"],
)
def reset_users_daily(db: Database) -> None:
"""Reset users daily cookies (update 'daily' to 1).
Args:
db (Database): database instance
"""
db.execute("UPDATE public.cookie SET daily = 1 WHERE daily = 0")
def reset_sponsors_daily(db: "Database") -> None:
    """Give sponsors an extra daily cookie: set ``daily`` to 2 for every
    user flagged as a sponsor.

    Args:
        db: database instance
    """
    rows = db.fetch("SELECT id FROM public.user WHERE sponsor IS TRUE;")
    if rows:
        # BUG FIX: the original interpolated str(tuple(ids)); with a single
        # sponsor that renders "(1,)", whose trailing comma is invalid SQL.
        ids = ",".join(str(row[0]) for row in rows)  # [(1,), (2,)] -> "1,2"
        db.execute(f"UPDATE public.cookie SET daily = 2 WHERE id IN ({ids})")
@app.schedule(expression=Cron("0", "9", "*", "*", "?", "*"), name="reset-daily")
def reset_daily(event: dict, context: object = None) -> bool:
"""Connect to database and reset daily cookies when Lambda function is invoked.
Args:
event (dict): information from the invoke
context (object, optional): information about the invocation, function
and execution env. Defaults to None.
Returns:
bool: indicates success or failure
"""
try:
logger.info("Initiating...")
db.init()
reset_users_daily(db)
reset_sponsors_daily(db)
return {"StatusCode": 200, "body": json.dumps({"FunctionError": "Unhandled"})}
except Exception as e:
logger.exception(e)
return {"StatusCode": 500, "body": json.dumps({"FunctionError": str(e)})}
finally:
logger.info("Closing...")
db.close() | 0.581065 | 0.104067 |
import numpy as np
# Navigation Views
import navigation_vis.Raster as NavGridView
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm as cm, pyplot as plt, colors as mplotcolors
def plot_grid_data_helper(data, ann=None, ann_sz=12, ann_col="black", title=None, grid=None, cmap=cm.viridis, **kwargs):
    """Render a gridded array through navigation_vis.Raster on the current axes.

    ann: optional per-cell annotation values; grid: any non-None value turns
    on grid lines; remaining kwargs are forwarded to Raster.render.
    Returns the Raster view so callers can keep chaining on it.
    """
    p = NavGridView.Raster(data, ax=plt.gca()).render(cmap=cmap, **kwargs).ticks(minor=False)
    if ann is not None:
        p.show_cell_text(ann, fontsize=ann_sz, color_cb=lambda x: ann_col)
    if grid is not None:
        p.grid()
    if title is not None:
        p.title(title)
    return p
def highlight_cell(x, y, ax=None, **kwargs):
    """Outline grid cell (x, y) with an unfilled 1x1 rectangle and
    return the patch; draws on the current axes when ax is omitted."""
    target_ax = ax or plt.gca()
    patch = plt.Rectangle((x - .5, y - .5), 1, 1, fill=False, **kwargs)
    target_ax.add_patch(patch)
    return patch
def plot_irl_world(S, s_lst_lst=None, titles=("States", "Classes", "Features", "Rewards"),
                   figsize=(18, 12), cbar_pad=1.0, cbar_size="10%",
                   r_key=None, phi_key=None, r_round_to=3, clean_title=False,
                   v_range=None):
    """Draw a 2x2 overview of an IRL gridworld.

    Top row: state indices (with demonstration trajectories overlaid) and
    state classes.  Bottom row: per-state features and rewards.

    Args:
        S: state-space object (supplies idxs, class_ids, features, rewards).
        s_lst_lst: optional list of trajectories to overlay; default none.
        titles: four subplot titles.
        v_range: four optional (vmin, vmax) pairs, one per subplot.
    """
    # Fix: the original used mutable list literals as default arguments;
    # None sentinels keep the signature backward-compatible.
    if s_lst_lst is None:
        s_lst_lst = []
    if v_range is None:
        v_range = [None, None, None, None]
    state_title, class_title, feature_title, reward_title = titles
    v_range = [(None, None) if v is None else v for v in v_range]
    state_range, class_range, feature_range, reward_range = v_range
    if not clean_title:
        feature_title += "(type={})".format(phi_key)
        reward_title += "(type={})".format(r_key)
    plt.figure(figsize=figsize)
    p = NavGridViewPlotter(S, r_key=r_key, phi_key=phi_key)
    plt.subplot(2, 2, 1)
    p.plot_states(
        cmap=cm.viridis, ann_col="white",
        title=state_title, vmin=state_range[0], vmax=state_range[1]).colorbar(
        where="left", pad=cbar_pad, size=cbar_size).grid().add_pixel_trajectories(
        s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(2, 2, 2)
    p.plot_classes(
        cmap=cm.viridis, ann_col="white",
        title=class_title, vmin=class_range[0], vmax=class_range[1]).colorbar(
        where="right", pad=cbar_pad, size=cbar_size).grid()
    plt.subplot(2, 2, 3)
    p.plot_features(
        ann=S.idxs.flatten(), cmap=None, ann_col="white",
        title=feature_title, vmin=feature_range[0], vmax=feature_range[1]).colorbar(
        where="left", pad=cbar_pad, size=cbar_size).grid().add_trajectories(
        s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(2, 2, 4)
    p.plot_array(
        S.rewards(numpyize=True, key=r_key).round(r_round_to),
        cmap=cm.Blues_r, title=reward_title, vmin=reward_range[0], vmax=reward_range[1]).colorbar(
        where="right", pad=cbar_pad, size=cbar_size)
    plt.tight_layout()
def plot_irl_results(S, s_lst_lst, values, loglik_hist,
                     titles=("States", "Features", "Rewards", "Values", "Training Performance"),
                     figsize=(24, 24), cbar_pad=1.0, cbar_size="10%",
                     r_key=None, phi_key=None, r_round_to=3, clean_title=False,
                     v_range=None, learned_lst_lst=None):
    """Draw a 3x2 summary of IRL training results.

    Shows states with demonstration trajectories, features, learned
    rewards and values (each with learned trajectories overlaid), and the
    training log-likelihood curve.

    Args:
        S: state-space object.
        s_lst_lst: demonstration trajectories.
        values: per-state value estimates.
        loglik_hist: per-iteration training log-likelihoods.
        v_range: five optional (vmin, vmax) pairs (last one unused).
        learned_lst_lst: trajectories rolled out under the learned policy.
    """
    # Fix: the original used mutable list literals as default arguments;
    # None sentinels keep the signature backward-compatible.
    if v_range is None:
        v_range = [None, None, None, None, None]
    if learned_lst_lst is None:
        learned_lst_lst = []
    state_title, feature_title, reward_title, value_title, perf_title = titles
    v_range = [(None, None) if v is None else v for v in v_range]
    state_range, feature_range, reward_range, value_range, _ = v_range
    if not clean_title:
        feature_title += "(type={})".format(phi_key)
        reward_title += "(type={})".format(r_key)
    plt.figure(figsize=figsize)
    p = NavGridViewPlotter(S, r_key=r_key, phi_key=phi_key)
    plt.subplot(3, 2, 1)
    p.plot_states(
        cmap=cm.viridis, ann_col="white",
        title=state_title, vmin=state_range[0], vmax=state_range[1]).colorbar(
        where="left", pad=cbar_pad, size=cbar_size).grid().add_pixel_trajectories(
        s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(3, 2, 2)
    p.plot_features(
        ann=S.idxs.flatten(), cmap=cm.viridis, ann_col="white",
        title=feature_title, vmin=feature_range[0], vmax=feature_range[1]).colorbar(
        where="right", pad=cbar_pad, size=cbar_size).grid().add_trajectories(
        s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(3, 2, 3)
    p.plot_array(
        S.rewards(numpyize=True, key=r_key).round(r_round_to),
        cmap=cm.Blues_r, title=reward_title, vmin=reward_range[0], vmax=reward_range[1]).colorbar(
        where="left", pad=cbar_pad, size=cbar_size).add_pixel_trajectories(
        learned_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(3, 2, 4)
    p.plot_array(
        values, cmap=cm.Blues_r, title=value_title, vmin=value_range[0], vmax=value_range[1]).colorbar(
        where="right", pad=cbar_pad, size=cbar_size).add_pixel_trajectories(
        learned_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
    plt.subplot(3, 2, 5)
    plt.plot(list(range(len(loglik_hist))), loglik_hist)
    plt.xlabel("Iteration")
    plt.ylabel("Likelihood")
    plt.title(perf_title)
    plt.tight_layout()
class NavGridViewPlotter:
    """High-level plotting facade for a navigation grid MDP: wraps
    plot_grid_data_helper with accessors for the states, classes,
    features and rewards of the state space ``S``."""

    def __init__(self, S, R=None, cartesian=False, r_key=None, phi_key=None):
        self.S = S
        # Use explicitly supplied rewards, otherwise query the state space.
        self.R = S.rewards(key=r_key) if R is None else R
        self.PHI_gridded = self.S.features(gridded=True, key=phi_key)
        self.R_grided = self.S._organize_to_grid(self.R)
        self.class_ids_grided = self.S._organize_to_grid(self.S.class_ids)
        self.idxs_gridded = self.S.idxs
        # cartesian=True flips the y axis so plots use a bottom-left origin.
        self.cartesian = cartesian
        self.p = None  # Raster view of the most recent plot_* call

    def highlight_terminal_states(self):
        """Outline the cells of all terminal states on the current plot."""
        for s in self.S.get_terminal_states():
            r, c = s.location
            highlight_cell(c, r, ax=self.p.ax, color="white", linewidth=5)

    def plot_rewards(self, title="Rewards", *args, **kwargs):
        """Plot per-state rewards, annotated with their values."""
        data = self.R_grided[..., np.newaxis, np.newaxis, np.newaxis]
        ann = self.R
        self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
        if self.cartesian:
            self.p.ax.invert_yaxis()
        return self.p

    def plot_states(self, title="States", *args, **kwargs):
        """Plot state indices; terminal states are outlined."""
        data = self.idxs_gridded[..., np.newaxis, np.newaxis, np.newaxis]
        ann = self.S.idxs.flatten()
        self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
        self.highlight_terminal_states()
        if self.cartesian:
            self.p.ax.invert_yaxis()
        return self.p

    def plot_array(self, data, title="Data", *args, **kwargs):
        """Plot an arbitrary per-state array, reorganized onto the grid."""
        data = self.S._organize_to_grid(np.asarray(data).flatten())[..., np.newaxis, np.newaxis, np.newaxis]
        ann = data.flatten()
        self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
        if self.cartesian:
            self.p.ax.invert_yaxis()
        return self.p

    def plot_classes(self, title="Classes", *args, **kwargs):
        """Plot state class ids; terminal states are outlined."""
        data = self.class_ids_grided[..., np.newaxis, np.newaxis, np.newaxis]
        ann = self.S.class_ids
        self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
        self.highlight_terminal_states()
        if self.cartesian:
            self.p.ax.invert_yaxis()
        return self.p

    def plot_features(self, ann=None, title="Features", *args, **kwargs):
        """Plot per-state feature vectors as inner grids per cell.

        3-D (H, W, K) feature arrays are expanded to the 5-D layout the
        Raster view expects; 4-D and 5-D inputs pass through (the latter
        unchanged).
        """
        if self.cartesian:
            data = NavGridView.flip_y_axis(self.PHI_gridded)
        else:
            data = self.PHI_gridded
        n_dim = len(data.shape)
        if n_dim == 3:  # one-hot features
            H, W, K = data.shape
            if K < 10:
                data = data[..., np.newaxis, np.newaxis]
            else:
                # Prefer an HxW inner layout; fall back to a flat Kx1 column
                # when the feature count does not factor as H*W.
                try:
                    k1 = H
                    k2 = W
                    data = data.reshape(H, W, k1, k2)[..., np.newaxis]
                except ValueError:
                    # BUG FIX: was a bare ``except:``; ndarray.reshape signals
                    # a size mismatch with ValueError, so catch only that.
                    data = data.reshape(H, W, K, 1)[..., np.newaxis]
        elif n_dim == 4:
            data = data[..., np.newaxis]
        elif n_dim == 5:
            pass
        else:
            raise Exception("data dimension {} not supported!".format(n_dim))
        if ann is None:
            ann = self.S.class_ids
        ann = np.asarray(ann).flatten()
        self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
        if self.cartesian:
            self.p.ax.invert_yaxis()
        return self.p

    def add_colorbar(self, *args, **kwargs):
        """Attach a colorbar to the last plot; returns self for chaining."""
        self.p.colorbar(*args, **kwargs)
        return self

    def add_trajectories(self, *args, **kwargs):
        """Overlay trajectories on the last plot; returns self for chaining."""
        self.p.add_trajectories(*args, **kwargs)
        return self
# Navigation Views
import navigation_vis.Raster as NavGridView
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm as cm, pyplot as plt, colors as mplotcolors
def plot_grid_data_helper(data, ann=None, ann_sz=12, ann_col="black", title=None, grid=None, cmap=cm.viridis, **kwargs):
p = NavGridView.Raster(data, ax=plt.gca()).render(cmap=cmap, **kwargs).ticks(minor=False)
if ann is not None:
p.show_cell_text(ann, fontsize=ann_sz, color_cb=lambda x: ann_col)
if grid is not None:
p.grid()
if title is not None:
p.title(title)
return p
def highlight_cell(x, y, ax=None, **kwargs):
rect = plt.Rectangle((x-.5, y-.5), 1, 1, fill=False, **kwargs)
ax = ax or plt.gca()
ax.add_patch(rect)
return rect
def plot_irl_world(S, s_lst_lst=[], titles=["States", "Classes", "Features", "Rewards"],
figsize=(18, 12), cbar_pad=1.0, cbar_size="10%",
r_key=None, phi_key=None, r_round_to=3, clean_title=False,
v_range=[None, None, None, None]):
state_title, class_title, feature_title, reward_title = titles
v_range = [(None, None) if v is None else v for v in v_range]
state_range, class_range, feature_range, reward_range = v_range
if not clean_title:
feature_title += "(type={})".format(phi_key)
reward_title += "(type={})".format(r_key)
plt.figure(figsize=figsize)
p = NavGridViewPlotter(S, r_key=r_key, phi_key=phi_key)
plt.subplot(2, 2, 1)
p.plot_states(
cmap=cm.viridis, ann_col="white",
title=state_title, vmin=state_range[0], vmax=state_range[1]).colorbar(
where="left", pad=cbar_pad, size=cbar_size).grid().add_pixel_trajectories(
s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(2, 2, 2)
p.plot_classes(
cmap=cm.viridis, ann_col="white",
title=class_title, vmin=class_range[0], vmax=class_range[1]).colorbar(
where="right", pad=cbar_pad, size=cbar_size).grid()
plt.subplot(2, 2, 3)
p.plot_features(
ann=S.idxs.flatten(), cmap=None, ann_col="white",
title=feature_title, vmin=feature_range[0], vmax=feature_range[1]).colorbar(
where="left", pad=cbar_pad, size=cbar_size).grid().add_trajectories(
s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(2, 2, 4)
p.plot_array(
S.rewards(numpyize=True, key=r_key).round(r_round_to),
cmap=cm.Blues_r, title=reward_title, vmin=reward_range[0], vmax=reward_range[1]).colorbar(
where="right", pad=cbar_pad, size=cbar_size)
plt.tight_layout()
def plot_irl_results(S, s_lst_lst, values, loglik_hist,
titles=["States", "Features", "Rewards", "Values", "Training Performance"],
figsize=(24, 24), cbar_pad=1.0, cbar_size="10%",
r_key=None, phi_key=None, r_round_to=3, clean_title=False,
v_range=[None, None, None, None, None], learned_lst_lst=[]):
state_title, feature_title, reward_title, value_title, perf_title = titles
v_range = [(None, None) if v is None else v for v in v_range]
state_range, feature_range, reward_range, value_range, _ = v_range
if not clean_title:
feature_title += "(type={})".format(phi_key)
reward_title += "(type={})".format(r_key)
plt.figure(figsize=figsize)
p = NavGridViewPlotter(S, r_key=r_key, phi_key=phi_key)
plt.subplot(3, 2, 1)
p.plot_states(
cmap=cm.viridis, ann_col="white",
title=state_title, vmin=state_range[0], vmax=state_range[1]).colorbar(
where="left", pad=cbar_pad, size=cbar_size).grid().add_pixel_trajectories(
s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(3, 2, 2)
p.plot_features(
ann=S.idxs.flatten(), cmap=cm.viridis, ann_col="white",
title=feature_title, vmin=feature_range[0], vmax=feature_range[1]).colorbar(
where="right", pad=cbar_pad, size=cbar_size).grid().add_trajectories(
s_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(3, 2, 3)
p.plot_array(
S.rewards(numpyize=True, key=r_key).round(r_round_to),
cmap=cm.Blues_r, title=reward_title, vmin=reward_range[0], vmax=reward_range[1]).colorbar(
where="left", pad=cbar_pad, size=cbar_size).add_pixel_trajectories(
learned_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(3, 2, 4)
p.plot_array(
values, cmap=cm.Blues_r, title=value_title, vmin=value_range[0], vmax=value_range[1]).colorbar(
where="right", pad=cbar_pad, size=cbar_size).add_pixel_trajectories(
learned_lst_lst, arrow_props={"lw": 3, "color": "black", "shrinkB": 10, "shrinkA": 10})
plt.subplot(3, 2, 5)
plt.plot(list(range(len(loglik_hist))), loglik_hist)
plt.xlabel("Iteration")
plt.ylabel("Likelihood")
plt.title(perf_title)
plt.tight_layout()
class NavGridViewPlotter:
def __init__(self, S, R=None, cartesian=False, r_key=None, phi_key=None):
self.S = S
self.R = S.rewards(key=r_key) if R is None else R
self.PHI_gridded = self.S.features(gridded=True, key=phi_key)
self.R_grided = self.S._organize_to_grid(self.R)
self.class_ids_grided = self.S._organize_to_grid(self.S.class_ids)
self.idxs_gridded = self.S.idxs
self.cartesian = cartesian
self.p = None
def highlight_terminal_states(self):
for s in self.S.get_terminal_states():
r, c = s.location
highlight_cell(c, r, ax=self.p.ax, color="white", linewidth=5)
def plot_rewards(self, title="Rewards", *args, **kwargs):
data = self.R_grided[..., np.newaxis, np.newaxis, np.newaxis]
ann = self.R
self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
if self.cartesian:
self.p.ax.invert_yaxis()
return self.p
def plot_states(self, title="States", *args, **kwargs):
data = self.idxs_gridded[..., np.newaxis, np.newaxis, np.newaxis]
ann = self.S.idxs.flatten()
self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
self.highlight_terminal_states()
if self.cartesian:
self.p.ax.invert_yaxis()
return self.p
def plot_array(self, data, title="Data", *args, **kwargs):
data = self.S._organize_to_grid(np.asarray(data).flatten())[..., np.newaxis, np.newaxis, np.newaxis]
ann = data.flatten()
self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
if self.cartesian:
self.p.ax.invert_yaxis()
return self.p
def plot_classes(self, title="Classes", *args, **kwargs):
data = self.class_ids_grided[..., np.newaxis, np.newaxis, np.newaxis]
ann = self.S.class_ids
self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
self.highlight_terminal_states()
if self.cartesian:
self.p.ax.invert_yaxis()
return self.p
def plot_features(self, ann=None, title="Features", *args, **kwargs):
if self.cartesian:
data = NavGridView.flip_y_axis(self.PHI_gridded)
else:
data = self.PHI_gridded
n_dim = len(data.shape)
if n_dim == 3: # one-hot features
H, W, K = data.shape
if K < 10:
data = data[..., np.newaxis, np.newaxis]
else:
# k1 = int(np.ceil(np.sqrt(K)))
# k2 = int(np.ceil(K / k1))
try:
k1 = H
k2 = W
data = data.reshape(H, W, k1, k2)[..., np.newaxis]
except:
data = data.reshape(H, W, K, 1)[..., np.newaxis]
elif n_dim == 4:
data = data[..., np.newaxis]
elif n_dim == 5:
pass
else:
raise Exception("data dimension {} not supported!".format(n_dim))
if ann is None:
ann = self.S.class_ids
ann = np.asarray(ann).flatten()
self.p = plot_grid_data_helper(data, ann, title=title, *args, **kwargs)
if self.cartesian:
self.p.ax.invert_yaxis()
return self.p
def add_colorbar(self, *args, **kwargs):
self.p.colorbar(*args, **kwargs)
return self
def add_trajectories(self, *args, ** kwargs):
self.p.add_trajectories(*args, **kwargs)
return self | 0.688992 | 0.465205 |
import types
import sys
from peval.core.callable import inspect_callable, Callable
def test_builtin_function():
assert inspect_callable(isinstance) == Callable(isinstance)
def test_builtin_constructor():
assert inspect_callable(str) == Callable(str, init=True)
def test_builtin_unbound_method():
assert inspect_callable(str.__getitem__) == Callable(str.__getitem__)
def test_builtin_bound_method():
assert inspect_callable("a".__getitem__) == Callable(str.__getitem__, self_obj="a")
class mystr1(str):
pass
class mystr2(str):
def __getitem__(self, idx):
return str.__getitem__(self, idx)
def test_builtin_method_in_derived():
s1 = mystr1("a")
assert (inspect_callable(s1.__getitem__) == Callable(str.__getitem__, self_obj=s1))
def test_builtin_method_overloaded_in_derived():
s2 = mystr2("a")
assert (inspect_callable(s2.__getitem__) == Callable(mystr2.__getitem__, self_obj=s2))
def dummy():
pass
class OldStyleDummyInit:
def __init__(self):
pass
class OldStyleDummy:
def __call__(self):
pass
def method(self):
pass
@classmethod
def classmethod(cls):
pass
@staticmethod
def staticmethod():
pass
class OldStyleDerivedDummy(OldStyleDummy):
pass
class NewStyleDummy(object):
def __call__(self):
pass
def method(self):
pass
@classmethod
def classmethod(cls):
pass
@staticmethod
def staticmethod():
pass
class NewStyleDerivedDummy(NewStyleDummy):
pass
def pytest_generate_tests(metafunc):
    """Parametrize every test that takes a ``cls`` fixture over the dummy
    class hierarchies (currently only the new-style base/derived pair)."""
    if 'cls' in metafunc.fixturenames:
        clss = [{'base': NewStyleDummy, 'derived': NewStyleDerivedDummy}]
        ids = ['new style']
        metafunc.parametrize('cls', clss, ids=ids)
def test_function():
assert inspect_callable(dummy) == Callable(dummy)
def test_constructor(cls):
assert inspect_callable(cls['base']) == Callable(cls['base'], init=True)
def test_unbound_method(cls):
assert inspect_callable(cls['base'].method) == Callable(cls['base'].method)
def test_bound_method(cls):
d = cls['base']()
assert inspect_callable(d.method) == Callable(cls['base'].method, self_obj=d)
def test_bound_method_in_derived(cls):
d = cls['derived']()
assert inspect_callable(d.method) == Callable(cls['base'].method, self_obj=d)
def test_call_method(cls):
d = cls['base']()
assert inspect_callable(d) == Callable(cls['base'].__call__, self_obj=d)
def test_static_method(cls):
d = cls['base']()
assert inspect_callable(d.staticmethod) == Callable(cls['base'].staticmethod)
def test_class_method(cls):
d = cls['base']()
classmethod_func = cls['base'].classmethod.__func__
assert inspect_callable(d.classmethod) == Callable(classmethod_func, self_obj=cls['base'])
def test_class_method_in_derived(cls):
d = cls['derived']()
classmethod_func = cls['base'].classmethod.__func__
assert inspect_callable(d.classmethod) == Callable(classmethod_func, self_obj=cls['derived']) | tests/test_core/test_callable.py | import types
import sys
from peval.core.callable import inspect_callable, Callable
def test_builtin_function():
assert inspect_callable(isinstance) == Callable(isinstance)
def test_builtin_constructor():
assert inspect_callable(str) == Callable(str, init=True)
def test_builtin_unbound_method():
assert inspect_callable(str.__getitem__) == Callable(str.__getitem__)
def test_builtin_bound_method():
assert inspect_callable("a".__getitem__) == Callable(str.__getitem__, self_obj="a")
class mystr1(str):
pass
class mystr2(str):
def __getitem__(self, idx):
return str.__getitem__(self, idx)
def test_builtin_method_in_derived():
s1 = mystr1("a")
assert (inspect_callable(s1.__getitem__) == Callable(str.__getitem__, self_obj=s1))
def test_builtin_method_overloaded_in_derived():
s2 = mystr2("a")
assert (inspect_callable(s2.__getitem__) == Callable(mystr2.__getitem__, self_obj=s2))
def dummy():
pass
class OldStyleDummyInit:
def __init__(self):
pass
class OldStyleDummy:
def __call__(self):
pass
def method(self):
pass
@classmethod
def classmethod(cls):
pass
@staticmethod
def staticmethod():
pass
class OldStyleDerivedDummy(OldStyleDummy):
pass
class NewStyleDummy(object):
def __call__(self):
pass
def method(self):
pass
@classmethod
def classmethod(cls):
pass
@staticmethod
def staticmethod():
pass
class NewStyleDerivedDummy(NewStyleDummy):
pass
def pytest_generate_tests(metafunc):
if 'cls' in metafunc.fixturenames:
clss = [{'base': NewStyleDummy, 'derived': NewStyleDerivedDummy}]
ids = ['new style']
metafunc.parametrize('cls', clss, ids=ids)
def test_function():
assert inspect_callable(dummy) == Callable(dummy)
def test_constructor(cls):
assert inspect_callable(cls['base']) == Callable(cls['base'], init=True)
def test_unbound_method(cls):
assert inspect_callable(cls['base'].method) == Callable(cls['base'].method)
def test_bound_method(cls):
d = cls['base']()
assert inspect_callable(d.method) == Callable(cls['base'].method, self_obj=d)
def test_bound_method_in_derived(cls):
d = cls['derived']()
assert inspect_callable(d.method) == Callable(cls['base'].method, self_obj=d)
def test_call_method(cls):
d = cls['base']()
assert inspect_callable(d) == Callable(cls['base'].__call__, self_obj=d)
def test_static_method(cls):
d = cls['base']()
assert inspect_callable(d.staticmethod) == Callable(cls['base'].staticmethod)
def test_class_method(cls):
d = cls['base']()
classmethod_func = cls['base'].classmethod.__func__
assert inspect_callable(d.classmethod) == Callable(classmethod_func, self_obj=cls['base'])
def test_class_method_in_derived(cls):
d = cls['derived']()
classmethod_func = cls['base'].classmethod.__func__
assert inspect_callable(d.classmethod) == Callable(classmethod_func, self_obj=cls['derived']) | 0.340485 | 0.397646 |
import atexit
import traitlets
from traitlets.config.configurable import Configurable
class MotorController:
    """Low-level driver for one wheel motor, controlled through character
    devices under /dev (Raspberry Pi Mouse style device tree)."""

    def __init__(self, channel, base_speed=400):
        # channel 1 = left motor, 2 = right motor (see set_speed).
        self.channel = channel
        self.base_speed = float(base_speed)
        self.power_stat = 0  # cached enable state; 0 = off, 1 = on

    @staticmethod
    def _write_device(path, text):
        """Best-effort write to a motor device node: errors are printed,
        not raised, so a missing device does not crash the caller.
        (Consolidates the three identical try/except bodies from the
        original implementation.)"""
        try:
            with open(path, 'w') as f:
                f.write(text)
        except Exception as e:
            print(e)

    def _set_motor_l_speed(self, motor_speed):
        self._write_device('/dev/rtmotor_raw_l0', str(int(motor_speed)))

    def _set_motor_r_speed(self, motor_speed):
        self._write_device('/dev/rtmotor_raw_r0', str(int(motor_speed)))

    def _set_motor_power(self, mode):
        self._write_device('/dev/rtmotoren0', '1' if mode else '0')

    def set_speed(self, speed):
        """Scale ``speed`` (unitless multiplier) by base_speed and write it
        to this controller's channel; unknown channels are ignored."""
        if self.channel == 1:
            self._set_motor_l_speed(speed * self.base_speed)
        elif self.channel == 2:
            self._set_motor_r_speed(speed * self.base_speed)

    def set_power(self, stat):
        """Enable/disable the motor, touching the device only when the
        requested state differs from the cached one."""
        if self.power_stat != stat:
            self._set_motor_power(stat)
            self.power_stat = stat
class Motor(Configurable):
    # Current commanded value; writing it drives the motor via _observe_value.
    value = traitlets.Float()

    # config: linear mapping from value to device speed (speed = alpha*value + beta)
    alpha = traitlets.Float(default_value=1.0).tag(config=True)
    beta = traitlets.Float(default_value=0.0).tag(config=True)

    def __init__(self, driver, channel, *args, **kwargs):
        """Wrap a MotorController for ``channel``: zero the speed, power the
        motor on, and register a power-off hook for interpreter exit.

        ``driver`` is stored but not used here — presumably kept for API
        compatibility with other motor backends; confirm before removing.
        """
        super(Motor, self).__init__(*args, **kwargs)  # initializes traitlets
        self._driver = driver
        self._motor = MotorController(channel)
        self._motor.set_speed(0)
        self._motor.set_power(1)
        atexit.register(self._release)

    @traitlets.observe('value')
    def _observe_value(self, change):
        # Re-enable power first if something turned the motor off since
        # the previous write.
        if self._motor.power_stat == 1:
            self._write_value(change['new'])
        else:
            self._motor.set_power(1)
            self._write_value(change['new'])

    def _write_value(self, value):
        """Sets motor value between [-1, 1]"""
        speed = (self.alpha * value + self.beta)
        self._motor.set_speed(speed)

    def _release(self):
        """Power off stepper motor by releasing control"""
        self._motor.set_speed(0)
        self.release()
def release(self):
self._motor.set_power(0) | jnmouse/motor.py | import atexit
import traitlets
from traitlets.config.configurable import Configurable
class MotorController:
    """Low-level motor interface writing to the ``/dev/rtmotor*`` device files.

    Raw speeds go to ``/dev/rtmotor_raw_l0`` / ``/dev/rtmotor_raw_r0`` and
    the driver power switch to ``/dev/rtmotoren0``. All device writes are
    best-effort: off the robot the files do not exist and errors are printed.
    """

    def __init__(self, channel, base_speed=400):
        self.channel = channel               # 1 = left motor, 2 = right motor
        self.base_speed = float(base_speed)  # raw device units per unit speed
        self.power_stat = 0                  # last power state written

    def _write_device(self, path, text):
        # Shared best-effort writer: report I/O errors, never raise.
        try:
            with open(path, 'w') as dev:
                dev.write(text)
        except Exception as err:
            print(err)

    def _set_motor_l_speed(self, motor_speed):
        self._write_device('/dev/rtmotor_raw_l0', str(int(motor_speed)))

    def _set_motor_r_speed(self, motor_speed):
        self._write_device('/dev/rtmotor_raw_r0', str(int(motor_speed)))

    def _set_motor_power(self, mode):
        self._write_device('/dev/rtmotoren0', '1' if mode else '0')

    def set_speed(self, speed):
        """Scale *speed* by base_speed and send it to this channel's motor."""
        raw = speed * self.base_speed
        if self.channel == 1:
            self._set_motor_l_speed(raw)
        elif self.channel == 2:
            self._set_motor_r_speed(raw)

    def set_power(self, stat):
        """Switch driver power, touching the device only on a state change."""
        if self.power_stat != stat:
            self._set_motor_power(stat)
        self.power_stat = stat
class Motor(Configurable):
    """Traitlets-configurable wrapper around one MotorController channel.

    Assigning ``value`` (expected in [-1, 1]) drives the motor at
    ``alpha * value + beta`` times the controller's base speed.

    Fixes: corrupted dataset-separator text fused onto the last line was
    removed, and ``_observe_value`` no longer duplicates the write call
    across both branches (``set_power`` is already a no-op when the power
    state is unchanged).
    """

    value = traitlets.Float()

    # config
    alpha = traitlets.Float(default_value=1.0).tag(config=True)  # speed scale
    beta = traitlets.Float(default_value=0.0).tag(config=True)   # speed offset

    def __init__(self, driver, channel, *args, **kwargs):
        super(Motor, self).__init__(*args, **kwargs)  # initializes traitlets
        self._driver = driver
        self._motor = MotorController(channel)
        self._motor.set_speed(0)   # start stopped
        self._motor.set_power(1)   # enable the driver
        # Always stop and power off when the interpreter exits.
        atexit.register(self._release)

    @traitlets.observe('value')
    def _observe_value(self, change):
        # Ensure the driver is powered before writing a speed; set_power
        # only touches the device when the state actually changes.
        self._motor.set_power(1)
        self._write_value(change['new'])

    def _write_value(self, value):
        """Sets motor value between [-1, 1]"""
        speed = (self.alpha * value + self.beta)
        self._motor.set_speed(speed)

    def _release(self):
        """Power off stepper motor by releasing control"""
        self._motor.set_speed(0)
        self.release()

    def release(self):
        self._motor.set_power(0)
import datetime
import decimal
import gzip
import logging
import os
from db_hammer.util.date import date_to_str
def get_headers(cursor):
    """Return the column names from an executed cursor's description."""
    return [column[0] for column in cursor.description]
def count_rows(cursor, sql, log=None):
    """Return the number of rows produced by *sql*.

    Executes a wrapping ``SELECT COUNT(0)`` subquery on *cursor*. Note
    this re-executes on the cursor, discarding any pending result set.
    """
    log = log or logging.getLogger(__name__)
    count_sql = f"SELECT COUNT(0) FROM ({sql}) tmp_count"
    log.debug("execute sql:" + count_sql.replace("\n", " "))
    cursor.execute(count_sql)
    row = cursor.fetchone()
    log.debug("fetch rows:" + str(len(row)))
    return int(row[0])
def start(cursor, sql, bachSize, PACK_SIZE, path, file_mode, add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding,
          callback, log=None):
    """Export the result set of *sql* into numbered csv/txt/gz files under *path*.

    Rows are fetched in batches of *bachSize*; a new output file is started
    once roughly *PACK_SIZE* rows have been written to the current one.
    *callback(done, total)*, when given, receives progress updates.
    Any error is logged and swallowed (best-effort export).
    """
    log = log or logging.getLogger(__name__)
    try:
        os.makedirs(path, exist_ok=True)
        log.info(f"export path:%s" % path)
        total = 0
        if callback is not None:
            # Fixed: count_rows() re-executes on the same cursor, so it must
            # run BEFORE the main query or it clobbers the pending result set.
            total = count_rows(cursor, sql, log)
        cursor.execute(sql)
        col_names = get_headers(cursor)
        csv_data = cursor.fetchmany(int(bachSize))
        file_i = 1          # current output file number
        CACHE_COUNT = PACK_SIZE
        CACHE_ROWS = 0      # rows routed to the current file
        write_f = None      # open handle of the current file
        c_count = 0         # total rows exported so far
        if callback is not None:
            callback(c_count, total)
        while len(csv_data) > 0:
            log.debug("export row::" + str(c_count))
            CACHE_ROWS += len(csv_data)
            if CACHE_ROWS < CACHE_COUNT and c_count > 0:
                # Current file still has room: append to it.
                file_i, file_name, write_f = write_file(col_names, csv_data, path, file_i, False, write_f, file_mode,
                                                        add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding)
            else:
                # First batch, or the current file is full: roll to a new file.
                file_i, file_name, write_f = write_file(col_names, csv_data, path, file_i, True, write_f, file_mode,
                                                        add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding)
                CACHE_ROWS = 0
            c_count += len(csv_data)
            csv_data = cursor.fetchmany(int(bachSize))
            if callback is not None:
                callback(c_count, total)
        if write_f is not None:
            write_f.flush()
            write_f.close()
        log.info(f"export end, total row::%d" % c_count)
    except Exception as e:
        log.exception(e)
    finally:
        pass
def write_file(col_names, csv_data, path, file_i, new, f, file_mode, add_header, CSV_SPLIT, CSV_FIELD_CLOSE,
               encoding):
    """Write one batch of rows to the current (or a freshly opened) export file.

    When *new* is true, the previous handle *f* is flushed/closed and a new
    zero-padded, numbered file is opened (gz/csv/txt per *file_mode*).
    Returns (possibly incremented file index, file name, open handle).
    """
    add = False  # becomes True when this call opened a fresh file
    if file_mode == 'gz':
        file_name = f"{str(file_i).rjust(6, '0')}.gz"
    elif file_mode == 'csv':
        file_name = f"{str(file_i).rjust(6, '0')}.csv"
    else:
        file_name = f"{str(file_i).rjust(6, '0')}.txt"
    if len(csv_data) == 0:
        # NOTE(review): the caller loses the old handle here without it being
        # closed; unreachable from start(), which only calls with rows.
        return file_i, None, None
    if new:
        if f is not None:
            f.flush()
            f.close()
        file_i += 1
        add = True
        if file_mode == 'gz':
            f = gzip.open(f"{path}/{file_name}", 'wb+')
        else:
            f = open(f"{path}/{file_name}", 'wb+')
    __run_rows(rows=csv_data,
               header=col_names,
               file=f,
               add_header=add and add_header,  # header only atop a new file
               CSV_SPLIT=CSV_SPLIT,
               CSV_FIELD_CLOSE=CSV_FIELD_CLOSE,
               encoding=encoding)
    return file_i, file_name, f
def __run_rows(rows, header, add_header=False, file=None, CSV_FIELD_CLOSE='"', CSV_SPLIT=",", encoding="utf-8"):
    """Serialize *rows* as quoted, delimited lines into the binary *file*.

    Every value is wrapped in CSV_FIELD_CLOSE and joined with CSV_SPLIT;
    the header line is emitted once, just before the first row, when
    *add_header* is true. None becomes "" for every column type.

    Cleanup: the per-type ``v is None`` branches in the original were dead
    code (``isinstance(None, X)`` is always False, so None always fell
    through to the final branch); the None check is now done once, first.
    """
    for fields in rows:
        field_map = {}
        for i in range(0, len(header)):
            c = header[i]
            v = fields[i]
            if v is None:
                field_map[c] = ""
            elif isinstance(v, (datetime.datetime, datetime.date)):
                field_map[c] = date_to_str(v)
            elif isinstance(v, int):  # also catches bool, as before
                field_map[c] = str(v)
            elif isinstance(v, decimal.Decimal):
                field_map[c] = str(round(v, 4))
            elif isinstance(v, float):
                field_map[c] = str(v)
            elif isinstance(v, str):
                # Strip newlines plus the quote/delimiter characters so a
                # value cannot break the line format.
                field_map[c] = str(v).replace("\n", "").replace(CSV_FIELD_CLOSE, "").replace(CSV_SPLIT, "")
            else:
                raise Exception(f"[{c}]--NO SUPPORT TYPE--[{type(v)}]")
        if add_header:
            row_str = CSV_FIELD_CLOSE + f'{CSV_FIELD_CLOSE}{CSV_SPLIT}{CSV_FIELD_CLOSE}'.join(header) + CSV_FIELD_CLOSE
            file.write((row_str + '\n').encode(encoding))
            add_header = False
        row_str = CSV_FIELD_CLOSE + f'{CSV_FIELD_CLOSE}{CSV_SPLIT}{CSV_FIELD_CLOSE}'.join(
            field_map.values()) + CSV_FIELD_CLOSE
        file.write((row_str + '\n').encode(encoding))
import decimal
import gzip
import logging
import os
from db_hammer.util.date import date_to_str
def get_headers(cursor):
    """Return the column names from an executed cursor's description."""
    return [column[0] for column in cursor.description]
def count_rows(cursor, sql, log=None):
    """Return the number of rows produced by *sql*.

    Executes a wrapping ``SELECT COUNT(0)`` subquery on *cursor*. Note
    this re-executes on the cursor, discarding any pending result set.
    """
    log = log or logging.getLogger(__name__)
    count_sql = f"SELECT COUNT(0) FROM ({sql}) tmp_count"
    log.debug("execute sql:" + count_sql.replace("\n", " "))
    cursor.execute(count_sql)
    row = cursor.fetchone()
    log.debug("fetch rows:" + str(len(row)))
    return int(row[0])
def start(cursor, sql, bachSize, PACK_SIZE, path, file_mode, add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding,
          callback, log=None):
    """Export the result set of *sql* into numbered csv/txt/gz files under *path*.

    Rows are fetched in batches of *bachSize*; a new output file is started
    once roughly *PACK_SIZE* rows have been written to the current one.
    *callback(done, total)*, when given, receives progress updates.
    Any error is logged and swallowed (best-effort export).
    """
    log = log or logging.getLogger(__name__)
    try:
        os.makedirs(path, exist_ok=True)
        log.info(f"export path:%s" % path)
        total = 0
        if callback is not None:
            # Fixed: count_rows() re-executes on the same cursor, so it must
            # run BEFORE the main query or it clobbers the pending result set.
            total = count_rows(cursor, sql, log)
        cursor.execute(sql)
        col_names = get_headers(cursor)
        csv_data = cursor.fetchmany(int(bachSize))
        file_i = 1          # current output file number
        CACHE_COUNT = PACK_SIZE
        CACHE_ROWS = 0      # rows routed to the current file
        write_f = None      # open handle of the current file
        c_count = 0         # total rows exported so far
        if callback is not None:
            callback(c_count, total)
        while len(csv_data) > 0:
            log.debug("export row::" + str(c_count))
            CACHE_ROWS += len(csv_data)
            if CACHE_ROWS < CACHE_COUNT and c_count > 0:
                # Current file still has room: append to it.
                file_i, file_name, write_f = write_file(col_names, csv_data, path, file_i, False, write_f, file_mode,
                                                        add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding)
            else:
                # First batch, or the current file is full: roll to a new file.
                file_i, file_name, write_f = write_file(col_names, csv_data, path, file_i, True, write_f, file_mode,
                                                        add_header, CSV_SPLIT, CSV_FIELD_CLOSE, encoding)
                CACHE_ROWS = 0
            c_count += len(csv_data)
            csv_data = cursor.fetchmany(int(bachSize))
            if callback is not None:
                callback(c_count, total)
        if write_f is not None:
            write_f.flush()
            write_f.close()
        log.info(f"export end, total row::%d" % c_count)
    except Exception as e:
        log.exception(e)
    finally:
        pass
def write_file(col_names, csv_data, path, file_i, new, f, file_mode, add_header, CSV_SPLIT, CSV_FIELD_CLOSE,
               encoding):
    """Write one batch of rows to the current (or a freshly opened) export file.

    When *new* is true, the previous handle *f* is flushed/closed and a new
    zero-padded, numbered file is opened (gz/csv/txt per *file_mode*).
    Returns (possibly incremented file index, file name, open handle).
    """
    add = False  # becomes True when this call opened a fresh file
    if file_mode == 'gz':
        file_name = f"{str(file_i).rjust(6, '0')}.gz"
    elif file_mode == 'csv':
        file_name = f"{str(file_i).rjust(6, '0')}.csv"
    else:
        file_name = f"{str(file_i).rjust(6, '0')}.txt"
    if len(csv_data) == 0:
        # NOTE(review): the caller loses the old handle here without it being
        # closed; unreachable from start(), which only calls with rows.
        return file_i, None, None
    if new:
        if f is not None:
            f.flush()
            f.close()
        file_i += 1
        add = True
        if file_mode == 'gz':
            f = gzip.open(f"{path}/{file_name}", 'wb+')
        else:
            f = open(f"{path}/{file_name}", 'wb+')
    __run_rows(rows=csv_data,
               header=col_names,
               file=f,
               add_header=add and add_header,  # header only atop a new file
               CSV_SPLIT=CSV_SPLIT,
               CSV_FIELD_CLOSE=CSV_FIELD_CLOSE,
               encoding=encoding)
    return file_i, file_name, f
def __run_rows(rows, header, add_header=False, file=None, CSV_FIELD_CLOSE='"', CSV_SPLIT=",", encoding="utf-8"):
    """Serialize *rows* as quoted, delimited lines into the binary *file*.

    Every value is wrapped in CSV_FIELD_CLOSE and joined with CSV_SPLIT;
    the header line is emitted once, just before the first row, when
    *add_header* is true. None becomes "" for every column type.

    Cleanup: the per-type ``v is None`` branches in the original were dead
    code (``isinstance(None, X)`` is always False, so None always fell
    through to the final branch); the None check is now done once, first.
    Corrupted dataset-separator text fused onto the last line was removed.
    """
    for fields in rows:
        field_map = {}
        for i in range(0, len(header)):
            c = header[i]
            v = fields[i]
            if v is None:
                field_map[c] = ""
            elif isinstance(v, (datetime.datetime, datetime.date)):
                field_map[c] = date_to_str(v)
            elif isinstance(v, int):  # also catches bool, as before
                field_map[c] = str(v)
            elif isinstance(v, decimal.Decimal):
                field_map[c] = str(round(v, 4))
            elif isinstance(v, float):
                field_map[c] = str(v)
            elif isinstance(v, str):
                # Strip newlines plus the quote/delimiter characters so a
                # value cannot break the line format.
                field_map[c] = str(v).replace("\n", "").replace(CSV_FIELD_CLOSE, "").replace(CSV_SPLIT, "")
            else:
                raise Exception(f"[{c}]--NO SUPPORT TYPE--[{type(v)}]")
        if add_header:
            row_str = CSV_FIELD_CLOSE + f'{CSV_FIELD_CLOSE}{CSV_SPLIT}{CSV_FIELD_CLOSE}'.join(header) + CSV_FIELD_CLOSE
            file.write((row_str + '\n').encode(encoding))
            add_header = False
        row_str = CSV_FIELD_CLOSE + f'{CSV_FIELD_CLOSE}{CSV_SPLIT}{CSV_FIELD_CLOSE}'.join(
            field_map.values()) + CSV_FIELD_CLOSE
        file.write((row_str + '\n').encode(encoding))
import math
import pygame
from pygame.locals import *
# 'A'-'F' draw a segment when stepped; 'G'-'L' move without drawing
# (see LSystem.GenerateLines).
DRAW_LETTERS = list("ABCDEF")
FORWARD_LETTERS = list("GHIJKL")
NON_CONSTRUCTION_LETTERS = DRAW_LETTERS + FORWARD_LETTERS
class LSystem:
    '''
    * A class for generating L-Systems from
    * a given set of rule definitions.
    *
    * Fixed: state containers were class attributes, silently shared by
    * every instance; they are now initialized per instance. The Pygame
    * loop now exits cleanly on QUIT instead of calling pygame.quit() and
    * continuing to run.
    '''

    def __init__(self):
        self.__mRules = {}             # conversion rules, symbol -> replacement
        self.__mAxiom = ''             # initial axiom
        self.__mCompleteLSys = ''      # result of GenerateLSystem()
        self.__mLineCoordinates = []   # line segments from GenerateLines()
        self.__mLineLength = 0         # drawn segment length
        self.__mStartAngle = 0         # initial drawing angle

    def SetAxiom(self, axiom):
        '''Set the axiom (start string) for the L-System.'''
        self.__mAxiom = axiom
        self.__mCompleteLSys = axiom

    def NewRule(self, rule):
        '''Define a rule from a string of the form "X=X+Y-Z-Y+X".

        The '+' and '-' rules hold the turn angle in degrees.
        '''
        self.__mRules[rule[0]] = rule[2:]

    def GetLSystem(self):
        '''Return the generated L-System string.'''
        return self.__mCompleteLSys

    def GenerateLSystem(self, recursions):
        '''Expand the axiom *recursions* times using the defined rules.'''
        self.__mCompleteLSys = self.__genLSys(self.__mAxiom, recursions)

    def __genLSys(self, axiom, recursions):
        '''Recursive expansion helper; keeps self.__mAxiom intact.'''
        if recursions == 0:
            return axiom
        expanded = []
        for symbol in axiom:
            # '+'/'-' carry the turn angle, so they are never rewritten.
            if symbol in self.__mRules and symbol not in ('-', '+'):
                expanded.append(self.__mRules[symbol])
            else:
                expanded.append(symbol)
        return self.__genLSys(''.join(expanded), recursions - 1)

    def GetLines(self):
        '''Return the list of line segments built by GenerateLines().'''
        return self.__mLineCoordinates

    def GenerateLines(self, startPoint, angle, lineLength):
        '''Turtle-walk the generated string and record line segments.

        *startPoint* is the draw origin, *angle* the initial heading in
        radians, *lineLength* the length of each segment.
        NOTE(review): all commands, including '[' and ']', are only honored
        for symbols that have a rule defined — matching the original
        control flow; bracket branching therefore needs '['/']' rules.
        '''
        self.__mLineLength = lineLength
        self.__mStartAngle = angle
        self.__mLineCoordinates = []
        stack = []  # saved (position, angle) for '[' / ']' branching
        for ch in self.__mCompleteLSys:
            if ch in self.__mRules:
                if ch in DRAW_LETTERS:
                    segment = [startPoint]
                    startPoint = self.__findNextPoint(startPoint, angle, lineLength)
                    segment.append(startPoint)
                    self.__mLineCoordinates.append(segment)
                elif ch in FORWARD_LETTERS:
                    # Move without drawing.
                    startPoint = self.__findNextPoint(startPoint, angle, lineLength)
                elif ch == '+':
                    angle = self.__turnLeft(angle)
                elif ch == '-':
                    angle = self.__turnRight(angle)
                elif ch == '[':
                    stack.append([startPoint, angle])
                elif ch == ']':
                    startPoint, angle = stack.pop()

    def __turnLeft(self, angle):
        '''Increase the heading by the angle stored in the '+' rule.'''
        angle += math.radians(int(self.__mRules['+']))
        return angle % (2 * math.pi)

    def __turnRight(self, angle):
        '''Decrease the heading by the angle stored in the '-' rule.'''
        angle -= math.radians(int(self.__mRules['-']))
        return angle % (2 * math.pi)

    def __findNextPoint(self, origin, angle, lineLength):
        '''Return the end point of a segment starting at *origin*.'''
        return (origin[0] + lineLength * math.cos(angle),
                origin[1] + lineLength * math.sin(angle))

    def PygameDraw(self, windowSize, color):
        '''Interactively draw the system with Pygame.

        Arrow keys pan the drawing; 'x'/'z' grow/shrink the segment length.
        '''
        pygame.init()
        windowSurfaceObj = pygame.display.set_mode(windowSize)
        running = True
        startPoint = self.__mLineCoordinates[0][0]
        redraw = False
        while running:
            for event in pygame.event.get():
                if event.type == QUIT:
                    # Fixed: previously pygame.quit() ran here while the
                    # loop kept going, crashing on the next pygame call.
                    running = False
            if not running:
                break
            keys = pygame.key.get_pressed()
            if keys[K_LEFT]:
                startPoint = (startPoint[0] + 5, startPoint[1])
                redraw = True
            if keys[K_RIGHT]:
                startPoint = (startPoint[0] - 5, startPoint[1])
                redraw = True
            if keys[K_UP]:
                startPoint = (startPoint[0], startPoint[1] + 5)
                redraw = True
            if keys[K_DOWN]:
                startPoint = (startPoint[0], startPoint[1] - 5)
                redraw = True
            if keys[K_x]:
                self.__mLineLength += 0.2
                redraw = True
            if keys[K_z]:
                self.__mLineLength -= 0.2
                redraw = True
            if redraw:
                self.GenerateLines(startPoint, self.__mStartAngle, self.__mLineLength)
            windowSurfaceObj.fill((255, 255, 255))
            self.__drawLSystem(windowSurfaceObj, color)
            pygame.display.flip()
        pygame.quit()

    def __drawLSystem(self, surface, color):
        '''Draw each line segment produced by GenerateLines().'''
        for line in self.__mLineCoordinates:
            pygame.draw.line(surface, color, line[0], line[1], 1)

    @staticmethod
    def UnitTest():
        '''Smoke test: generate and draw a Koch snowflake.'''
        ls = LSystem()
        ls.NewRule("F=F-F++F-F")
        ls.NewRule("+=60")
        ls.NewRule("-=60")
        ls.SetAxiom("-F++F++F")
        ls.GenerateLSystem(5)
        ls.GenerateLines((320, 480), 0, 4)
        ls.PygameDraw((1024, 768), (255, 0, 0))
if __name__ == '__main__':
    # Run the interactive Koch-snowflake demo when executed as a script.
    LSystem.UnitTest()
''' Generate plant
myLSys = LSystem()
myLSys.NewRule("X=F-[[X]+X]+F[+FX]-X")
myLSys.NewRule("F=FF")
myLSys.NewRule("+=25")
myLSys.NewRule("-=25")
myLSys.SetAxiom("X")
myLSys.GenerateLSystem(5)
myLSys.GenerateLines((512, 768), 270 * (math.pi/180), 5)
myLSys.PygameDraw((1024, 768), (255, 0, 0))
'''
''' Generate Quadratic Koch Island
myLSys = LSystem()
myLSys.NewRule("F=F+L-FF+F+FF+FL+FF-L+FF-F-FF-FL-FFF")
myLSys.NewRule("L=LLLLLL")
myLSys.NewRule("+=90")
myLSys.NewRule("-=90")
myLSys.SetAxiom("F+F+F+F")
myLSys.GenerateLSystem(3)
myLSys.GenerateLines((320, 480), 3 * math.pi / 2, 4)
myLSys.PygameDraw((1024, 768), (255, 0, 0))
'''
''' Generate Koch Snowflake
ls = LSystem()
ls.NewRule("F=F-F++F-F")
ls.NewRule("+=60")
ls.NewRule("-=60")
ls.SetAxiom("-F++F++F")
ls.GenerateLSystem(5)
ls.GenerateLines((320,480), 0, 4)
ls.PygameDraw((1024,768),(255,0,0))
''' | LSystem.py | import math
import pygame
from pygame.locals import *
# 'A'-'F' draw a segment when stepped; 'G'-'L' move without drawing
# (see LSystem.GenerateLines).
DRAW_LETTERS = list("ABCDEF")
FORWARD_LETTERS = list("GHIJKL")
NON_CONSTRUCTION_LETTERS = DRAW_LETTERS + FORWARD_LETTERS
class LSystem:
    '''
    * A class for generating L-Systems from
    * a given set of rule definitions.
    *
    * Fixed: state containers were class attributes, silently shared by
    * every instance; they are now initialized per instance. The Pygame
    * loop now exits cleanly on QUIT instead of calling pygame.quit() and
    * continuing to run.
    '''

    def __init__(self):
        self.__mRules = {}             # conversion rules, symbol -> replacement
        self.__mAxiom = ''             # initial axiom
        self.__mCompleteLSys = ''      # result of GenerateLSystem()
        self.__mLineCoordinates = []   # line segments from GenerateLines()
        self.__mLineLength = 0         # drawn segment length
        self.__mStartAngle = 0         # initial drawing angle

    def SetAxiom(self, axiom):
        '''Set the axiom (start string) for the L-System.'''
        self.__mAxiom = axiom
        self.__mCompleteLSys = axiom

    def NewRule(self, rule):
        '''Define a rule from a string of the form "X=X+Y-Z-Y+X".

        The '+' and '-' rules hold the turn angle in degrees.
        '''
        self.__mRules[rule[0]] = rule[2:]

    def GetLSystem(self):
        '''Return the generated L-System string.'''
        return self.__mCompleteLSys

    def GenerateLSystem(self, recursions):
        '''Expand the axiom *recursions* times using the defined rules.'''
        self.__mCompleteLSys = self.__genLSys(self.__mAxiom, recursions)

    def __genLSys(self, axiom, recursions):
        '''Recursive expansion helper; keeps self.__mAxiom intact.'''
        if recursions == 0:
            return axiom
        expanded = []
        for symbol in axiom:
            # '+'/'-' carry the turn angle, so they are never rewritten.
            if symbol in self.__mRules and symbol not in ('-', '+'):
                expanded.append(self.__mRules[symbol])
            else:
                expanded.append(symbol)
        return self.__genLSys(''.join(expanded), recursions - 1)

    def GetLines(self):
        '''Return the list of line segments built by GenerateLines().'''
        return self.__mLineCoordinates

    def GenerateLines(self, startPoint, angle, lineLength):
        '''Turtle-walk the generated string and record line segments.

        *startPoint* is the draw origin, *angle* the initial heading in
        radians, *lineLength* the length of each segment.
        NOTE(review): all commands, including '[' and ']', are only honored
        for symbols that have a rule defined — matching the original
        control flow; bracket branching therefore needs '['/']' rules.
        '''
        self.__mLineLength = lineLength
        self.__mStartAngle = angle
        self.__mLineCoordinates = []
        stack = []  # saved (position, angle) for '[' / ']' branching
        for ch in self.__mCompleteLSys:
            if ch in self.__mRules:
                if ch in DRAW_LETTERS:
                    segment = [startPoint]
                    startPoint = self.__findNextPoint(startPoint, angle, lineLength)
                    segment.append(startPoint)
                    self.__mLineCoordinates.append(segment)
                elif ch in FORWARD_LETTERS:
                    # Move without drawing.
                    startPoint = self.__findNextPoint(startPoint, angle, lineLength)
                elif ch == '+':
                    angle = self.__turnLeft(angle)
                elif ch == '-':
                    angle = self.__turnRight(angle)
                elif ch == '[':
                    stack.append([startPoint, angle])
                elif ch == ']':
                    startPoint, angle = stack.pop()

    def __turnLeft(self, angle):
        '''Increase the heading by the angle stored in the '+' rule.'''
        angle += math.radians(int(self.__mRules['+']))
        return angle % (2 * math.pi)

    def __turnRight(self, angle):
        '''Decrease the heading by the angle stored in the '-' rule.'''
        angle -= math.radians(int(self.__mRules['-']))
        return angle % (2 * math.pi)

    def __findNextPoint(self, origin, angle, lineLength):
        '''Return the end point of a segment starting at *origin*.'''
        return (origin[0] + lineLength * math.cos(angle),
                origin[1] + lineLength * math.sin(angle))

    def PygameDraw(self, windowSize, color):
        '''Interactively draw the system with Pygame.

        Arrow keys pan the drawing; 'x'/'z' grow/shrink the segment length.
        '''
        pygame.init()
        windowSurfaceObj = pygame.display.set_mode(windowSize)
        running = True
        startPoint = self.__mLineCoordinates[0][0]
        redraw = False
        while running:
            for event in pygame.event.get():
                if event.type == QUIT:
                    # Fixed: previously pygame.quit() ran here while the
                    # loop kept going, crashing on the next pygame call.
                    running = False
            if not running:
                break
            keys = pygame.key.get_pressed()
            if keys[K_LEFT]:
                startPoint = (startPoint[0] + 5, startPoint[1])
                redraw = True
            if keys[K_RIGHT]:
                startPoint = (startPoint[0] - 5, startPoint[1])
                redraw = True
            if keys[K_UP]:
                startPoint = (startPoint[0], startPoint[1] + 5)
                redraw = True
            if keys[K_DOWN]:
                startPoint = (startPoint[0], startPoint[1] - 5)
                redraw = True
            if keys[K_x]:
                self.__mLineLength += 0.2
                redraw = True
            if keys[K_z]:
                self.__mLineLength -= 0.2
                redraw = True
            if redraw:
                self.GenerateLines(startPoint, self.__mStartAngle, self.__mLineLength)
            windowSurfaceObj.fill((255, 255, 255))
            self.__drawLSystem(windowSurfaceObj, color)
            pygame.display.flip()
        pygame.quit()

    def __drawLSystem(self, surface, color):
        '''Draw each line segment produced by GenerateLines().'''
        for line in self.__mLineCoordinates:
            pygame.draw.line(surface, color, line[0], line[1], 1)

    @staticmethod
    def UnitTest():
        '''Smoke test: generate and draw a Koch snowflake.'''
        ls = LSystem()
        ls.NewRule("F=F-F++F-F")
        ls.NewRule("+=60")
        ls.NewRule("-=60")
        ls.SetAxiom("-F++F++F")
        ls.GenerateLSystem(5)
        ls.GenerateLines((320, 480), 0, 4)
        ls.PygameDraw((1024, 768), (255, 0, 0))
if __name__ == '__main__':
    # Run the interactive Koch-snowflake demo when executed as a script.
    LSystem.UnitTest()
''' Generate plant
myLSys = LSystem()
myLSys.NewRule("X=F-[[X]+X]+F[+FX]-X")
myLSys.NewRule("F=FF")
myLSys.NewRule("+=25")
myLSys.NewRule("-=25")
myLSys.SetAxiom("X")
myLSys.GenerateLSystem(5)
myLSys.GenerateLines((512, 768), 270 * (math.pi/180), 5)
myLSys.PygameDraw((1024, 768), (255, 0, 0))
'''
''' Generate Quadratic Koch Island
myLSys = LSystem()
myLSys.NewRule("F=F+L-FF+F+FF+FL+FF-L+FF-F-FF-FL-FFF")
myLSys.NewRule("L=LLLLLL")
myLSys.NewRule("+=90")
myLSys.NewRule("-=90")
myLSys.SetAxiom("F+F+F+F")
myLSys.GenerateLSystem(3)
myLSys.GenerateLines((320, 480), 3 * math.pi / 2, 4)
myLSys.PygameDraw((1024, 768), (255, 0, 0))
'''
''' Generate Koch Snowflake
ls = LSystem()
ls.NewRule("F=F-F++F-F")
ls.NewRule("+=60")
ls.NewRule("-=60")
ls.SetAxiom("-F++F++F")
ls.GenerateLSystem(5)
ls.GenerateLines((320,480), 0, 4)
ls.PygameDraw((1024,768),(255,0,0))
''' | 0.425128 | 0.197232 |
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.contrib.keras as kr
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_category, read_vocab
from rnn_model import TRNNConfig,TextRNN
try:
bool(type(unicode))
except NameError:
unicode = str
base_dir = 'data/cnews'
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = 'checkpoints/textcnn'
save_path = os.path.join(save_dir, 'best_validation') # 最佳验证结果保存路径
save_dir1 = 'checkpoints/textrnn'
save_path1 = os.path.join(save_dir1, 'best_validation') # 最佳验证结果保存路径
g1 = tf.Graph() # 加载到Session 1的graph
g2 = tf.Graph() # 加载到Session 2的graph
sess1 = tf.Session(graph=g1) # Session1
sess2 = tf.Session(graph=g2) # Session2
class CnnModel:
    """Loads the best TextCNN checkpoint and predicts a category for one text."""

    def __init__(self):
        self.config = TCNNConfig()
        self.categories, self.cat_to_id = read_category()
        self.words, self.word_to_id = read_vocab(vocab_dir)
        self.config.vocab_size = len(self.words)
        self.model = TextCNN(self.config)
        self.session = sess1  # module-level session bound to graph g1
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path)  # load best checkpoint

    def predict(self, message):
        """Return the predicted category label for *message*."""
        # unicode()/str() shim so models trained under Python 2 or 3 both work.
        content = unicode(message)
        data = [self.word_to_id[x] for x in content if x in self.word_to_id]
        feed_dict = {
            self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
            self.model.keep_prob: 1.0  # no dropout at inference time
        }
        y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)
        return self.categories[y_pred_cls[0]]
class RnnModel:
    """Loads the best TextRNN checkpoint and predicts a category for one text."""

    def __init__(self):
        self.config = TRNNConfig()
        self.categories, self.cat_to_id = read_category()
        self.words, self.word_to_id = read_vocab(vocab_dir)
        self.config.vocab_size = len(self.words)
        self.model = TextRNN(self.config)
        self.session = sess2  # module-level session bound to graph g2
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path1)  # load best checkpoint

    def predict(self, message):
        """Return (predicted category label, prediction matrix) for *message*."""
        # unicode()/str() shim so models trained under Python 2 or 3 both work.
        content = unicode(message)
        data = [self.word_to_id[x] for x in content if x in self.word_to_id]
        feed_dict = {
            self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
            self.model.keep_prob: 1.0  # no dropout at inference time
        }
        # Fixed: fetch both tensors in ONE session.run instead of two, so
        # both outputs come from the same forward pass (and half the work).
        y_pred_cls, pred_matrix = self.session.run(
            [self.model.y_pred_cls, self.model.pred_matrix], feed_dict=feed_dict)
        return self.categories[y_pred_cls[0]], pred_matrix
if __name__ == '__main__':
    # Demo: classify two sample court-ruling texts with both models.
    with sess1.as_default():
        with g1.as_default():
            cnn_model = CnnModel()
            test_demo = ['本院经审查认为,因合同纠纷提起的诉讼,由被告住所地或者合同履行地人民法院管辖。经查,被告马静、徐建、夏慈训、折云峰、任重兮、程炳来的住所地为均不在天津市河西区。根据《最高人民法院关于审理民间借贷案件适用法律若干问题的规定》,借贷双方就合同履行地未约定或者约定不明确,事后未达成补充协议,按照合同有关条款或者交易习惯仍不能确定的,以接受货币一方所在地为合同履行地。根据法律规定,公民的经常居住地是指公民离开住所地至起诉时已连续居住一年以上的地方,但公民住院就医的地方除外。而原告的户籍系2018年11月8日由天津市河东区沙柳北路冠云东里5号楼4门603号迁来,原告作为接受货币一方,其在天津市河西区居住不满一年,故天津市河西区不应认为是其经常居住地。故本院对该案没有管辖权,应移送至有管辖权的天津市静海区人民法院审理。依照《中华人民共和国民事诉讼法》第二十四条、第三十六条,《最高人民法院关于适用<中华人民共和国民事诉讼法>的解释》第四条、第二十一条,《最高人民法院关于审理民间借贷案件适用法律若干问题的规定》第三条之规定,裁定如下:',
                         '本院认为,涉案房屋的水、电系供水、供电单位提供,故只有供水、供电单位有权按照国家规定中止供水、供电。现被告通过代售水、电的便利条件,以停售涉案房屋水、电的方式,达到其收取涉案房屋物业费的目的,既不符合法律规定,也侵害了原告正常使用涉案房屋水、电的权利,存在过错。故此,被告应停止侵害,使得原告能够正常购买水、电,从而保证涉案房屋正常的用水、用电。关于涉案房屋欠付的物业服务费,被告应通过合法途径依法主张权利。综上所述,原告要求被告开通其所有房产的水、电使用的诉讼请求,本院判定被告向原告正常出售水、电,以保证涉案房屋正常用水、用电。依照《中华人民共和国侵权责任法》第六条、第十五条第一款第(一)项规定,判决如下:']
            for i in test_demo:
                print(cnn_model.predict(i))
    with sess2.as_default():  # 1
        with g2.as_default():
            rnn_model = RnnModel()
            for i in test_demo:
                print(rnn_model.predict(i))
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.contrib.keras as kr
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_category, read_vocab
from rnn_model import TRNNConfig,TextRNN
try:
bool(type(unicode))
except NameError:
unicode = str
base_dir = 'data/cnews'
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = 'checkpoints/textcnn'
save_path = os.path.join(save_dir, 'best_validation') # 最佳验证结果保存路径
save_dir1 = 'checkpoints/textrnn'
save_path1 = os.path.join(save_dir1, 'best_validation') # 最佳验证结果保存路径
g1 = tf.Graph() # 加载到Session 1的graph
g2 = tf.Graph() # 加载到Session 2的graph
sess1 = tf.Session(graph=g1) # Session1
sess2 = tf.Session(graph=g2) # Session2
class CnnModel:
    """Loads the best TextCNN checkpoint and predicts a category for one text."""

    def __init__(self):
        self.config = TCNNConfig()
        self.categories, self.cat_to_id = read_category()
        self.words, self.word_to_id = read_vocab(vocab_dir)
        self.config.vocab_size = len(self.words)
        self.model = TextCNN(self.config)
        self.session = sess1  # module-level session bound to graph g1
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path)  # load best checkpoint

    def predict(self, message):
        """Return the predicted category label for *message*."""
        # unicode()/str() shim so models trained under Python 2 or 3 both work.
        content = unicode(message)
        data = [self.word_to_id[x] for x in content if x in self.word_to_id]
        feed_dict = {
            self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
            self.model.keep_prob: 1.0  # no dropout at inference time
        }
        y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)
        return self.categories[y_pred_cls[0]]
class RnnModel:
    """Loads the best TextRNN checkpoint and predicts a category for one text."""

    def __init__(self):
        self.config = TRNNConfig()
        self.categories, self.cat_to_id = read_category()
        self.words, self.word_to_id = read_vocab(vocab_dir)
        self.config.vocab_size = len(self.words)
        self.model = TextRNN(self.config)
        self.session = sess2  # module-level session bound to graph g2
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path1)  # load best checkpoint

    def predict(self, message):
        """Return (predicted category label, prediction matrix) for *message*."""
        # unicode()/str() shim so models trained under Python 2 or 3 both work.
        content = unicode(message)
        data = [self.word_to_id[x] for x in content if x in self.word_to_id]
        feed_dict = {
            self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
            self.model.keep_prob: 1.0  # no dropout at inference time
        }
        # Fixed: fetch both tensors in ONE session.run instead of two, so
        # both outputs come from the same forward pass (and half the work).
        y_pred_cls, pred_matrix = self.session.run(
            [self.model.y_pred_cls, self.model.pred_matrix], feed_dict=feed_dict)
        return self.categories[y_pred_cls[0]], pred_matrix
if __name__ == '__main__':
    # Demo: classify two sample court-ruling texts with both models.
    with sess1.as_default():
        with g1.as_default():
            cnn_model = CnnModel()
            test_demo = ['本院经审查认为,因合同纠纷提起的诉讼,由被告住所地或者合同履行地人民法院管辖。经查,被告马静、徐建、夏慈训、折云峰、任重兮、程炳来的住所地为均不在天津市河西区。根据《最高人民法院关于审理民间借贷案件适用法律若干问题的规定》,借贷双方就合同履行地未约定或者约定不明确,事后未达成补充协议,按照合同有关条款或者交易习惯仍不能确定的,以接受货币一方所在地为合同履行地。根据法律规定,公民的经常居住地是指公民离开住所地至起诉时已连续居住一年以上的地方,但公民住院就医的地方除外。而原告的户籍系2018年11月8日由天津市河东区沙柳北路冠云东里5号楼4门603号迁来,原告作为接受货币一方,其在天津市河西区居住不满一年,故天津市河西区不应认为是其经常居住地。故本院对该案没有管辖权,应移送至有管辖权的天津市静海区人民法院审理。依照《中华人民共和国民事诉讼法》第二十四条、第三十六条,《最高人民法院关于适用<中华人民共和国民事诉讼法>的解释》第四条、第二十一条,《最高人民法院关于审理民间借贷案件适用法律若干问题的规定》第三条之规定,裁定如下:',
                         '本院认为,涉案房屋的水、电系供水、供电单位提供,故只有供水、供电单位有权按照国家规定中止供水、供电。现被告通过代售水、电的便利条件,以停售涉案房屋水、电的方式,达到其收取涉案房屋物业费的目的,既不符合法律规定,也侵害了原告正常使用涉案房屋水、电的权利,存在过错。故此,被告应停止侵害,使得原告能够正常购买水、电,从而保证涉案房屋正常的用水、用电。关于涉案房屋欠付的物业服务费,被告应通过合法途径依法主张权利。综上所述,原告要求被告开通其所有房产的水、电使用的诉讼请求,本院判定被告向原告正常出售水、电,以保证涉案房屋正常用水、用电。依照《中华人民共和国侵权责任法》第六条、第十五条第一款第(一)项规定,判决如下:']
            for i in test_demo:
                print(cnn_model.predict(i))
    with sess2.as_default():  # 1
        with g2.as_default():
            rnn_model = RnnModel()
            for i in test_demo:
                print(rnn_model.predict(i))
import logging
import threading
import serial
import serial.tools.list_ports
import fiber_reading
from collections import deque
def select_device():
    """User-provided serial device selector.

    Lists the detected serial ports and re-prompts until a valid index
    is entered.

    Returns:
        The selected serial device as ListPortInfo.
    """
    while True:
        print('Pick the serial device:')
        ports = serial.tools.list_ports.comports()
        for i, port in enumerate(ports):
            print('{}: {}'.format(i, port))
        try:
            # NOTE(review): non-numeric input raises ValueError here, which
            # is not caught — only out-of-range indices are retried.
            chosen_port = ports[int(input())]
            print('Selected {}'.format(chosen_port))
            return chosen_port
        except IndexError:
            print('Invalid device!')
            continue
class SerialDataSource(object):
    """A datasource that reads parsed FiberReading packets from a serial port.

    Fixed: packet_service looped ``while True`` and ignored ``self.running``,
    so stop() would block forever on join(); the loop now honors the flag
    (it may still wait for one final readline before noticing it).
    """

    def __init__(self, device):
        self.q = deque()                        # parsed readings, FIFO
        self.ser = serial.Serial(device, 115200)
        self.running = False                    # reader-thread run flag
        self.t = None                           # reader thread

    def start(self):
        """Start the background packet_service thread (no-op if running)."""
        if self.running:
            return
        self.running = True
        self.t = threading.Thread(target=self.packet_service)
        self.t.start()

    def stop(self):
        """Signal the reader thread to stop and wait for it to finish."""
        self.running = False
        self.t.join()
        self.t = None

    def get_packet(self):
        """Pop and return the oldest reading, or None when the queue is empty."""
        if self.q:
            return self.q.popleft()

    def packet_service(self):
        # Discard the first (possibly partial) line.
        self.ser.readline().decode('ascii')
        while self.running:  # was `while True`, making stop() hang forever
            line = ''
            try:
                line = self.ser.readline().decode('ascii')
            except Exception:
                continue
            if not line:
                continue
            # Line format: axis,index,callib,data...  (comma separated ints)
            ints = line.split(',')
            l = len(ints)
            if l < 3:
                print(line)
                continue
            axis_char = int(ints[0])
            axis = fiber_reading.Axis.UNKNOWN
            if axis_char == 0:
                axis = fiber_reading.Axis.X_AXIS
            elif axis_char == 1:
                axis = fiber_reading.Axis.Y_AXIS
            index = int(ints[1])
            callib = int(ints[2])
            reading = fiber_reading.FiberReading(axis, index, callib)
            for i in range(3, l):
                reading.AddData(int(ints[i]))
            self.q.append(reading)
import logging
import threading
import serial
import serial.tools.list_ports
import fiber_reading
from collections import deque
def select_device():
    """User-provided serial device selector.

    Lists the detected serial ports and re-prompts until a valid index
    is entered.

    Returns:
        The selected serial device as ListPortInfo.
    """
    while True:
        print('Pick the serial device:')
        ports = serial.tools.list_ports.comports()
        for i, port in enumerate(ports):
            print('{}: {}'.format(i, port))
        try:
            # NOTE(review): non-numeric input raises ValueError here, which
            # is not caught — only out-of-range indices are retried.
            chosen_port = ports[int(input())]
            print('Selected {}'.format(chosen_port))
            return chosen_port
        except IndexError:
            print('Invalid device!')
            continue
class SerialDataSource(object):
    """A datasource that reads parsed FiberReading packets from a serial port.

    Fixed: packet_service looped ``while True`` and ignored ``self.running``,
    so stop() would block forever on join(); the loop now honors the flag
    (it may still wait for one final readline before noticing it).
    """

    def __init__(self, device):
        self.q = deque()                        # parsed readings, FIFO
        self.ser = serial.Serial(device, 115200)
        self.running = False                    # reader-thread run flag
        self.t = None                           # reader thread

    def start(self):
        """Start the background packet_service thread (no-op if running)."""
        if self.running:
            return
        self.running = True
        self.t = threading.Thread(target=self.packet_service)
        self.t.start()

    def stop(self):
        """Signal the reader thread to stop and wait for it to finish."""
        self.running = False
        self.t.join()
        self.t = None

    def get_packet(self):
        """Pop and return the oldest reading, or None when the queue is empty."""
        if self.q:
            return self.q.popleft()

    def packet_service(self):
        # Discard the first (possibly partial) line.
        self.ser.readline().decode('ascii')
        while self.running:  # was `while True`, making stop() hang forever
            line = ''
            try:
                line = self.ser.readline().decode('ascii')
            except Exception:
                continue
            if not line:
                continue
            # Line format: axis,index,callib,data...  (comma separated ints)
            ints = line.split(',')
            l = len(ints)
            if l < 3:
                print(line)
                continue
            axis_char = int(ints[0])
            axis = fiber_reading.Axis.UNKNOWN
            if axis_char == 0:
                axis = fiber_reading.Axis.X_AXIS
            elif axis_char == 1:
                axis = fiber_reading.Axis.Y_AXIS
            index = int(ints[1])
            callib = int(ints[2])
            reading = fiber_reading.FiberReading(axis, index, callib)
            for i in range(3, l):
                reading.AddData(int(ints[i]))
            self.q.append(reading)
# This could be quite useful...
from random import randrange
dice = []
running = True
class Die:
    """A named die with a configurable number of sides."""

    def __init__(self, number_of_sides, myname):
        self.__number_of_sides = number_of_sides
        self.myname = myname

    def throw_dice(self):
        """Return a uniform roll in 1..number_of_sides, as a string.

        Fixed off-by-one: randrange excludes its stop value, so the
        highest face could never be rolled (and a 1-sided die crashed).
        """
        return str(randrange(1, self.__number_of_sides + 1))
def throw_all_dice():
    """Print each die's name and a fresh roll, one die per line."""
    for current_die in dice:
        print(current_die.myname, ": ", current_die.throw_dice(), sep="")
# Interactive command loop: reads one command per iteration until "exit".
while running is True:
    print("")
    player_input = str(input("Enter a command: "))
    # Create a new named die and add it to the global list.
    if player_input == "c" or "create" in player_input or "Create" in player_input:
        die_nsides = int(input("Enter number of sides of the new die: "))
        die_newname = str(input("Enter a name for the die: "))
        dice.append(Die(die_nsides, die_newname))
        print("Die created")
    # "One-time die": roll once without storing anything.
    elif player_input == "otd" or player_input == "t" or player_input == "throw" or player_input == "Throw":
        player_input = int(input("How many sides should the die have? "))
        # NOTE(review): randrange(1, n) excludes n, so the highest face can
        # never be thrown here — likely should be randrange(1, n + 1); confirm.
        print("Threw a ", randrange(1, player_input), "!", sep="")
    elif player_input == "h" or "help" in player_input or "Help" in player_input or "HELP" in player_input:
        print("Available commands:")
        print("create, c - Creates a die")
        print("delete, d - Deletes a die")
        print("throw, t, otd - Throws a \"one-time-die\" (OTD)")
        print("throw_all, throw all, ta - Throws all the dice.")
        print("help, h - Prints this help")
        print("exit, e - Exits the program")
    elif player_input == "ta" or player_input == "throw_all" or player_input == "throw all" or player_input == "Throw All" or player_input == "THROW ALL" or player_input == "Throw all":
        throw_all_dice()
    elif player_input == "e" or "exit" in player_input or "Exit" in player_input or "EXIT" in player_input:
        running = False
    elif player_input == "d" or "Delete" in player_input or "delete" in player_input:
        print("What die would you like to destroy?")
        # NOTE(review): the index is asked for BEFORE the numbered list below
        # is printed — the user must answer blind; consider reordering.
        player_input = int(input("Enter its number here: "))
        counter = 0
        for die in dice:
            counter += 1
            print(counter, ".", die.myname, sep="")
        # 1-based user index converted to 0-based list index.
        dice.pop(player_input - 1)
        print("Destroyed die number ", player_input, sep="")
    else:
        print("Wrong command!") | school_stuff/useless_programs/python/dice_manager/dice.py |
# This could be quite useful...
from random import randrange
dice = []
running = True
class Die:
def __init__(self, number_of_sides, myname):
self.__number_of_sides = number_of_sides
self.myname = myname
def throw_dice(self):
return str(randrange(1, self.__number_of_sides))
def throw_all_dice():
for die in dice:
print(die.myname, ": ", die.throw_dice(), sep="")
while running is True:
print("")
player_input = str(input("Enter a command: "))
if player_input == "c" or "create" in player_input or "Create" in player_input:
die_nsides = int(input("Enter number of sides of the new die: "))
die_newname = str(input("Enter a name for the die: "))
dice.append(Die(die_nsides, die_newname))
print("Die created")
elif player_input == "otd" or player_input == "t" or player_input == "throw" or player_input == "Throw":
player_input = int(input("How many sides should the die have? "))
print("Threw a ", randrange(1, player_input), "!", sep="")
elif player_input == "h" or "help" in player_input or "Help" in player_input or "HELP" in player_input:
print("Available commands:")
print("create, c - Creates a die")
print("delete, d - Deletes a die")
print("throw, t, otd - Throws a \"one-time-die\" (OTD)")
print("throw_all, throw all, ta - Throws all the dice.")
print("help, h - Prints this help")
print("exit, e - Exits the program")
elif player_input == "ta" or player_input == "throw_all" or player_input == "throw all" or player_input == "Throw All" or player_input == "THROW ALL" or player_input == "Throw all":
throw_all_dice()
elif player_input == "e" or "exit" in player_input or "Exit" in player_input or "EXIT" in player_input:
running = False
elif player_input == "d" or "Delete" in player_input or "delete" in player_input:
print("What die would you like to destroy?")
player_input = int(input("Enter its number here: "))
counter = 0
for die in dice:
counter += 1
print(counter, ".", die.myname, sep="")
dice.pop(player_input - 1)
print("Destroyed die number ", player_input, sep="")
else:
print("Wrong command!") | 0.330039 | 0.191026 |
from lammps_data.bonds import extrapolate_periodic_bonds
from lammps_data.bonds import extrapolate_bonds
def test_zero_cell_vectors_should_give_same_bonds_with_nonperiodic():
    """A degenerate (all-zero) cell must reduce to the non-periodic result."""
    degenerate_cell = [[0] * 3] * 3
    lone_atom = [(0, 0, 0, 1)]
    assert extrapolate_bonds(lone_atom) == extrapolate_periodic_bonds(lone_atom, degenerate_cell)
def test_periodic_atoms_too_close_should_not_be_bonded():
    """Atoms closer than the minimum bonding distance across the boundary stay unbonded."""
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.059, 1)]
    assert len(extrapolate_periodic_bonds(pair, box)) == 0
def test_periodic_atoms_at_0_16_angstrom_should_be_bonded():
    """H atoms 0.16 apart across the periodic boundary form exactly one bond."""
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.06, 1)]
    result = extrapolate_periodic_bonds(pair, box)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_perodic_h_atoms_at_lte_1_09_angstrom_should_be_bonded():
    """H atoms at 1.09 across the boundary are bonded (upper threshold inclusive)."""
    # NOTE(review): "perodic" in the name looks like a typo for "periodic";
    # kept as-is so pytest selection expressions keep working.
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.0, 1), (0.0, 0.0, 10.09, 1)]
    result = extrapolate_periodic_bonds(pair, box)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_periodic_h_atoms_at_gt_1_09_angstrom_should_not_be_bonded():
    """H atoms just past 1.09 across the boundary must not be bonded."""
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.0, 1), (0.0, 0.0, 10.10, 1)]
    assert len(extrapolate_periodic_bonds(pair, box)) == 0
def test_periodic_si_atoms_at_lte_2_77_angstrom_should_be_bonded():
    """Si atoms (element 14) at 2.77 across the boundary form one bond."""
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.0, 14), (0.0, 0.0, 11.77, 14)]
    result = extrapolate_periodic_bonds(pair, box)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_periodic_si_atoms_at_gt_2_77_angstrom_should_not_be_bonded():
    """Si atoms just past 2.77 across the boundary must not be bonded."""
    box = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    pair = [(0.0, 0.0, 9.0, 14), (0.0, 0.0, 11.78, 14)]
    assert len(extrapolate_periodic_bonds(pair, box)) == 0
def test_periodic_bond_tuples_should_be_sorted_by_atom_index():
    # Regardless of input order, each bond tuple is (lower_index, higher_index).
    cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
    atoms = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.06, 1)]
    bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
    assert bonds == [(0, 1)]
    # Same pair with the atoms swapped must yield the same sorted tuple.
    atoms = [(0.0, 0.0, 10.06, 1), (0.0, 0.0, 9.9, 1)]
    bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
    assert bonds == [(0, 1)] | tests/test_extrapolate_periodic_bonds.py | from lammps_data.bonds import extrapolate_periodic_bonds
from lammps_data.bonds import extrapolate_bonds
def test_zero_cell_vectors_should_give_same_bonds_with_nonperiodic():
cell_vectors = [[0] * 3] * 3
atoms = [(0, 0, 0, 1)]
assert extrapolate_bonds(atoms) == extrapolate_periodic_bonds(atoms, cell_vectors)
def test_periodic_atoms_too_close_should_not_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.059, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 0
def test_periodic_atoms_at_0_16_angstrom_should_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.06, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 1
assert bonds == [(0, 1)]
def test_perodic_h_atoms_at_lte_1_09_angstrom_should_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.0, 1), (0.0, 0.0, 10.09, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 1
assert bonds == [(0, 1)]
def test_periodic_h_atoms_at_gt_1_09_angstrom_should_not_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.0, 1), (0.0, 0.0, 10.10, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 0
def test_periodic_si_atoms_at_lte_2_77_angstrom_should_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.0, 14), (0.0, 0.0, 11.77, 14)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 1
assert bonds == [(0, 1)]
def test_periodic_si_atoms_at_gt_2_77_angstrom_should_not_be_bonded():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.0, 14), (0.0, 0.0, 11.78, 14)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert len(bonds) == 0
def test_periodic_bond_tuples_should_be_sorted_by_atom_index():
cell_vectors = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
atoms = [(0.0, 0.0, 9.9, 1), (0.0, 0.0, 10.06, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert bonds == [(0, 1)]
atoms = [(0.0, 0.0, 10.06, 1), (0.0, 0.0, 9.9, 1)]
bonds = extrapolate_periodic_bonds(atoms, cell_vectors)
assert bonds == [(0, 1)] | 0.774413 | 0.857082 |
import numpy
from crystal_util import crystal_fh2, bragg_calc2
if __name__ == "__main__":
    import sys
    # With CLI arguments: parse crystal name, Miller indices and energy range.
    if len(sys.argv) > 1:
        import argparse
        parser = argparse.ArgumentParser(description='Calculation structure factor')
        #args = parser.parse_args()
        parser.add_argument('-n','--name',dest='descriptor', default=['YB66'],type=str, nargs=1, help='Crystal name')
        parser.add_argument('-m','--m', metavar='H K L', default=[4,0,0],type=int, nargs='+', help='Miller indic [H, K, L]')
        parser.add_argument('-e','--e', dest='EngRange', metavar='emin,emax,estep', default=[8040,8050,1],type=float, nargs='+', help='[emin,emax,estep]')
        args = parser.parse_args()
        print(">>>>", args)
        descriptor = args.descriptor[0].strip()
        HMILLER = args.m[0]
        KMILLER = args.m[1]
        LMILLER = args.m[2]
        ENERGY = args.EngRange[0]
        ENERGY_END = args.EngRange[1]
        estep = args.EngRange[2]
        # Number of sampling points so that both range endpoints are included.
        NPOINTS = int((ENERGY_END-ENERGY)/estep + 1)
        print("Using crystal descriptor: ",descriptor)
        bragg_dictionary = bragg_calc2(descriptor=descriptor,hh=HMILLER,kk=KMILLER,ll=LMILLER,temper=1.0,
                                emin=ENERGY,emax=ENERGY_END,estep=estep,fileout=None) #50eV, replaced with estep
        energy = numpy.linspace(ENERGY,ENERGY_END,NPOINTS)
        print("\nCrystal = %s, Miller Index = (%d,%d,%d)\n" % (descriptor,HMILLER,KMILLER,LMILLER))
        # Print the complex structure factor FH at each sampled energy.
        for i,ienergy in enumerate(energy):
            dic2 = crystal_fh2(bragg_dictionary,ienergy)
            print("Energy=%g eV FH=(%g,%g)"%(ienergy,dic2["STRUCT"].real,dic2["STRUCT"].imag))
    # Without arguments: hard-coded YB66 (4,0,0) demo over 8040-8050 eV.
    else:
        emin = 8040
        emax = 8050
        estep = 1
        bragg_dictionary = bragg_calc2(descriptor="YB66",
                                       hh=4,
                                       kk=0,
                                       ll=0,
                                       temper=1.0,
                                       emin=emin,
                                       emax=emax,
                                       estep=estep, # 50eV, replaced with estep
                                       fileout=None)
        energy = numpy.linspace(emin, emax, 1 + int( (emax-emin) / (estep)))
        print("\nCrystal = %s, Miller Index = (%d,%d,%d)\n" % ("YB66",4,0,0))
        # This branch also prints the forward structure factor F(0,0,0).
        for i, ienergy in enumerate(energy):
            dic2 = crystal_fh2(bragg_dictionary, ienergy)
            print("Energy=%g eV F(0,0,0)=%s, FH=(%g,%g)" % (ienergy, repr(dic2["F_0"]), dic2["STRUCT"].real, dic2["STRUCT"].imag)) | yb66/calc_struct.py | import numpy
from crystal_util import crystal_fh2, bragg_calc2
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
import argparse
parser = argparse.ArgumentParser(description='Calculation structure factor')
#args = parser.parse_args()
parser.add_argument('-n','--name',dest='descriptor', default=['YB66'],type=str, nargs=1, help='Crystal name')
parser.add_argument('-m','--m', metavar='H K L', default=[4,0,0],type=int, nargs='+', help='Miller indic [H, K, L]')
parser.add_argument('-e','--e', dest='EngRange', metavar='emin,emax,estep', default=[8040,8050,1],type=float, nargs='+', help='[emin,emax,estep]')
args = parser.parse_args()
print(">>>>", args)
descriptor = args.descriptor[0].strip()
HMILLER = args.m[0]
KMILLER = args.m[1]
LMILLER = args.m[2]
ENERGY = args.EngRange[0]
ENERGY_END = args.EngRange[1]
estep = args.EngRange[2]
NPOINTS = int((ENERGY_END-ENERGY)/estep + 1)
print("Using crystal descriptor: ",descriptor)
bragg_dictionary = bragg_calc2(descriptor=descriptor,hh=HMILLER,kk=KMILLER,ll=LMILLER,temper=1.0,
emin=ENERGY,emax=ENERGY_END,estep=estep,fileout=None) #50eV, replaced with estep
energy = numpy.linspace(ENERGY,ENERGY_END,NPOINTS)
print("\nCrystal = %s, Miller Index = (%d,%d,%d)\n" % (descriptor,HMILLER,KMILLER,LMILLER))
for i,ienergy in enumerate(energy):
dic2 = crystal_fh2(bragg_dictionary,ienergy)
print("Energy=%g eV FH=(%g,%g)"%(ienergy,dic2["STRUCT"].real,dic2["STRUCT"].imag))
else:
emin = 8040
emax = 8050
estep = 1
bragg_dictionary = bragg_calc2(descriptor="YB66",
hh=4,
kk=0,
ll=0,
temper=1.0,
emin=emin,
emax=emax,
estep=estep, # 50eV, replaced with estep
fileout=None)
energy = numpy.linspace(emin, emax, 1 + int( (emax-emin) / (estep)))
print("\nCrystal = %s, Miller Index = (%d,%d,%d)\n" % ("YB66",4,0,0))
for i, ienergy in enumerate(energy):
dic2 = crystal_fh2(bragg_dictionary, ienergy)
print("Energy=%g eV F(0,0,0)=%s, FH=(%g,%g)" % (ienergy, repr(dic2["F_0"]), dic2["STRUCT"].real, dic2["STRUCT"].imag)) | 0.193681 | 0.121738 |
import cartography.intel.aws.ec2
import cartography.intel.aws.iam
import tests.data.aws.ec2.instances
import tests.data.aws.iam
from cartography.util import run_analysis_job
# Shared fixtures for these integration tests: fake AWS account/region and a
# sentinel update tag passed through to the loaders.
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'us-east-1'
TEST_UPDATE_TAG = 123456789
def test_load_ec2_instances(neo4j_session, *args):
    """Loading DESCRIBE_INSTANCES must create EC2Instance nodes with their key fields set."""
    reservations = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
    cartography.intel.aws.ec2.instances.load_ec2_instances(
        neo4j_session, reservations, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
    )
    result = neo4j_session.run(
        """
        MATCH (i:EC2Instance) return i.id, i.instanceid
        """,
    )
    actual_nodes = {(record['i.id'], record['i.instanceid']) for record in result}
    # Both id and instanceid carry the same instance identifier.
    expected_nodes = {
        (instance_id, instance_id)
        for instance_id in ("i-01", "i-02", "i-03", "i-04")
    }
    assert actual_nodes == expected_nodes
def test_ec2_reservations_to_instances(neo4j_session, *args):
    """Each loaded instance must be linked to the reservation it belongs to."""
    reservations = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
    cartography.intel.aws.ec2.instances.load_ec2_instances(
        neo4j_session, reservations, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
    )
    result = neo4j_session.run(
        """
        MATCH (r:EC2Reservation)<-[:MEMBER_OF_EC2_RESERVATION]-(i:EC2Instance) RETURN r.reservationid, i.id
        """,
    )
    actual_nodes = {(record['r.reservationid'], record['i.id']) for record in result}
    # (reservation id, instance id) pairs; r-03 owns two instances.
    expected_nodes = {
        ("r-01", "i-01"),
        ("r-02", "i-02"),
        ("r-03", "i-03"),
        ("r-03", "i-04"),
    }
    assert actual_nodes == expected_nodes
def test_ec2_iaminstanceprofiles(neo4j_session):
    """
    Ensure that EC2Instances are attached to the IAM Roles that they can assume due to their IAM instance profiles
    """
    # Seed the AWSAccount node that the analysis job anchors on.
    neo4j_session.run(
        """
        MERGE (aws:AWSAccount{id: {aws_account_id}})
        ON CREATE SET aws.firstseen = timestamp()
        SET aws.lastupdated = {aws_update_tag}
        """,
        aws_account_id=TEST_ACCOUNT_ID,
        aws_update_tag=TEST_UPDATE_TAG,
    )
    data_instances = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
    # NOTE(review): 'INSTACE' looks like a typo for 'INSTANCE', but it must
    # match the attribute actually defined in tests.data.aws.iam — confirm
    # there before renaming.
    data_iam = tests.data.aws.iam.INSTACE['Roles']
    cartography.intel.aws.ec2.instances.load_ec2_instances(
        neo4j_session, data_instances, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
    )
    cartography.intel.aws.iam.load_roles(
        neo4j_session, data_iam, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
    )
    common_job_parameters = {
        "UPDATE_TAG": TEST_UPDATE_TAG,
    }
    # The analysis job draws the STS_ASSUMEROLE_ALLOW edges asserted below.
    run_analysis_job(
        'aws_ec2_iaminstanceprofile.json',
        neo4j_session,
        common_job_parameters,
    )
    # (role arn, instance id) pairs expected after the analysis job runs.
    expected_nodes = {
        ('arn:aws:iam::000000000000:role/SERVICE_NAME_2', 'i-02'),
        ('arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME', 'i-03'),
        ('arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME', 'i-04'),
    }
    nodes = neo4j_session.run(
        """
        MATCH (i:EC2Instance)-[:STS_ASSUMEROLE_ALLOW]->(r:AWSRole) return r.arn, i.id
        """,
    )
    actual_nodes = {
        (
            n['r.arn'],
            n['i.id'],
        )
        for n in nodes
    }
    assert actual_nodes == expected_nodes | tests/integration/cartography/intel/aws/ec2/test_ec2_instances.py | import cartography.intel.aws.ec2
import cartography.intel.aws.iam
import tests.data.aws.ec2.instances
import tests.data.aws.iam
from cartography.util import run_analysis_job
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'us-east-1'
TEST_UPDATE_TAG = 123456789
def test_load_ec2_instances(neo4j_session, *args):
"""
Ensure that instances actually get loaded and have their key fields
"""
data = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
cartography.intel.aws.ec2.instances.load_ec2_instances(
neo4j_session, data, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
)
expected_nodes = {
(
"i-01",
"i-01",
),
(
"i-02",
"i-02",
),
(
"i-03",
"i-03",
),
(
"i-04",
"i-04",
),
}
nodes = neo4j_session.run(
"""
MATCH (i:EC2Instance) return i.id, i.instanceid
""",
)
actual_nodes = {
(
n['i.id'],
n['i.instanceid'],
)
for n in nodes
}
assert actual_nodes == expected_nodes
def test_ec2_reservations_to_instances(neo4j_session, *args):
"""
Ensure that instances are connected to their expected reservations
"""
data = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
cartography.intel.aws.ec2.instances.load_ec2_instances(
neo4j_session, data, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
)
expected_nodes = {
(
"r-01",
"i-01",
),
(
"r-02",
"i-02",
),
(
"r-03",
"i-03",
),
(
"r-03",
"i-04",
),
}
nodes = neo4j_session.run(
"""
MATCH (r:EC2Reservation)<-[:MEMBER_OF_EC2_RESERVATION]-(i:EC2Instance) RETURN r.reservationid, i.id
""",
)
actual_nodes = {
(
n['r.reservationid'],
n['i.id'],
)
for n in nodes
}
assert actual_nodes == expected_nodes
def test_ec2_iaminstanceprofiles(neo4j_session):
"""
Ensure that EC2Instances are attached to the IAM Roles that they can assume due to their IAM instance profiles
"""
neo4j_session.run(
"""
MERGE (aws:AWSAccount{id: {aws_account_id}})
ON CREATE SET aws.firstseen = timestamp()
SET aws.lastupdated = {aws_update_tag}
""",
aws_account_id=TEST_ACCOUNT_ID,
aws_update_tag=TEST_UPDATE_TAG,
)
data_instances = tests.data.aws.ec2.instances.DESCRIBE_INSTANCES['Reservations']
data_iam = tests.data.aws.iam.INSTACE['Roles']
cartography.intel.aws.ec2.instances.load_ec2_instances(
neo4j_session, data_instances, TEST_REGION, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
)
cartography.intel.aws.iam.load_roles(
neo4j_session, data_iam, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,
)
common_job_parameters = {
"UPDATE_TAG": TEST_UPDATE_TAG,
}
run_analysis_job(
'aws_ec2_iaminstanceprofile.json',
neo4j_session,
common_job_parameters,
)
expected_nodes = {
('arn:aws:iam::000000000000:role/SERVICE_NAME_2', 'i-02'),
('arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME', 'i-03'),
('arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME', 'i-04'),
}
nodes = neo4j_session.run(
"""
MATCH (i:EC2Instance)-[:STS_ASSUMEROLE_ALLOW]->(r:AWSRole) return r.arn, i.id
""",
)
actual_nodes = {
(
n['r.arn'],
n['i.id'],
)
for n in nodes
}
assert actual_nodes == expected_nodes | 0.5083 | 0.456955 |
from socket import getfqdn
import logging
import os
from datetime import datetime
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def format_body_request(
        docker_image='',
        dockerim_version=None,
        registry_url='',
        input_data=None,
        param_as_envar=True,
        volume_mapping=None,
        queue_name='celery'):
    """Build the canonical request-body dict consumed by Request.__init__.

    :param docker_image: Name of the Docker image to run.
    :param dockerim_version: Unused; kept for interface compatibility.
    :param registry_url: Unused; kept for interface compatibility.
    :param input_data: Process input parameters (defaults to an empty dict).
    :param param_as_envar: Pass input_data as environment variables if True.
    :param volume_mapping: Docker volume mapping (defaults to an empty dict).
    :param queue_name: Target Celery queue name.
    :returns: dict with the request-body fields.
    """
    # Bug fix: the original used mutable default arguments ({}), so every
    # caller relying on the default shared — and could mutate — one
    # module-level dict. Use None sentinels and build fresh dicts per call.
    return {
        'docker_image': docker_image,
        'input_data': {} if input_data is None else input_data,
        'param_as_envar': param_as_envar,
        'volume_mapping': {} if volume_mapping is None else volume_mapping,
        'queue_name': queue_name,
    }
class Request(object):
    """
    Container class for all attributes relative to an annotation request.

    An instance only lives for the duration of one request being processed
    (hence the name). It also bundles general helper functions used by the
    Vesta workgroup annotators, though nothing here is annotator-specific.
    """

    # Per-instance values; class-level defaults make the attributes
    # discoverable before __init__ runs.
    body = None
    url = None
    current_progress = None
    process_version = None

    def __init__(self, body, task_handler, required_args=None, download=True):
        """
        Constructor.

        :param body: Body of request message as defined by Vesta-workgroup.
        :param task_handler: Task instance of a Celery application.
        :param required_args: Required arguments in 'misc', expressed as a
                              dict mapping argument name to a description
                              of its use.
        """
        self.body = body
        self.logger = logging.getLogger(__name__)
        self.logger.info("Handling task")
        self.logger.debug("Body has contents %s", body)
        self.host = getfqdn()
        # Docker-related fields.
        self.docker_image = body['docker_image']
        self.volume_mapping = body['volume_mapping']
        # Cloud/queue-related fields.
        self.queue_name = body['queue_name']
        # Processing parameters.
        self.input_data = body['input_data']
        # Transitional flag: controls whether ogc-processing apps receive
        # parameters as environment variables rather than CLI arguments.
        self.param_as_envar = body['param_as_envar']
        self.task_handler = task_handler
        self.start_time = datetime.now().strftime(DATETIME_FORMAT)

    def set_progress(self, progress):
        """
        Record the task's progress in the Celery result backend.

        :param progress: Progress value between 0 and 100.
        :type progress: int
        """
        self.logger.debug("Setting progress to value %s", progress)
        if not isinstance(progress, int):
            raise TypeError("Progress must be expressed as an int")
        if not 0 <= progress <= 100:
            raise ValueError("Progress must be between 0 and 100")
        self.current_progress = progress
        if not self.task_handler:
            self.logger.warning("Could not set progress at back-end")
            return
        self.task_handler.update_state(
            state='PROGRESS',
            meta={
                'current': progress,
                'total': 100,
                'worker_id_version': self.process_version,
                'start_time': self.start_time,
                'host': self.host,
            },
        )
import logging
import os
from datetime import datetime
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def format_body_request(
docker_image='',
dockerim_version=None,
registry_url='',
input_data={},
param_as_envar=True,
volume_mapping={},
queue_name='celery'):
return {
'docker_image': docker_image,
'input_data': input_data,
'param_as_envar': param_as_envar,
'volume_mapping': volume_mapping,
'queue_name': queue_name,
}
class Request(object):
"""
Container class for all attributes relative to an annotation request.
An instance of this class is meant to exist only during the processing
of a request. (Hence it's name).
Also offers general helper functions in the context of the Vesta workgroup
annotators. (Can be used elsewhere also).
"""
body = None
url = None
current_progress = None
process_version = None
def __init__(self, body, task_handler, required_args=None, download=True):
"""
Constructor.
:param body: Body of request message as defined by Vesta-workgroup.
:param task_handler: Task instance of a Celery application.
:param required_args: Required argments in 'misc', expressed as a dict
where the key is the name of the arg and the
value is a description of it's use.
"""
self.body = body
self.logger = logging.getLogger(__name__)
self.logger.info("Handling task")
self.logger.debug("Body has contents %s", body)
self.host = getfqdn()
# Docker params
self.docker_image = self.body['docker_image']
self.volume_mapping = self.body['volume_mapping']
# Cloud params
self.queue_name = self.body['queue_name']
# Process params
self.input_data = self.body['input_data']
# This variable won't be needed later, it is used here to adapt the way ogc-processing apps process its
# parameters (as command line argument instead of environment variable)
self.param_as_envar = self.body['param_as_envar']
self.task_handler = task_handler
self.start_time = datetime.now().strftime(DATETIME_FORMAT)
def set_progress(self, progress):
"""
Helper function to set the progress state in the Celery Task backend.
:param progress: Progress value between 0 and 100.
:type progress: int
"""
self.logger.debug("Setting progress to value %s", progress)
if not isinstance(progress, int):
raise TypeError("Progress must be expressed as an int")
if progress < 0 or 100 < progress:
raise ValueError("Progress must be between 0 and 100")
self.current_progress = progress
if self.task_handler:
meta = {'current': progress,
'total': 100,
'worker_id_version': self.process_version,
'start_time': self.start_time,
'host': self.host,}
self.task_handler.update_state(state='PROGRESS', meta=meta)
else:
self.logger.warning("Could not set progress at back-end") | 0.432183 | 0.101367 |
from typing import List
from ball.pybullet_ball import PyBulletBall
from utils.button import Button
from utils.physics import get_force_vector
class PyBulletBallController:
    """Debug-GUI controller for a PyBulletBall.

    Registers sliders and buttons on the PyBullet debug UI that let the
    user drop the ball from a chosen height, set its spin, and throw it
    toward a target position.
    """

    def __init__(self, ball: PyBulletBall):
        self.pybullet_client = ball.pybullet_client
        self._ball = ball
        # One slider per angular-velocity component (x, y, z), each spanning
        # [-MAX_ANGULAR_VELOCITY, MAX_ANGULAR_VELOCITY] with default 0.
        self._set_rotation_sliders = [
            self.pybullet_client.addUserDebugParameter(
                "Angular velocity x",
                -ball.MAX_ANGULAR_VELOCITY,
                ball.MAX_ANGULAR_VELOCITY,
                0,
            ),
            self.pybullet_client.addUserDebugParameter(
                "Angular velocity y",
                -ball.MAX_ANGULAR_VELOCITY,
                ball.MAX_ANGULAR_VELOCITY,
                0,
            ),
            self.pybullet_client.addUserDebugParameter(
                "Angular velocity z",
                -ball.MAX_ANGULAR_VELOCITY,
                ball.MAX_ANGULAR_VELOCITY,
                0,
            ),
        ]
        self._set_ball_height_slider = self.pybullet_client.addUserDebugParameter(
            "Set initial ball height", 0, ball.MAX_HEIGHT, ball.DEFAULT_POSITION[2]
        )
        self._set_ball_height_button = Button(
            self.pybullet_client.addUserDebugParameter(
                "Drop ball with rotation", 1, 0, 0
            )
        )
        self._set_rotation_button = Button(
            self.pybullet_client.addUserDebugParameter(
                "Set rotation without changing position", 1, 0, 0
            )
        )
        self._throw_ball_button = Button(
            self.pybullet_client.addUserDebugParameter(
                "Throw ball towards paddle", 1, 0, 0
            )
        )

    def _read_rotation_sliders(self) -> List[float]:
        """Read the current [x, y, z] angular-velocity slider values."""
        # Extracted helper: this read was duplicated in the two check_*
        # methods below.
        return [
            self.pybullet_client.readUserDebugParameter(slider)
            for slider in self._set_rotation_sliders
        ]

    def check_if_drop_with_rotation(self):
        """If the drop button was clicked, reset the ball to the slider
        height and apply the slider spin."""
        if self._set_ball_height_button.was_clicked():
            self._reset_ball_position(
                self.pybullet_client.readUserDebugParameter(
                    self._set_ball_height_slider
                )
            )
            self._ball.set_ball_angular_velocity(self._read_rotation_sliders())

    def check_and_update_rotation(self):
        """If the rotation button was clicked, apply the slider spin without
        moving the ball."""
        if self._set_rotation_button.was_clicked():
            self._ball.set_ball_angular_velocity(self._read_rotation_sliders())

    def _reset_ball_position(self, height: float):
        """Move the ball to its default x/y at the given height, default orientation."""
        self._ball.set_position(
            [self._ball.DEFAULT_POSITION[0], self._ball.DEFAULT_POSITION[1], height],
            self._ball.DEFAULT_ORIENTATION,
        )

    def should_throw_ball(self) -> bool:
        """Return True when the throw button was clicked since last check."""
        return self._throw_ball_button.was_clicked()

    def throw_ball(self, position: List[float]):
        """Apply an external force pushing the ball toward `position` (x, y, z)."""
        # NOTE(review): assert is stripped under `python -O`; kept to preserve
        # the AssertionError contract, but a ValueError guard may be safer.
        assert len(position) == 3
        vec = get_force_vector(self._ball.get_position(), position)
        self.pybullet_client.applyExternalForce(
            self._ball.id, -1, vec, [0, 0, 0], self.pybullet_client.WORLD_FRAME
        )
from ball.pybullet_ball import PyBulletBall
from utils.button import Button
from utils.physics import get_force_vector
class PyBulletBallController:
def __init__(self, ball: PyBulletBall):
self.pybullet_client = ball.pybullet_client
self._ball = ball
self._set_rotation_sliders = [
self.pybullet_client.addUserDebugParameter(
"Angular velocity x",
-ball.MAX_ANGULAR_VELOCITY,
ball.MAX_ANGULAR_VELOCITY,
0,
),
self.pybullet_client.addUserDebugParameter(
"Angular velocity y",
-ball.MAX_ANGULAR_VELOCITY,
ball.MAX_ANGULAR_VELOCITY,
0,
),
self.pybullet_client.addUserDebugParameter(
"Angular velocity z",
-ball.MAX_ANGULAR_VELOCITY,
ball.MAX_ANGULAR_VELOCITY,
0,
),
]
self._set_ball_height_slider = self.pybullet_client.addUserDebugParameter(
"Set initial ball height", 0, ball.MAX_HEIGHT, ball.DEFAULT_POSITION[2]
)
self._set_ball_height_button = Button(
self.pybullet_client.addUserDebugParameter(
"Drop ball with rotation", 1, 0, 0
)
)
self._set_rotation_button = Button(
self.pybullet_client.addUserDebugParameter(
"Set rotation without changing position", 1, 0, 0
)
)
self._throw_ball_button = Button(
self.pybullet_client.addUserDebugParameter(
"Throw ball towards paddle", 1, 0, 0
)
)
def check_if_drop_with_rotation(self):
if self._set_ball_height_button.was_clicked():
self._reset_ball_position(
self.pybullet_client.readUserDebugParameter(
self._set_ball_height_slider
)
)
self._ball.set_ball_angular_velocity(
[
self.pybullet_client.readUserDebugParameter(i)
for i in self._set_rotation_sliders
]
)
def check_and_update_rotation(self):
if self._set_rotation_button.was_clicked():
self._ball.set_ball_angular_velocity(
[
self.pybullet_client.readUserDebugParameter(i)
for i in self._set_rotation_sliders
]
)
def _reset_ball_position(self, height: float):
self._ball.set_position(
[self._ball.DEFAULT_POSITION[0], self._ball.DEFAULT_POSITION[1], height],
self._ball.DEFAULT_ORIENTATION,
)
def should_throw_ball(self) -> bool:
return self._throw_ball_button.was_clicked()
def throw_ball(self, position: List[float]):
assert len(position) == 3
vec = get_force_vector(self._ball.get_position(), position)
self.pybullet_client.applyExternalForce(
self._ball.id, -1, vec, [0, 0, 0], self.pybullet_client.WORLD_FRAME
) | 0.90387 | 0.272595 |
import json
import tempfile
import zipfile
from contextlib import contextmanager
from utils import (
codepipeline_lambda_handler,
create_zip_file,
get_artifact_s3_client,
get_cloudformation_template,
get_input_artifact_location,
get_output_artifact_location,
get_session,
get_user_parameters,
log,
)
@codepipeline_lambda_handler
def lambda_handler(event, context):
    """
    Prepares for an AMI deployment.

    Reads the CodePipeline job's input artifact, publishes the referenced
    image URI to SSM in the target account, and writes the target stack's
    current CloudFormation template as the output artifact.
    """
    # Get details from the event.
    job = event["CodePipeline.job"]
    input_bucket, input_key = get_input_artifact_location(job)
    output_bucket, output_key = get_output_artifact_location(job)
    user_params = get_user_parameters(job)
    assume_role_arn = user_params["AssumeRoleArn"]
    image_parameter_name = user_params["ImageParameterName"]
    stack_name = user_params["StackName"]
    template_filename = user_params["TemplateFilename"]
    # Create client in the pipeline account.
    pipeline_s3_client = get_artifact_s3_client(job)
    # Create clients in the target account (via the assumed role).
    target_session = get_session(
        role_arn=assume_role_arn, session_name="prepare-ami-deployment"
    )
    target_cfn_client = target_session.client("cloudformation")
    target_ssm_client = target_session.client("ssm")
    # Download the input artifact zip file, read imageDetail.json from it,
    # and get the image URI it references.
    with download_zip_file(
        s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key
    ) as zip_file:
        image_detail_string = zip_file.read("imageDetail.json").decode("utf-8")
        log("IMAGE_DETAIL_STRING", image_detail_string)
        image_detail = json.loads(image_detail_string)
        image = image_detail["ImageURI"]
        log("IMAGE", image)
    # Update the SSM parameters with the image,
    # to be used by the CloudFormation deployment stage of the pipeline.
    target_ssm_client.put_parameter(
        Name=image_parameter_name, Value=image, Type="String", Overwrite=True
    )
    # Write the CloudFormation stack's template to the output artifact location,
    # to be used by the CloudFormation deployment stage of the pipeline.
    template = get_cloudformation_template(
        cfn_client=target_cfn_client, stack_name=stack_name
    )
    with create_zip_file({template_filename: template}) as zip_path:
        pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)
@contextmanager
def download_zip_file(s3_client, bucket, key):
    """
    Downloads a zip file from S3 and yields it as an open ZipFile.

    :param s3_client: S3 client used to fetch the object.
    :param bucket: Source bucket name.
    :param key: Source object key.
    :yields: zipfile.ZipFile opened read-only on the downloaded content.
    """
    # Bug fix: the original also created a NamedTemporaryFile *outside* the
    # `with` block and never used or closed it, leaking an open file
    # descriptor on every call.
    with tempfile.NamedTemporaryFile() as temp_file:
        s3_client.download_file(bucket, key, temp_file.name)
        with zipfile.ZipFile(temp_file.name, "r") as zip_file:
            yield zip_file
import tempfile
import zipfile
from contextlib import contextmanager
from utils import (
codepipeline_lambda_handler,
create_zip_file,
get_artifact_s3_client,
get_cloudformation_template,
get_input_artifact_location,
get_output_artifact_location,
get_session,
get_user_parameters,
log,
)
@codepipeline_lambda_handler
def lambda_handler(event, context):
"""
Prepares for an AMI deployment.
"""
# Get details from the event.
job = event["CodePipeline.job"]
input_bucket, input_key = get_input_artifact_location(job)
output_bucket, output_key = get_output_artifact_location(job)
user_params = get_user_parameters(job)
assume_role_arn = user_params["AssumeRoleArn"]
image_parameter_name = user_params["ImageParameterName"]
stack_name = user_params["StackName"]
template_filename = user_params["TemplateFilename"]
# Create client in the pipeline account.
pipeline_s3_client = get_artifact_s3_client(job)
# Create clients in the target account.
target_session = get_session(
role_arn=assume_role_arn, session_name="prepare-ami-deployment"
)
target_cfn_client = target_session.client("cloudformation")
target_ssm_client = target_session.client("ssm")
# Download the input artifact zip file, read manifest.json from it,
# and get the AMI it references. Also look up the associated image name.
with download_zip_file(
s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key
) as zip_file:
image_detail_string = zip_file.read("imageDetail.json").decode("utf-8")
log("IMAGE_DETAIL_STRING", image_detail_string)
image_detail = json.loads(image_detail_string)
image = image_detail["ImageURI"]
log("IMAGE", image)
# Update the SSM parameters with the image,
# to be used by the CloudFormation deployment stage of the pipeline.
target_ssm_client.put_parameter(
Name=image_parameter_name, Value=image, Type="String", Overwrite=True
)
# Write the CloudFormation stack's template to the output artifact location,
# to be used by the CloudFormation deployment stage of the pipeline.
template = get_cloudformation_template(
cfn_client=target_cfn_client, stack_name=stack_name
)
with create_zip_file({template_filename: template}) as zip_path:
pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)
@contextmanager
def download_zip_file(s3_client, bucket, key):
"""
Downloads and extracts a zip file from S3.
"""
temp_file = tempfile.NamedTemporaryFile()
with tempfile.NamedTemporaryFile() as temp_file:
s3_client.download_file(bucket, key, temp_file.name)
with zipfile.ZipFile(temp_file.name, "r") as zip_file:
yield zip_file | 0.455925 | 0.083516 |
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, Tuple
import numpy as np
from showml.losses.base_loss import Loss
from showml.optimizers.base_optimizer import Optimizer
from showml.utils.dataset import Dataset
from showml.utils.model_utilities import generate_minibatches, initialize_params
from showml.utils.plots import generic_metric_plot
from showml.deep_learning.activations import Sigmoid
class Regression(ABC):
    """Base Regression class.

    Provides the shared training loop, metric tracking, and optimization
    plumbing for linear models; subclasses only implement ``predict``.
    """

    def compile(
        self, optimizer: Optimizer, loss: Loss, metrics: List[Callable] = []
    ) -> None:
        """Compiles the model with the specified optimizer and evaluation metrics.
        This method also initializes the model.history object to store metric values during training.

        Args:
            optimizer (Optimizer): The optimizer to be used for training (showml.optimizers).
            loss (Loss): The loss function used by the model to evaluate the solution.
            metrics (List[Callable], optional): A list of metrics which have to be calculated and displayed for model evaluation. Defaults to [].
        """
        self.optimizer = optimizer
        self.loss = loss
        # Defensive copy: guards against the shared mutable default argument
        # and against the caller mutating their list after compile().
        self.metrics = list(metrics)
        self.history: Dict[str, List[float]] = {
            metric.__name__: [] for metric in self.metrics
        }

    @abstractmethod
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Computes a forward pass of the model on the given data.

        Args:
            X (np.ndarray): The input data to the network.

        Returns:
            np.ndarray: Outputs of the last layer of the network [shape: (num_samples_of_X x num_classes)].
        """
        pass

    def evaluate(self, X: np.ndarray, y: np.ndarray) -> None:
        """Evaluate the model and display all the required metrics (accuracy, r^2 score, etc.).

        Args:
            X (np.ndarray): The input dataset.
            y (np.ndarray): The true labels of the training data.
        """
        z = self.predict(X)
        for metric in self.metrics:
            self.history[metric.__name__].append(metric(y, z))
        # The leading ", " continues the "Epoch: i/n" line printed by fit().
        text_to_display = "".join(
            f", {metric_name}: {values[-1]}"
            for metric_name, values in self.history.items()
        )
        print(text_to_display)

    def plot_metrics(self) -> None:
        """Display a plot for each metric recorded during training."""
        for metric in self.history:
            generic_metric_plot(metric, self.history[metric])

    def optimize(
        self, X: np.ndarray, y: np.ndarray, z: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """This method optimizes the weights and bias of the model using the specified loss function and optimizer.

        Args:
            X (np.ndarray): The input data.
            y (np.ndarray): The true labels/values.
            z (np.ndarray): The predicted labels/values.

        Returns:
            np.ndarray: The updated weights.
            np.ndarray: The updated bias value(s).
        """
        dw, db = self.loss.parameter_gradient(X, y, z)
        weights, bias = self.optimizer.update_weights(self.weights, self.bias, dw, db)
        return weights, bias

    def fit(self, dataset: Dataset, batch_size: int = 32, epochs: int = 1) -> None:
        """This method trains the model given the showml.utils.dataset.Dataset object (initialized with input data X and labels y).

        Args:
            dataset (Dataset): An object of the showml.utils.dataset.Dataset class - the input dataset and true labels/values of the dataset.
            batch_size (int, optional): Number of samples per gradient update. Defaults to 32.
            epochs (int, optional): The number of epochs for training. Defaults to 1.
        """
        # Only the feature count is needed to size the parameter vectors.
        _, num_dimensions = dataset.X.shape
        self.weights, self.bias = initialize_params(num_dimensions)
        for epoch in range(1, epochs + 1):
            print(f"Epoch: {epoch}/{epochs}", end="")
            for X_batch, y_batch in generate_minibatches(
                dataset.X, dataset.y, batch_size, shuffle=True
            ):
                # Forward pass
                z = self.predict(X_batch)
                # Optimize weights
                self.weights, self.bias = self.optimize(X_batch, y_batch, z)
            # Evaluate the model on the entire dataset
            self.evaluate(dataset.X, dataset.y)
class LinearRegression(Regression):
    """Ordinary linear model: prediction is an affine map of the inputs."""

    def predict(self, X: np.ndarray) -> np.ndarray:
        # Affine transform X @ w + b (identical to np.dot for 1-D/2-D inputs).
        return X @ self.weights + self.bias
class LogisticRegression(Regression):
    """Binary classifier: a linear model squashed through a sigmoid."""

    def sigmoid(self, X: np.ndarray) -> np.ndarray:
        """The Sigmoid activation function.

        Args:
            X (np.ndarray): The input to the sigmoid function.

        Returns:
            np.ndarray: The output after passing the input through a sigmoid function (showml.deep_learning.activations.Sigmoid).
        """
        return Sigmoid().forward(X)

    def predict(self, X: np.ndarray) -> np.ndarray:
        # Compute the pre-activation (logits) first, then squash into (0, 1).
        logits = np.dot(X, self.weights) + self.bias
        return self.sigmoid(logits)
from typing import Callable, Dict, List, Tuple
import numpy as np
from showml.losses.base_loss import Loss
from showml.optimizers.base_optimizer import Optimizer
from showml.utils.dataset import Dataset
from showml.utils.model_utilities import generate_minibatches, initialize_params
from showml.utils.plots import generic_metric_plot
from showml.deep_learning.activations import Sigmoid
class Regression(ABC):
"""Base Regression class.
"""
def compile(
self, optimizer: Optimizer, loss: Loss, metrics: List[Callable] = []
) -> None:
"""Compiles the model with the specified optimizer and evaluation metrics.
This method also initializes the model.history object to store metric values during training.
Args:
optimizer (Optimizer): The optimizer to be used for training (showml.optimizers).
loss (Loss): The loss function used by the model to evaluate the solution.
metrics (List[Callable], optional): A list of metrics which have to be calculated and displayed for model evaluation. Defaults to [].
"""
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
self.history: Dict[str, List[float]] = {
metric.__name__: [] for metric in self.metrics
}
@abstractmethod
def predict(self, X: np.ndarray) -> np.ndarray:
"""Computes a forward pass of the model on the given data.
Args:
X (np.ndarray): The input data to the network.
Returns:
np.ndarray: Outputs of the last layer of the network [shape: (num_samples_of_X x num_classes)]].
"""
pass
def evaluate(self, X: np.ndarray, y: np.ndarray) -> None:
"""Evaluate the model and display all the required metrics (accuracy, r^2 score, etc.).
Args:
X (np.ndarray): The input dataset.
y (np.ndarray): The true labels of the training data.
"""
z = self.predict(X)
for metric in self.metrics:
self.history[metric.__name__].append(metric(y, z))
text_to_display = ""
for metric_name in self.history:
text_to_display += f", {metric_name}: {self.history[metric_name][-1]}"
print(text_to_display)
def plot_metrics(self) -> None:
"""Display the plot after training for the specified metrics
"""
for metric in self.history:
generic_metric_plot(metric, self.history[metric])
def optimize(
self, X: np.ndarray, y: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""This method optimizes the weights and bias of the model using the specified loss function and optimizer.
Args:
X (np.ndarray): The input data.
y (np.ndarray): The true labels/values.
z (np.ndarray): The predicted labels/values.
Returns:
np.ndarray: The Updated weights.
np.ndarray: The Updated bias value(s).
"""
dw, db = self.loss.parameter_gradient(X, y, z)
weights, bias = self.optimizer.update_weights(self.weights, self.bias, dw, db)
return weights, bias
def fit(self, dataset: Dataset, batch_size: int = 32, epochs: int = 1) -> None:
"""This method trains the model given the showml.utils.dataset.Dataset object (initialized with input data X and labels y).
Args:
dataset (Dataset): An object of the showml.utils.dataset.Dataset class - the input dataset and true labels/values of the dataset.
batch_size (int, optional): Number of samples per gradient update. Defaults to 32.
epochs (int, optional): The number of epochs for training. Defaults to 50.
"""
num_samples, num_dimensions = dataset.X.shape
self.weights, self.bias = initialize_params(num_dimensions)
for epoch in range(1, epochs + 1):
print(f"Epoch: {epoch}/{epochs}", end="")
for X_batch, y_batch in generate_minibatches(
dataset.X, dataset.y, batch_size, shuffle=True
):
# Forward pass
z = self.predict(X_batch)
# Optimize weights
self.weights, self.bias = self.optimize(X_batch, y_batch, z)
# Evaluate the model on the entire dataset
self.evaluate(dataset.X, dataset.y)
class LinearRegression(Regression):
def predict(self, X: np.ndarray) -> np.ndarray:
return np.dot(X, self.weights) + self.bias
class LogisticRegression(Regression):
def sigmoid(self, X: np.ndarray) -> np.ndarray:
"""The Sigmoid activation function.
Args:
X (np.ndarray): The input to the sigmoid function.
Returns:
np.ndarray: The output after passing the input through a sigmoid function (showml.deep_learning.activations.Sigmoid).
"""
return Sigmoid().forward(X)
def predict(self, X: np.ndarray) -> np.ndarray:
return self.sigmoid(np.dot(X, self.weights) + self.bias) | 0.976118 | 0.751124 |
import logging
import os
# Current wrappers version. Note that this is not the same as the Engine version.
WRAPPER_VERSION = '1.5.1'
# CHANGELOG:
# 1.5.1 (06 Jan 2022):
# - Minor fix to mod9-asr-switchboard-benchmark.
# 1.5.0 (06 Jan 2022):
# - Fixed handling of gs:// URIs in which the blob name requires percent-encoded URLs.
# - Make "languageCode" optional, in contrast to Google; default is first Engine model loaded.
# - Do not accept "command_and_search" as model type.
# - The speech_mod9 module and REST API now extend support for "asrModel".
# - This will override Google-compatible "languageCode" and "model", if specified.
# - The speech_mod9 module and REST API now extend support for "maxWordAlternatives".
# - Only allow "maxAlternatives" up to 1000 transcript-level alternatives.
# - Improve determination of WAV files by checking header of HTTP(S) URI files.
# - Confirm existence of audio via URI to avoid waiting for timeout if audio does not exist.
# - The speech_mod9 module and REST API now extend support for "intervalsJson".
# - Remove "enablePhraseConfidence" option from speech_mod9 and REST API; always report biases.
# - The REST API now always reports 19-digit operation names.
# - Various minor changes to mod9-asr-switchboard-benchmark:
# - Default of 0 for --max-expansions, fully-expanded alternatives assuming bugfixed SCTK.
# - Improved parsing of Google STT-formatted results, and optimization of refiltered CTM.
# - Added --alternatives-max to allow scoring variable depth lists of alternatives.
# - Added --verbose option, false by default since the tool was otherwise too verbose.
# - When verbose, report statistics about the size, depth, and width of alternatives.
# 1.4.2 (16 Dec 2021):
# - Enable mod9-asr-websocket-client to request non-recognize commands without audio data.
# 1.4.1 (27 Nov 2021):
# - Minor bugfixes.
# 1.4.0 (23 Nov 2021):
# - Add mod9-asr-switchboard-benchmark to replicate results at rmtg.co/benchmark.
# 1.3.0 (18 Nov 2021):
# - Add mod9-asr-elasticsearch-client to demonstrate indexing of phrase alternatives.
# - Enable non-English languages to be specified with "languageCode" option.
# Unlike Google STT, a region suffix may be omitted, e.g. "en" instead of "en-US".
# - Support the "model" option, similarly to Google STT.
# 1.2.1 (11 Nov 2021):
# - Bugfix to allow WebSocket server to handle responses up to 1 MiB (instead of 64KiB).
# - This setting may be overriden with the --websocket-limit-bytes option.
# 1.2.0 (30 Aug 2021):
# - Improved logging.
# - Allow "rate" option to be in the range [8000,48000], as with Google STT.
# - Added "speed" option to speech_mod9.
# - Added "options_json" to speech_mod9.
# 1.1.1 (11 Aug 2021):
# - Rebuild correctly (after `rm -rf build/ dist/ *.egg-info`)
# 1.1.0 (11 Aug 2021):
# - Released in coordination with Engine version 1.1.0 (coincidental version match, not causal).
# - Added "latency" request option to speech_mod9.
# - REST API now logs to a file, with UUIDs both for itself and the proxied Engine.
# 1.0.0 (31 Jul 2021):
# - This version is not compatible with Engine version < 1.0.0 (due to "asr-model" option).
# - Bugfixes to WebSocket interface; also add --skip-engine-check and --allow-*-uri (for REST).
# 0.5.0 (28 May 2021): Add Websocket Interface.
# 0.4.1 (20 May 2021): Additional minor documentation fixes; Flask-RESTful version pinning.
# 0.4.0 (30 Apr 2021): Rename mod9-rest-server to mod9-asr-rest-api; minor documentation fixes.
# Range of compatible Engine versions for current wrappers.
# Lower bound is inclusive, upper bound is exclusive.
# ``None`` indicates no bound.
WRAPPER_ENGINE_COMPATIBILITY_RANGE = ('1.0.0', None) # tested at 1.2.0 as of 2021 Aug 29.
# Engine network location; overridable via environment, e.g. for containerized deployments.
ASR_ENGINE_HOST = os.getenv('ASR_ENGINE_HOST', 'localhost')
ASR_ENGINE_PORT = int(os.getenv('ASR_ENGINE_PORT', 9900))
# Socket timeouts (seconds): initial connect vs. mid-stream inactivity.
SOCKET_CONNECTION_TIMEOUT_SECONDS = 10.0
SOCKET_INACTIVITY_TIMEOUT_SECONDS = 120.0
# Delay between retries while waiting for the Engine to accept connections.
ENGINE_CONNECTION_RETRY_SECONDS = 1.0
# These should be small enough so that it doesn't trigger the Engine's read timeout (10s default).
MAX_CHUNK_SIZE = 128 * 1024  # Used as chunk size for URI producers; limits generators.
GS_CHUNK_SIZE = 262144  # Google requires chunks be multiples of 262144
# Flask deployment mode (e.g. 'development'); None when unset.
FLASK_ENV = os.getenv('FLASK_ENV', None)
# Audio URI prefixes to accept, used by REST only (PySDK allows all).
# Operator can set at server launch (comma-separated, e.g. "gs,http,https");
# default is allow none.
# BUGFIX: the original left this as ``set()`` when the variable was unset but
# as a processed string-derived set otherwise, and leaked '' entries from
# stray commas; normalize so it is always a set of lower-case scheme names.
_allowed_uri_schemes_env = os.getenv('ASR_REST_API_ALLOWED_URI_SCHEMES', '')
ASR_REST_API_ALLOWED_URI_SCHEMES = {
    scheme.replace('://', '')
    for scheme in _allowed_uri_schemes_env.lower().split(',')
    if scheme  # drop empty segments (unset variable or doubled commas)
}
if 'http' in ASR_REST_API_ALLOWED_URI_SCHEMES and 'https' not in ASR_REST_API_ALLOWED_URI_SCHEMES:
    logging.warning('REST API set to allow http:// but NOT https:// audio URIs.')
# Limit on number of bytes allowed per reply line read by WebSocket server.
WEBSOCKET_LIMIT_BYTES = 1024 * 1024 # 1 MiB | mod9/reformat/config.py | import logging
import os
# Current wrappers version. Note that this is not the same as the Engine version.
WRAPPER_VERSION = '1.5.1'
# CHANGELOG:
# 1.5.1 (06 Jan 2022):
# - Minor fix to mod9-asr-switchboard-benchmark.
# 1.5.0 (06 Jan 2022):
# - Fixed handling of gs:// URIs in which the blob name requires percent-encoded URLs.
# - Make "languageCode" optional, in contrast to Google; default is first Engine model loaded.
# - Do not accept "command_and_search" as model type.
# - The speech_mod9 module and REST API now extend support for "asrModel".
# - This will override Google-compatible "languageCode" and "model", if specified.
# - The speech_mod9 module and REST API now extend support for "maxWordAlternatives".
# - Only allow "maxAlternatives" up to 1000 transcript-level alternatives.
# - Improve determination of WAV files by checking header of HTTP(S) URI files.
# - Confirm existence of audio via URI to avoid waiting for timeout if audio does not exist.
# - The speech_mod9 module and REST API now extend support for "intervalsJson".
# - Remove "enablePhraseConfidence" option from speech_mod9 and REST API; always report biases.
# - The REST API now always reports 19-digit operation names.
# - Various minor changes to mod9-asr-switchboard-benchmark:
# - Default of 0 for --max-expansions, fully-expanded alternatives assuming bugfixed SCTK.
# - Improved parsing of Google STT-formatted results, and optimization of refiltered CTM.
# - Added --alternatives-max to allow scoring variable depth lists of alternatives.
# - Added --verbose option, false by default since the tool was otherwise too verbose.
# - When verbose, report statistics about the size, depth, and width of alternatives.
# 1.4.2 (16 Dec 2021):
# - Enable mod9-asr-websocket-client to request non-recognize commands without audio data.
# 1.4.1 (27 Nov 2021):
# - Minor bugfixes.
# 1.4.0 (23 Nov 2021):
# - Add mod9-asr-switchboard-benchmark to replicate results at rmtg.co/benchmark.
# 1.3.0 (18 Nov 2021):
# - Add mod9-asr-elasticsearch-client to demonstrate indexing of phrase alternatives.
# - Enable non-English languages to be specified with "languageCode" option.
# Unlike Google STT, a region suffix may be omitted, e.g. "en" instead of "en-US".
# - Support the "model" option, similarly to Google STT.
# 1.2.1 (11 Nov 2021):
# - Bugfix to allow WebSocket server to handle responses up to 1 MiB (instead of 64KiB).
# - This setting may be overriden with the --websocket-limit-bytes option.
# 1.2.0 (30 Aug 2021):
# - Improved logging.
# - Allow "rate" option to be in the range [8000,48000], as with Google STT.
# - Added "speed" option to speech_mod9.
# - Added "options_json" to speech_mod9.
# 1.1.1 (11 Aug 2021):
# - Rebuild correctly (after `rm -rf build/ dist/ *.egg-info`)
# 1.1.0 (11 Aug 2021):
# - Released in coordination with Engine version 1.1.0 (coincidental version match, not causal).
# - Added "latency" request option to speech_mod9.
# - REST API now logs to a file, with UUIDs both for itself and the proxied Engine.
# 1.0.0 (31 Jul 2021):
# - This version is not compatible with Engine version < 1.0.0 (due to "asr-model" option).
# - Bugfixes to WebSocket interface; also add --skip-engine-check and --allow-*-uri (for REST).
# 0.5.0 (28 May 2021): Add Websocket Interface.
# 0.4.1 (20 May 2021): Additional minor documentation fixes; Flask-RESTful version pinning.
# 0.4.0 (30 Apr 2021): Rename mod9-rest-server to mod9-asr-rest-api; minor documentation fixes.
# Range of compatible Engine versions for current wrappers.
# Lower bound is inclusive, upper bound is exclusive.
# ``None`` indicates no bound.
WRAPPER_ENGINE_COMPATIBILITY_RANGE = ('1.0.0', None) # tested at 1.2.0 as of 2021 Aug 29.
ASR_ENGINE_HOST = os.getenv('ASR_ENGINE_HOST', 'localhost')
ASR_ENGINE_PORT = int(os.getenv('ASR_ENGINE_PORT', 9900))
SOCKET_CONNECTION_TIMEOUT_SECONDS = 10.0
SOCKET_INACTIVITY_TIMEOUT_SECONDS = 120.0
ENGINE_CONNECTION_RETRY_SECONDS = 1.0
# These should be small enough so that it doesn't trigger the Engine's read timeout (10s default).
MAX_CHUNK_SIZE = 128 * 1024 # Used as chunk size for URI producers; limits generators.
GS_CHUNK_SIZE = 262144 # Google requires chunks be multiples of 262144
FLASK_ENV = os.getenv('FLASK_ENV', None)
# Audio URI prefixes to accept, used by REST only (PySDK allows all).
# Operator can set at server launch; default is allow none.
ASR_REST_API_ALLOWED_URI_SCHEMES = os.getenv('ASR_REST_API_ALLOWED_URI_SCHEMES', set())
if ASR_REST_API_ALLOWED_URI_SCHEMES:
ASR_REST_API_ALLOWED_URI_SCHEMES = ASR_REST_API_ALLOWED_URI_SCHEMES.lower().split(sep=',')
ASR_REST_API_ALLOWED_URI_SCHEMES = set(
scheme.replace('://', '') for scheme in ASR_REST_API_ALLOWED_URI_SCHEMES
)
if 'http' in ASR_REST_API_ALLOWED_URI_SCHEMES and 'https' not in ASR_REST_API_ALLOWED_URI_SCHEMES:
logging.warning('REST API set to allow http:// but NOT https:// audio URIs.')
# Limit on number of bytes allowed per reply line read by WebSocket server.
WEBSOCKET_LIMIT_BYTES = 1024 * 1024 # 1 MiB | 0.531209 | 0.12711 |
from magicbot import AutonomousStateMachine, state, feedback, timed_state
from components.Actuators.LowLevel.driveTrain import DriveTrain
from components.Input.colorSensor import ColorSensor
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.turretThreshold import TurretThreshold
from components.Actuators.AutonomousControl.turretTurn import TurretTurn
from components.Actuators.HighLevel.turretCalibrate import CalibrateTurret
from components.Input.breakSensors import Sensors, State
from components.Input.navx import Navx
from components.Actuators.AutonomousControl.turnToAngle import TurnToAngle
import logging as log
from utils.DirectionEnums import Direction
class SmokeTest(AutonomousStateMachine):
    """Interactive hardware smoke test.

    Steps through every actuator and sensor on the robot, publishing
    operator instructions through the ``toDo`` feedback so each mechanism
    can be verified in turn.
    """

    compatString = ["teapot"]
    MODE_NAME = "Smoke Test"
    DEFAULT = False

    driveTrain: DriveTrain
    intakeMotor: IntakeMotor
    colorSensor: ColorSensor
    hopperMotor: HopperMotor
    shooterMotors: ShooterMotors
    turretCalibrate: CalibrateTurret
    turretThreshold: TurretThreshold
    turretTurn: TurretTurn
    sensors: Sensors
    navx: Navx
    turnToAngle: TurnToAngle

    # Conservative test speeds/durations so a misbehaving mechanism is easy to stop.
    dumbSpeed = .25
    dumbRPMs = 3000
    time = 2
    toDo = None

    @feedback
    def getToDo(self):
        """Returns the instructions for the smoke test"""
        return self.toDo

    @state
    def driveSetup(self):
        """Zeroes drivetrain odometry before the drive test."""
        self.driveTrain.resetDistTraveled()
        self.next_state("drive")

    @state
    def drive(self):
        """Tests to see if the motors are working with an input from the driver"""
        self.toDo = "Drives robot forwards until it reaches a certain distance"
        self.driveTrain.setTank(-self.dumbSpeed, -self.dumbSpeed)
        dist = int(self.driveTrain.getEstTotalDistTraveled())
        # BUGFIX: the original only stopped inside the 100..115 window, so a
        # robot that skipped past 115 between loop iterations would never stop.
        if dist >= 100:
            self.driveTrain.setTank(0, 0)
            log.error("Drove forwards about 100 inches")
            self.next_state("runIntakeMotor")
        else:
            log.error("Driving")
            self.next_state("drive")

    @state
    def delpoyIntake(self):
        """Deploys the intake"""
        # NOTE(review): state name is a typo of "deployIntake"; kept as-is
        # because magicbot state names are referenced by string.
        self.toDo = "Check to see if intake is deployed"
        pass

    @timed_state(duration = time, next_state = "runShooterMotors")
    def runIntakeMotor(self):
        """Runs the intake motor for 2 seconds"""
        self.toDo = "Check to see if the intake motor is running"
        self.intakeMotor.runIntake(iSpeed = self.dumbSpeed, direction = Direction.kForwards)
        log.error("Running intake motor")

    @timed_state(duration = time, next_state = "runHopperMotor2")
    def runHopperMotor1(self):
        """Runs the first hopper motor for 2 seconds"""
        # NOTE(review): currently unreachable — runIntakeMotor transitions
        # straight to runShooterMotors; hopper calls below are disabled.
        self.toDo = "Check to see if the front hopper motor is running"
        self.intakeMotor.stopIntake()
        #self.hopperMotor.runHopperMotor1(lSpeed = self.dumbSpeed, direction = Direction.kForwards)
        log.error("Running hopper motor 1")

    @timed_state(duration = time, next_state = "runShooterMotors")
    def runHopperMotor2(self):
        """Stops the first hopper motor and runs the second motor for 2 seconds"""
        self.toDo = "Check to see if the back hopper motor is running"
        #self.hopperMotor.stopHopperMotor1()
        #self.hopperMotor.runHopperMotor2(lSpeed = self.dumbSpeed, direction = Direction.kForwards)
        log.error("Running hopper motor 2")

    @timed_state(duration = time, next_state = "finishShooting")
    def runShooterMotors(self):
        """Stops the second hopper motor and runs both shooter motors for 2 seconds"""
        self.toDo = "Check to see if the shooter motors are running"
        #self.hopperMotor.stopHopperMotor2
        self.intakeMotor.stopIntake()
        self.shooterMotors.runShooter(sSpeed1 = self.dumbRPMs, sSpeed2 = self.dumbRPMs)
        self.shooterMotors.execute()
        log.error("Running both shooter motors")

    @state
    def finishShooting(self):
        """Stops the shooter and moves on to turret calibration."""
        self.shooterMotors.stopShooter()
        self.shooterMotors.execute()
        self.next_state("calibrateTurret")

    @state(first=True)
    def calibrateTurret(self):
        """Calibrates the turret's deadzones and checks to see if the turret motor is working"""
        self.toDo = "Check to see if the turret is moving and that the deadzones are calibrated"
        self.turretCalibrate.setUseMotor(True)
        self.turretCalibrate.engage()
        if self.turretThreshold.calibrated:
            self.turretTurn.done()
            self.turretThreshold.setTurretspeed(0)
            # BUGFIX: the original unconditionally re-entered this state, so
            # finishCalibration (and every state after it) was unreachable.
            self.next_state("finishCalibration")
        else:
            self.next_state("calibrateTurret")

    @state
    def finishCalibration(self):
        """Stops the turret and hands off to the color-sensor checks."""
        self.turretThreshold.setTurretspeed(0)
        self.next_state("colorSensorCheckRed")

    @state
    def colorSensorCheckRed(self):
        """Has the user put up a red ball to the color sensor. Will not move on until the ball is red."""
        self.toDo = "Put up a red ball to the color sensor"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        self.colorSensor.execute()
        if self.colorSensor.colorMatched == "red":
            log.error("The ball is red")
            self.next_state("colorSensorCheckBlue")
        elif self.colorSensor.colorMatched == "blue":
            log.error("The ball is not red")
            self.next_state("colorSensorCheckRed")
        else:
            log.error("There is no ball")
            self.next_state("colorSensorCheckRed")

    @state
    def colorSensorCheckBlue(self):
        """Has the user put up a blue ball to the color sensor. Will not move on until the ball is blue."""
        self.toDo = "Put up a blue ball to the color sensor"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        self.colorSensor.execute()
        if self.colorSensor.colorMatched == "blue":
            log.error("The ball is blue")
            self.next_state("checkIntakeSensor")
        elif self.colorSensor.colorMatched == "red":
            log.error("The ball is not blue")
            self.next_state("colorSensorCheckBlue")
        else:
            log.error("There is no ball")
            self.next_state("colorSensorCheckBlue")

    @state
    def checkNavx(self):
        """Has user turn the robot until it gets to a certain angle. Once angle is reached, it moves to the next state. This state uses turnToAngle"""
        self.toDo = "Turn the bot to the right about 45 degrees"
        self.turnToAngle.setAngle(angle = 45)
        if self.turnToAngle.running:
            log.error("Keep turning")
        else:
            log.error("Done turning")
            # BUGFIX: the original assigned a string to self.next_state,
            # clobbering the state-machine method instead of calling it.
            self.next_state("checkIntakeSensor")

    @state
    def checkIntakeSensor(self):
        """Checks to see if the intake break sensor is broken"""
        self.toDo = "Break the break sensor on the intake"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        if self.sensors.loadingSensor(State.kTripped):
            log.error("Tripped")
            self.next_state("checkHopperSensor")
        else:
            log.error("Intake sensor not broken")
            self.next_state("checkIntakeSensor")

    @state
    def checkHopperSensor(self):
        """Checks to see if the hopper break sensor is broken"""
        self.toDo = "Break the break sensor on the hopper"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        if self.sensors.hopperSensor(State.kTripped):
            log.error("Tripped")
            self.next_state("checkShooterSensor")
        else:
            log.error("Hopper sensor not broken")
            self.next_state("checkHopperSensor")

    @state
    def checkShooterSensor(self):
        """Checks to see if the shooter break sensor is broken"""
        self.toDo = "Break the break sensor on the shooter"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        if self.sensors.shootingSensor(State.kTripped):
            log.error("Tripped")
            log.error("Done")
            self.done()
        else:
            log.error("Shooting sensor not broken")
            self.next_state("checkShooterSensor")
from components.Actuators.LowLevel.driveTrain import DriveTrain
from components.Input.colorSensor import ColorSensor
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.turretThreshold import TurretThreshold
from components.Actuators.AutonomousControl.turretTurn import TurretTurn
from components.Actuators.HighLevel.turretCalibrate import CalibrateTurret
from components.Input.breakSensors import Sensors, State
from components.Input.navx import Navx
from components.Actuators.AutonomousControl.turnToAngle import TurnToAngle
import logging as log
from utils.DirectionEnums import Direction
class SmokeTest(AutonomousStateMachine):
compatString = ["teapot"]
MODE_NAME = "Smoke Test"
DEFAULT = False
driveTrain: DriveTrain
intakeMotor: IntakeMotor
colorSensor: ColorSensor
hopperMotor: HopperMotor
shooterMotors: ShooterMotors
turretCalibrate: CalibrateTurret
turretThreshold: TurretThreshold
turretTurn: TurretTurn
sensors: Sensors
navx: Navx
turnToAngle: TurnToAngle
dumbSpeed = .25
dumbRPMs = 3000
time = 2
toDo = None
@feedback
def getToDo(self):
"""Returns the instructions for the smoke test"""
return self.toDo
@state
def driveSetup(self):
self.driveTrain.resetDistTraveled()
self.next_state("drive")
@state
def drive(self):
"""Tests to see if the motors are working with an input from the driver"""
self.toDo = "Drives robot forwards until it reaches a certain distance"
self.driveTrain.setTank(-self.dumbSpeed, -self.dumbSpeed)
if int(self.driveTrain.getEstTotalDistTraveled()) >= 100 and int(self.driveTrain.getEstTotalDistTraveled()) <=115:
self.driveTrain.setTank(0, 0)
log.error("Drove forwards about 100 inches")
self.next_state("runIntakeMotor")
else:
log.error("Driving")
self.next_state("drive")
@state
def delpoyIntake(self):
"""Deploys the intake"""
self.toDo = "Check to see if intake is deployed"
pass
@timed_state(duration = time, next_state = "runShooterMotors")
def runIntakeMotor(self):
"""Runs the intake motor for 2 seconds"""
self.toDo = "Check to see if the intake motor is running"
self.intakeMotor.runIntake(iSpeed = self.dumbSpeed, direction = Direction.kForwards)
log.error("Running intake motor")
@timed_state(duration = time, next_state = "runHopperMotor2")
def runHopperMotor1(self):
"""Runs the first hopper motor for 2 seconds"""
self.toDo = "Check to see if the front hopper motor is running"
self.intakeMotor.stopIntake()
#self.hopperMotor.runHopperMotor1(lSpeed = self.dumbSpeed, direction = Direction.kForwards)
log.error("Running hopper motor 1")
pass
@timed_state(duration = time, next_state = "runShooterMotors")
def runHopperMotor2(self):
"""Stops the first hopper motor adn runs the second motor for 2 seconds"""
self.toDo = "Check to see if the back hopper motor is running"
#self.hopperMotor.stopHopperMotor1()
#self.hopperMotor.runHopperMotor2(lSpeed = self.dumbSpeed, direction = Direction.kForwards)
log.error("Running hopper motor 2")
pass
@timed_state(duration = time, next_state = "finishShooting")
def runShooterMotors(self):
"""Stops the second hopper motor and runs both shooter motors for 2 seconds"""
self.toDo = "Check to see if the shooter motors are running"
#self.hopperMotor.stopHopperMotor2
self.intakeMotor.stopIntake()
self.shooterMotors.runShooter(sSpeed1 = self.dumbRPMs, sSpeed2 = self.dumbRPMs)
self.shooterMotors.execute()
log.error("Running both shooter motors")
@state
def finishShooting(self):
self.shooterMotors.stopShooter()
self.shooterMotors.execute()
self.next_state("calibrateTurret")
@state(first=True)
def calibrateTurret(self):
"""Calibrates the turret's deadzones and checks to see if the turret motor is working"""
self.toDo = "Check to see if the turret is moving and that the deadzones are calibrated"
self.turretCalibrate.setUseMotor(True)
self.turretCalibrate.engage()
self.next_state("calibrateTurret")
if self.turretThreshold.calibrated == True:
self.turretTurn.done()
self.turretThreshold.setTurretspeed(0)
@state
def finishCalibration(self):
self.turretThreshold.setTurretspeed(0)
self.next_state("colorSensorCheckRed")
@state
def colorSensorCheckRed(self):
"""Has the user put up a red ball to the color sensor. Will not move on until the ball is red."""
self.toDo = "Put up a red ball to the color sensor"
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
self.colorSensor.execute()
if self.colorSensor.colorMatched == "red":
log.error("The ball is red")
self.next_state("colorSensorCheckBlue")
elif self.colorSensor.colorMatched == "blue":
log.error("The ball is not red")
self.next_state("colorSensorCheckRed")
else:
log.error("There is no ball")
self.next_state("colorSensorCheckRed")
    @state
    def colorSensorCheckBlue(self):
        """Has the user put up a blue ball to the color sensor. Will not move on until the ball is blue."""
        self.toDo = "Put up a blue ball to the color sensor"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        self.colorSensor.execute()
        if self.colorSensor.colorMatched == "blue":
            log.error("The ball is blue")
            # NOTE(review): jumps straight to checkIntakeSensor, so checkNavx
            # below is never entered from this section — confirm intended.
            self.next_state("checkIntakeSensor")
        elif self.colorSensor.colorMatched == "red":
            log.error("The ball is not blue")
            self.next_state("colorSensorCheckBlue")
        else:
            log.error("There is no ball")
            self.next_state("colorSensorCheckBlue")
@state
def checkNavx(self):
"""Has user turn the robot until it gets to a certain angle. Once angle is reached, it moves to the next state. This state uses turnToAngle"""
self.toDo = "Turn the bot to the right about 45 degrees"
self.turnToAngle.setAngle(angle = 45)
if self.turnToAngle.running:
log.error("Keep turning")
else:
log.error("Done turning")
self.next_state = "checkIntakeSensor"
    @state
    def checkIntakeSensor(self):
        """Checks to see if the intake break sensor is broken"""
        self.toDo = "Break the break sensor on the intake"
        self.hopperMotor.stopHopperMotorBackside()
        self.hopperMotor.stopHopperMotorForeside()
        # loadingSensor(State.kTripped) presumably reports whether the intake
        # beam-break currently reads tripped — confirm against the Sensors helper.
        if self.sensors.loadingSensor(State.kTripped):
            log.error("Tripped")
            self.next_state("checkHopperSensor")
        else:
            log.error("Intake sensor not broken")
            # Stay in this state until the sensor trips.
            self.next_state("checkIntakeSensor")
@state
def checkHopperSensor(self):
"""Checks to see if the hopper break sensor is broken"""
self.toDo = "Break the break sensor on the hopper"
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
if self.sensors.hopperSensor(State.kTripped):
log.error("Tripped")
self.next_state("checkShooterSensor")
else:
log.error("Hopper sensor not broken")
self.next_state("checkHopperSensor")
@state
def checkShooterSensor(self):
"""Checks to see if the shooter break sensor is broken"""
self.toDo = "Break the break sensor on the shooter"
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
if self.sensors.shootingSensor(State.kTripped):
log.error("Tripped")
log.error("Done")
self.done()
else:
log.error("Shooting sensor not broken")
self.next_state("checkShooterSensor") | 0.676406 | 0.38769 |
import serial
import time
# Serial port assignments for the bench setup (Windows COM port names).
ports = {
    "fpga_in": "COM15",
    "x_motor": "COM19",
}
# FPGA link at 115200 baud; motor controller at 9600 with a 1 s read timeout.
fpga_in = serial.Serial(ports["fpga_in"], 115200)
x_motor = serial.Serial(ports["x_motor"], 9600, timeout=1)
# Tag each port so wait_then_write() can label its echoed output.
fpga_in.name = "fpga_in"
x_motor.name = "x_motor"
def wait_then_write(interface, cmd, receive_interface=None):
    """Send ``cmd`` on ``interface``, block until a reply arrives, then echo
    every buffered line labelled with the receiving port's name.

    ``receive_interface`` defaults to the sending interface.
    """
    rx = interface if receive_interface is None else receive_interface
    interface.write(cmd)
    # Busy-wait for the first byte of the response.
    while not rx.in_waiting:
        pass
    # Drain and echo everything currently buffered, then a blank separator.
    while rx.in_waiting:
        print(rx.name, rx.readline().rstrip())
    print()
# Command set appears to be MCode-style (Schneider/IMS MDrive) motion commands
# sent one at a time, echoing each reply — confirm controller model.
# --- Abort any running program, configure, and home the axis ---
wait_then_write(x_motor, b"\x03")
wait_then_write(x_motor, b"L\r")
wait_then_write(x_motor, b"EE=1\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PM = 0\r")
wait_then_write(x_motor, b'PR "C1 ", C1\r')
wait_then_write(x_motor, b'PR "C2 ", C2\r')
wait_then_write(x_motor, b"H 50\r")
wait_then_write(x_motor, b"HM 1\r")
wait_then_write(x_motor, b"H\r")
wait_then_write(x_motor, b'PR "Homed at ", P\r')
wait_then_write(x_motor, b"P = 0\r")
wait_then_write(x_motor, b'PR "Homed OK"\r')
wait_then_write(x_motor, b"E\r")
wait_then_write(x_motor, b"PG\r")
# --- Motion parameters (velocities, accel/decel, currents, switch inputs) ---
wait_then_write(x_motor, b"L\r")
wait_then_write(x_motor, b"EE = 1\r")
wait_then_write(x_motor, b"VI = 1\r")
wait_then_write(x_motor, b"VM = 30720\r")
wait_then_write(x_motor, b"VI = 40\r")
wait_then_write(x_motor, b"A = 40000\r")
wait_then_write(x_motor, b"D = A\r")
wait_then_write(x_motor, b"HC = 20\r")
wait_then_write(x_motor, b"RC = 100\r")
wait_then_write(x_motor, b"MT = 100\r")
wait_then_write(x_motor, b"SM = 0\r")
wait_then_write(x_motor, b"SF = 15\r")
wait_then_write(x_motor, b"DB = 8\r")
wait_then_write(x_motor, b"LM = 1\r")
wait_then_write(x_motor, b"S1 = 1,0,0\r")
wait_then_write(x_motor, b"S2 = 3,1,0\r")
wait_then_write(x_motor, b"S3 = 2,1,0\r")
wait_then_write(x_motor, b"S4 = 0,0,0\r")
wait_then_write(x_motor, b"D1 = 5\r")
wait_then_write(x_motor, b"PR PN\r")
wait_then_write(x_motor, b"PR VR\r")
# --- First motion profile + absolute move to 0, with readbacks ---
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=6144\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
wait_then_write(x_motor, b"A=16384\r")
wait_then_write(x_motor, b"D=A\r")
wait_then_write(x_motor, b"SF=8192\r")
#wait_then_write(x_motor, b"P=0\r")
wait_then_write(x_motor, b"DE=1\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"MA 0\r")
wait_then_write(x_motor, b"PR ER\r")
# SerialPort B1 (VICIB1) VR\r
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=4096\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
# KloehnSerialPort A (KLOEHNA) /1&\r
wait_then_write(x_motor, b"PR VI\r")
# --- Large relative move; poll MV (moving flag) until done, then readbacks ---
wait_then_write(x_motor, b"MR -409190\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR ER\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=6144\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"DE=1\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
# --- Run stored program 1, then a final short absolute move ---
wait_then_write(x_motor, b"EX 1\r")
wait_then_write(x_motor, b"PM=0\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"MA -4915\r")
wait_then_write(x_motor, b"PR ER\r")
wait_then_write(x_motor, b"PR VM\r")
# NOTE(review): trailing extraction residue ("| init_x_motor.py | import serial")
# removed from this line; it broke the syntax of the dump.
wait_then_write(x_motor, b"PR VI\r")
# NOTE(review): this section duplicates the script above (a second extraction
# of the same file); its leading `import serial` was lost in extraction and is
# restored here so the section is self-consistent.
import serial
import time
ports = {
    "fpga_in": "COM15",
    "x_motor": "COM19",
}
# FPGA link at 115200 baud; motor controller at 9600 with a 1 s read timeout.
fpga_in = serial.Serial(ports["fpga_in"], 115200)
x_motor = serial.Serial(ports["x_motor"], 9600, timeout=1)
# Tag the ports so wait_then_write() can label echoed output.
fpga_in.name = "fpga_in"
x_motor.name = "x_motor"
def wait_then_write(interface, cmd, receive_interface=None):
    """Send ``cmd``, block until a reply arrives, then echo all buffered lines."""
    # Replies default to arriving on the same interface the command went out on.
    if receive_interface is None:
        receive_interface = interface
    interface.write(cmd)
    # Busy-wait for the first response byte.
    while receive_interface.in_waiting == 0:
        pass
    # Drain and print everything currently buffered, labelled by port name.
    while receive_interface.in_waiting > 0:
        print(receive_interface.name, receive_interface.readline().rstrip())
    print()
# NOTE(review): duplicate extraction of the command sequence above (the dump's
# parsed_code column); commands are byte-identical to the first copy.
wait_then_write(x_motor, b"\x03")
wait_then_write(x_motor, b"L\r")
wait_then_write(x_motor, b"EE=1\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PM = 0\r")
wait_then_write(x_motor, b'PR "C1 ", C1\r')
wait_then_write(x_motor, b'PR "C2 ", C2\r')
wait_then_write(x_motor, b"H 50\r")
wait_then_write(x_motor, b"HM 1\r")
wait_then_write(x_motor, b"H\r")
wait_then_write(x_motor, b'PR "Homed at ", P\r')
wait_then_write(x_motor, b"P = 0\r")
wait_then_write(x_motor, b'PR "Homed OK"\r')
wait_then_write(x_motor, b"E\r")
wait_then_write(x_motor, b"PG\r")
wait_then_write(x_motor, b"L\r")
wait_then_write(x_motor, b"EE = 1\r")
wait_then_write(x_motor, b"VI = 1\r")
wait_then_write(x_motor, b"VM = 30720\r")
wait_then_write(x_motor, b"VI = 40\r")
wait_then_write(x_motor, b"A = 40000\r")
wait_then_write(x_motor, b"D = A\r")
wait_then_write(x_motor, b"HC = 20\r")
wait_then_write(x_motor, b"RC = 100\r")
wait_then_write(x_motor, b"MT = 100\r")
wait_then_write(x_motor, b"SM = 0\r")
wait_then_write(x_motor, b"SF = 15\r")
wait_then_write(x_motor, b"DB = 8\r")
wait_then_write(x_motor, b"LM = 1\r")
wait_then_write(x_motor, b"S1 = 1,0,0\r")
wait_then_write(x_motor, b"S2 = 3,1,0\r")
wait_then_write(x_motor, b"S3 = 2,1,0\r")
wait_then_write(x_motor, b"S4 = 0,0,0\r")
wait_then_write(x_motor, b"D1 = 5\r")
wait_then_write(x_motor, b"PR PN\r")
wait_then_write(x_motor, b"PR VR\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=6144\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
wait_then_write(x_motor, b"A=16384\r")
wait_then_write(x_motor, b"D=A\r")
wait_then_write(x_motor, b"SF=8192\r")
#wait_then_write(x_motor, b"P=0\r")
wait_then_write(x_motor, b"DE=1\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"MA 0\r")
wait_then_write(x_motor, b"PR ER\r")
# SerialPort B1 (VICIB1) VR\r
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=4096\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
# KloehnSerialPort A (KLOEHNA) /1&\r
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"MR -409190\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR MV\r")
wait_then_write(x_motor, b"PR ER\r")
wait_then_write(x_motor, b"VI=1\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"VM=6144\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"VI=410\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"DE=1\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"EX 1\r")
wait_then_write(x_motor, b"PM=0\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"PR VM\r")
wait_then_write(x_motor, b"PR VI\r")
wait_then_write(x_motor, b"MA -4915\r")
wait_then_write(x_motor, b"PR ER\r")
wait_then_write(x_motor, b"PR VM\r")
# NOTE(review): trailing extraction residue ("| 0.149376 | 0.099164 |")
# removed from this line; it broke the syntax of the dump.
wait_then_write(x_motor, b"PR VI\r")
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.datasets.cifar import load_batch
def get_cifar10():
    """Load CIFAR-10 train/test arrays from the local data directory.

    Mirrors tf.keras.datasets.cifar10.load_data() for machines without
    internet access; expects the extracted ``cifar-10-batches-py`` folder
    under ``../../data/`` relative to this file.
    """
    n_train = 50000
    batch_size = 10000
    x_train = np.empty((n_train, 3, 32, 32), dtype='uint8')
    y_train = np.empty((n_train,), dtype='uint8')
    base = os.path.join(os.path.dirname(__file__), "../../data/",
                        "cifar-10-batches-py")
    # Five training batches of 10000 images each.
    for batch_idx in range(1, 6):
        lo = (batch_idx - 1) * batch_size
        hi = batch_idx * batch_size
        fname = os.path.join(base, 'data_batch_' + str(batch_idx))
        x_train[lo:hi], y_train[lo:hi] = load_batch(fname)
    x_test, y_test = load_batch(os.path.join(base, 'test_batch'))
    # Labels as column vectors, matching keras' loader.
    y_train = y_train.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)
    # Batches are stored channels-first; transpose if the backend wants NHWC.
    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)
    x_test = x_test.astype(x_train.dtype)
    y_test = y_test.astype(y_train.dtype)
    return (x_train, y_train), (x_test, y_test)
# compute nodes not connected to the internet
#(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
(x_train, y_train), (x_test, y_test) = get_cifar10()
# Scale pixel values from [0, 255] into [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
# Simple CNN: three conv/pool stages followed by a dense classifier head.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# Labels are integer class ids (column vectors), hence the sparse loss.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10)
# NOTE(review): trailing extraction residue ("| src/models/train_cifar10.py |
# import tensorflow as tf") removed from this line; it broke the dump's syntax.
model.evaluate(x_test, y_test, verbose=2)
# NOTE(review): duplicate extraction of the training script above; its leading
# `import tensorflow as tf` was lost in extraction and is restored here.
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.datasets.cifar import load_batch
def get_cifar10():
    """Load CIFAR-10 train/test arrays from the local data directory.

    NOTE(review): duplicate extraction of the get_cifar10() defined earlier
    in this dump; byte-identical logic.
    """
    num_train_samples = 50000
    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.empty((num_train_samples,), dtype='uint8')
    filedir = os.path.dirname(__file__)
    path = os.path.join(filedir, "../../data/", "cifar-10-batches-py")
    # Five training batches of 10000 images each.
    for i in range(1, 6):
        fpath = os.path.join(path, 'data_batch_' + str(i))
        (x_train[(i - 1) * 10000:i * 10000, :, :, :], y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = load_batch(fpath)
    # Labels as column vectors, matching keras' loader.
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))
    # Batches are stored channels-first; transpose if the backend wants NHWC.
    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)
    x_test = x_test.astype(x_train.dtype)
    y_test = y_test.astype(y_train.dtype)
    return (x_train, y_train), (x_test, y_test)
# NOTE(review): duplicate extraction of the training script above.
# compute nodes not connected to the internet
#(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
(x_train, y_train), (x_test, y_test) = get_cifar10()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='SAME'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10)
# NOTE(review): trailing extraction residue ("| 0.789031 | 0.417509 |")
# removed from this line; it broke the syntax of the dump.
model.evaluate(x_test, y_test, verbose=2)
from os.path import join
from sklearn.externals import joblib
import sys
from grasping_position_inference.training.model_generator import generate_models
from grasping_position_inference.inference.model import Model
# Directory (relative to the CWD) holding the joblib-pickled classifiers.
MODEL_PATH = 'models'
def get_probability_distribution_for_grid(x, y, model_name):
    """Return per-row grasp-success probabilities for a grid of points.

    x, y -- parallel sequences of coordinate rows; row i of each is zipped
    into (x, y) points.  model_name -- file name of a joblib-pickled
    classifier under MODEL_PATH.
    Returns a list of rows, each a list of class-1 probabilities from
    predict_proba (presumably P(success) — confirm label encoding).
    """
    model = joblib.load(join(MODEL_PATH, model_name))
    result = []
    # zip() replaces the old `for i in range(len(x))` index loop.
    for row_x, row_y in zip(x, y):
        # Materialise the points: predict_proba needs a concrete 2-D
        # array-like, and under Python 3 a bare map() is a lazy iterator.
        points = [list(pair) for pair in zip(row_x, row_y)]
        result.append([proba[1] for proba in model.predict_proba(points)])
    return result
if __name__ == "__main__":
    args = sys.argv[1:]
    # args[0] and args[1] are forwarded to training — presumably a data path
    # and a model/grid identifier; confirm against generate_models' signature.
    generate_models(args[0], args[1])
    # NOTE(review): each Model's `result` overwrites the previous one, so only
    # the last (not_object) distribution is printed below — confirm intended.
    model = Model(args[1])
    model.add_predictor('cup.n.01', 'FRONT','FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'LEFT-SIDE', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'RIGHT-SIDE', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'BACK', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('bowl.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('spoon.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('not_object.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    # Python 2 print statement — this file is Python 2 throughout.
    print result
print 'done' | __main__.py | from os.path import join
# NOTE(review): duplicate extraction of the script above; its leading
# `from os.path import join` was lost in extraction and is restored here.
from os.path import join
from sklearn.externals import joblib
import sys
from grasping_position_inference.training.model_generator import generate_models
from grasping_position_inference.inference.model import Model
# Directory (relative to the CWD) holding the joblib-pickled classifiers.
MODEL_PATH = 'models'
def get_probability_distribution_for_grid(x, y, model_name):
    """Return per-row class-1 probabilities for a grid of (x, y) points.

    NOTE(review): duplicate extraction of the function defined earlier in
    this dump; byte-identical logic (Python 2 — map() returns a list here).
    """
    model_filepath = join(MODEL_PATH, model_name)
    model = joblib.load(model_filepath)
    result = []
    for i in range(0, len(x)):
        success_rate = []
        for predict_values in model.predict_proba(map(list, zip(x[i], y[i]))):
            success_rate.append(predict_values[1])
        result.append(success_rate)
    return result
# NOTE(review): duplicate extraction of the __main__ block above.
if __name__ == "__main__":
    args = sys.argv[1:]
    generate_models(args[0], args[1])
    model = Model(args[1])
    model.add_predictor('cup.n.01', 'FRONT','FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'LEFT-SIDE', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'RIGHT-SIDE', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('cup.n.01', 'BACK', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('bowl.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('bowl.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('spoon.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('spoon.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    model = Model(args[1])
    model.add_predictor('not_object.n.01', 'TOP', 'FRONT', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'LEFT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'RIGHT-SIDE', 'BOTTOM', 'pr2_left_arm')
    model.add_predictor('not_object.n.01', 'TOP', 'BACK', 'BOTTOM', 'pr2_left_arm')
    result = model.get_probability_distribution_for_grid()
    print result
print 'done' | 0.385375 | 0.121399 |
from collections import defaultdict
from blockdiag.tests.utils import BuilderTestCase
class TestBuilderNode(BuilderTestCase):
    """Builder tests for node parsing: placement (xy), labels, shapes and
    per-node attributes across the bundled ``*.diag`` fixtures.

    The assertNode* helpers come from BuilderTestCase and compare each
    expected mapping against the built diagram's nodes by node id.
    (Fix: trailing extraction residue removed from the final line.)
    """
    def test_single_node_diagram(self):
        diagram = self.build('single_node.diag')
        self.assertEqual(1, len(diagram.nodes))
        self.assertEqual(0, len(diagram.edges))
        self.assertEqual('A', diagram.nodes[0].label)
        self.assertEqual((0, 0), diagram.nodes[0].xy)
    def test_node_shape_diagram(self):
        expected = {'A': 'box', 'B': 'roundedbox', 'C': 'diamond',
                    'D': 'ellipse', 'E': 'note', 'F': 'cloud',
                    'G': 'mail', 'H': 'beginpoint', 'I': 'endpoint',
                    'J': 'minidiamond', 'K': 'flowchart.condition',
                    'L': 'flowchart.database', 'M': 'flowchart.input',
                    'N': 'flowchart.loopin', 'O': 'flowchart.loopout',
                    'P': 'actor', 'Q': 'flowchart.terminator', 'R': 'textbox',
                    'S': 'dots', 'T': 'none', 'U': 'square', 'V': 'circle',
                    'Z': 'box'}
        diagram = self.build('node_shape.diag')
        self.assertNodeShape(diagram, expected)
    def test_node_shape_namespace_diagram(self):
        diagram = self.build('node_shape_namespace.diag')
        self.assertNodeShape(diagram, {'A': 'flowchart.condition',
                                       'B': 'condition',
                                       'Z': 'box'})
    def test_node_has_multilined_label_diagram(self):
        diagram = self.build('node_has_multilined_label.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'Z': (0, 1)})
        self.assertNodeLabel(diagram, {'A': "foo\nbar", 'Z': 'Z'})
    def test_quoted_node_id_diagram(self):
        diagram = self.build('quoted_node_id.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), "'A'": (1, 0),
                                    'B': (2, 0), 'Z': (0, 1)})
    def test_node_id_includes_dot_diagram(self):
        diagram = self.build('node_id_includes_dot.diag')
        self.assertNodeXY(diagram, {'A.B': (0, 0), 'C.D': (1, 0),
                                    'Z': (0, 1)})
    def test_multiple_nodes_definition_diagram(self):
        diagram = self.build('multiple_nodes_definition.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (0, 1),
                                    'Z': (0, 2)})
        self.assertNodeColor(diagram, {'A': (255, 0, 0), 'B': (255, 0, 0),
                                       'Z': (255, 255, 255)})
    def test_multiple_node_relation_diagram(self):
        diagram = self.build('multiple_node_relation.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (1, 1), 'D': (2, 0),
                                    'Z': (0, 2)})
    def test_node_attribute(self):
        # One fixture exercising every per-node attribute at once; the
        # defaultdicts supply each attribute's expected default value.
        labels = {'A': 'B', 'B': 'double quoted', 'C': 'single quoted',
                  'D': '\'"double" quoted\'', 'E': '"\'single\' quoted"',
                  'F': 'F', 'G': 'G', 'H': 'H', 'I': 'I',
                  'J': 'Hello', 'K': 'K'}
        colors = {'A': (255, 0, 0), 'B': (255, 255, 255), 'C': (255, 0, 0),
                  'D': (255, 0, 0), 'E': (255, 0, 0), 'F': (255, 255, 255),
                  'G': (255, 255, 255), 'H': (255, 255, 255),
                  'I': (255, 255, 255), 'J': (255, 255, 255),
                  'K': (255, 255, 255)}
        textcolors = defaultdict(lambda: (0, 0, 0))
        textcolors['F'] = (255, 0, 0)
        linecolors = defaultdict(lambda: (0, 0, 0))
        linecolors['I'] = (255, 0, 0)
        numbered = defaultdict(lambda: None)
        numbered['E'] = '1'
        stacked = defaultdict(lambda: False)
        stacked['G'] = True
        fontsize = defaultdict(lambda: None)
        fontsize['H'] = 16
        orientations = defaultdict(lambda: 'horizontal')
        orientations['J'] = 'vertical'
        backgrounds = defaultdict(lambda: None)
        backgrounds['K'] = ('src/blockdiag/tests/diagrams/'
                            'debian-logo-256color-palettealpha.png')
        diagram = self.build('node_attribute.diag')
        self.assertNodeLabel(diagram, labels)
        self.assertNodeColor(diagram, colors)
        self.assertNodeTextColor(diagram, textcolors)
        self.assertNodeLineColor(diagram, linecolors)
        self.assertNodeNumbered(diagram, numbered)
        self.assertNodeStacked(diagram, stacked)
        self.assertNodeFontsize(diagram, fontsize)
        self.assertNodeLabel_Orientation(diagram, orientations)
        self.assertNodeBackground(diagram, backgrounds)
    def test_node_height_diagram(self):
        diagram = self.build('node_height.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (2, 1),
                                    'E': (1, 1), 'Z': (0, 2)})
    def test_branched_diagram(self):
        diagram = self.build('branched.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (1, 1),
                                    'E': (2, 1), 'Z': (0, 2)})
    def test_multiple_parent_node_diagram(self):
        diagram = self.build('multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 2), 'D': (1, 2),
                                    'E': (0, 1), 'Z': (0, 3)})
    def test_twin_multiple_parent_node_diagram(self):
        diagram = self.build('twin_multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 1), 'D': (1, 1),
                                    'E': (0, 2), 'Z': (0, 3)})
    def test_flowable_node_diagram(self):
        diagram = self.build('flowable_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'Z': (0, 1)})
    def test_plugin_autoclass_diagram(self):
        diagram = self.build('plugin_autoclass.diag')
        self.assertNodeXY(diagram, {'A_emphasis': (0, 0),
                                    'B_emphasis': (1, 0),
                                    'C': (1, 1)})
        self.assertNodeStyle(diagram, {'A_emphasis': 'dashed',
                                       'B_emphasis': 'dashed',
                                       'C': None})
        self.assertNodeColor(diagram, {'A_emphasis': (255, 0, 0),
                                       'B_emphasis': (255, 0, 0),
                                       'C': (255, 255, 255)})
    def test_plugin_attributes_diagram(self):
        diagram = self.build('plugin_attributes.diag')
        self.assertNodeTest_Attr1(diagram, {'A': "1", 'B': None})
        self.assertNodeTest_Attr2(diagram, {'A': "2", 'B': None})
        self.assertNodeTest_Attr3(diagram, {'A': "3", 'B': None})
from collections import defaultdict
from blockdiag.tests.utils import BuilderTestCase
class TestBuilderNode(BuilderTestCase):
    """NOTE(review): duplicate extraction of the TestBuilderNode class above
    (the dump's parsed_code column); logic is byte-identical.
    (Fix: trailing extraction residue removed from the final line.)
    """
    def test_single_node_diagram(self):
        diagram = self.build('single_node.diag')
        self.assertEqual(1, len(diagram.nodes))
        self.assertEqual(0, len(diagram.edges))
        self.assertEqual('A', diagram.nodes[0].label)
        self.assertEqual((0, 0), diagram.nodes[0].xy)
    def test_node_shape_diagram(self):
        expected = {'A': 'box', 'B': 'roundedbox', 'C': 'diamond',
                    'D': 'ellipse', 'E': 'note', 'F': 'cloud',
                    'G': 'mail', 'H': 'beginpoint', 'I': 'endpoint',
                    'J': 'minidiamond', 'K': 'flowchart.condition',
                    'L': 'flowchart.database', 'M': 'flowchart.input',
                    'N': 'flowchart.loopin', 'O': 'flowchart.loopout',
                    'P': 'actor', 'Q': 'flowchart.terminator', 'R': 'textbox',
                    'S': 'dots', 'T': 'none', 'U': 'square', 'V': 'circle',
                    'Z': 'box'}
        diagram = self.build('node_shape.diag')
        self.assertNodeShape(diagram, expected)
    def test_node_shape_namespace_diagram(self):
        diagram = self.build('node_shape_namespace.diag')
        self.assertNodeShape(diagram, {'A': 'flowchart.condition',
                                       'B': 'condition',
                                       'Z': 'box'})
    def test_node_has_multilined_label_diagram(self):
        diagram = self.build('node_has_multilined_label.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'Z': (0, 1)})
        self.assertNodeLabel(diagram, {'A': "foo\nbar", 'Z': 'Z'})
    def test_quoted_node_id_diagram(self):
        diagram = self.build('quoted_node_id.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), "'A'": (1, 0),
                                    'B': (2, 0), 'Z': (0, 1)})
    def test_node_id_includes_dot_diagram(self):
        diagram = self.build('node_id_includes_dot.diag')
        self.assertNodeXY(diagram, {'A.B': (0, 0), 'C.D': (1, 0),
                                    'Z': (0, 1)})
    def test_multiple_nodes_definition_diagram(self):
        diagram = self.build('multiple_nodes_definition.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (0, 1),
                                    'Z': (0, 2)})
        self.assertNodeColor(diagram, {'A': (255, 0, 0), 'B': (255, 0, 0),
                                       'Z': (255, 255, 255)})
    def test_multiple_node_relation_diagram(self):
        diagram = self.build('multiple_node_relation.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (1, 1), 'D': (2, 0),
                                    'Z': (0, 2)})
    def test_node_attribute(self):
        labels = {'A': 'B', 'B': 'double quoted', 'C': 'single quoted',
                  'D': '\'"double" quoted\'', 'E': '"\'single\' quoted"',
                  'F': 'F', 'G': 'G', 'H': 'H', 'I': 'I',
                  'J': 'Hello', 'K': 'K'}
        colors = {'A': (255, 0, 0), 'B': (255, 255, 255), 'C': (255, 0, 0),
                  'D': (255, 0, 0), 'E': (255, 0, 0), 'F': (255, 255, 255),
                  'G': (255, 255, 255), 'H': (255, 255, 255),
                  'I': (255, 255, 255), 'J': (255, 255, 255),
                  'K': (255, 255, 255)}
        textcolors = defaultdict(lambda: (0, 0, 0))
        textcolors['F'] = (255, 0, 0)
        linecolors = defaultdict(lambda: (0, 0, 0))
        linecolors['I'] = (255, 0, 0)
        numbered = defaultdict(lambda: None)
        numbered['E'] = '1'
        stacked = defaultdict(lambda: False)
        stacked['G'] = True
        fontsize = defaultdict(lambda: None)
        fontsize['H'] = 16
        orientations = defaultdict(lambda: 'horizontal')
        orientations['J'] = 'vertical'
        backgrounds = defaultdict(lambda: None)
        backgrounds['K'] = ('src/blockdiag/tests/diagrams/'
                            'debian-logo-256color-palettealpha.png')
        diagram = self.build('node_attribute.diag')
        self.assertNodeLabel(diagram, labels)
        self.assertNodeColor(diagram, colors)
        self.assertNodeTextColor(diagram, textcolors)
        self.assertNodeLineColor(diagram, linecolors)
        self.assertNodeNumbered(diagram, numbered)
        self.assertNodeStacked(diagram, stacked)
        self.assertNodeFontsize(diagram, fontsize)
        self.assertNodeLabel_Orientation(diagram, orientations)
        self.assertNodeBackground(diagram, backgrounds)
    def test_node_height_diagram(self):
        diagram = self.build('node_height.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (2, 1),
                                    'E': (1, 1), 'Z': (0, 2)})
    def test_branched_diagram(self):
        diagram = self.build('branched.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (1, 1),
                                    'E': (2, 1), 'Z': (0, 2)})
    def test_multiple_parent_node_diagram(self):
        diagram = self.build('multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 2), 'D': (1, 2),
                                    'E': (0, 1), 'Z': (0, 3)})
    def test_twin_multiple_parent_node_diagram(self):
        diagram = self.build('twin_multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 1), 'D': (1, 1),
                                    'E': (0, 2), 'Z': (0, 3)})
    def test_flowable_node_diagram(self):
        diagram = self.build('flowable_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'Z': (0, 1)})
    def test_plugin_autoclass_diagram(self):
        diagram = self.build('plugin_autoclass.diag')
        self.assertNodeXY(diagram, {'A_emphasis': (0, 0),
                                    'B_emphasis': (1, 0),
                                    'C': (1, 1)})
        self.assertNodeStyle(diagram, {'A_emphasis': 'dashed',
                                       'B_emphasis': 'dashed',
                                       'C': None})
        self.assertNodeColor(diagram, {'A_emphasis': (255, 0, 0),
                                       'B_emphasis': (255, 0, 0),
                                       'C': (255, 255, 255)})
    def test_plugin_attributes_diagram(self):
        diagram = self.build('plugin_attributes.diag')
        self.assertNodeTest_Attr1(diagram, {'A': "1", 'B': None})
        self.assertNodeTest_Attr2(diagram, {'A': "2", 'B': None})
        self.assertNodeTest_Attr3(diagram, {'A': "3", 'B': None})
# FIXME:
# - Too many buttons -- saving should be automatic?
# - Make purpose of 'Add' button clearer.
# - Indicate when the match was fuzzy in the buffer text.
import os
import threading
import urllib
from gi.repository import Gtk, GLib
from quodlibet import const
from quodlibet import qltk
from quodlibet import util
class LyricsPane(Gtk.VBox):
    """Pane that displays, edits, saves and deletes lyrics for one song.

    NOTE(review): this is Python 2 code (``except Exception, err`` and the
    ``file()`` builtin) -- it must not be run under Python 3 without porting.
    """

    def __init__(self, song):
        # Commented code in this method is due to Lyric Wiki's disappearance.
        # See issue 273.
        super(LyricsPane, self).__init__(spacing=12)
        self.set_border_width(12)
        view = Gtk.TextView()
        sw = Gtk.ScrolledWindow()
        sw.add(view)
        # Buttons: download (currently not packed into the button box --
        # see below), save, delete, and "add" (opens lyricwiki in a browser).
        refresh = qltk.Button(_("_Download"), Gtk.STOCK_CONNECT)
        save = Gtk.Button(stock=Gtk.STOCK_SAVE)
        delete = Gtk.Button(stock=Gtk.STOCK_DELETE)
        add = Gtk.Button(stock=Gtk.STOCK_EDIT)
        view.set_wrap_mode(Gtk.WrapMode.WORD)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        # On-disk path of the lyric file for this song.
        lyricname = song.lyric_filename
        buffer = view.get_buffer()
        refresh.connect('clicked', self.__refresh, add, buffer, song)
        save.connect('clicked', self.__save, lyricname, buffer, delete)
        delete.connect('clicked', self.__delete, lyricname, save)
        add.connect('clicked', self.__add, song)
        sw.set_shadow_type(Gtk.ShadowType.IN)
        self.pack_start(sw, True, True, 0)
        bbox = Gtk.HButtonBox()
        bbox.pack_start(save, True, True, 0)
        bbox.pack_start(delete, True, True, 0)
        #bbox.pack_start(refresh, True, True, 0)
        bbox.pack_start(add, True, True, 0)
        self.pack_start(bbox, False, True, 0)
        # Save only becomes sensitive once the buffer changes (see the
        # 'changed' hookup at the end of this method).
        save.set_sensitive(False)
        add.set_sensitive(True)
        if os.path.exists(lyricname):
            buffer.set_text(file(lyricname).read())
        else:
            #buffer.set_text(_("No lyrics found.\n\nYou can click the "
            #                 "Download button to have Quod Libet search "
            #                 "for lyrics online. You can also enter them "
            #                 "yourself and click save."))
            buffer.set_text(_("No lyrics found for this song."))
        buffer.connect_object('changed', save.set_sensitive, True)

    def __add(self, add, song):
        # Open the song's lyricwiki page so the user can add lyrics online.
        artist = song.comma('artist').encode('utf-8')
        util.website("http://lyricwiki.org/%s" % (urllib.quote(artist)))

    def __refresh(self, refresh, add, buffer, song):
        # Kick off the download on a daemon thread so the UI stays responsive.
        buffer.set_text(_("Searching for lyrics..."))
        refresh.set_sensitive(False)
        thread = threading.Thread(
            target=self.__search, args=(song, buffer, refresh, add))
        thread.setDaemon(True)
        thread.start()

    def __search(self, song, buffer, refresh, add):
        # Worker-thread body: all UI updates are routed via GLib.idle_add.
        artist = song.comma("artist")
        title = song.comma("title")
        try:
            sock = urllib.urlopen(
                "http://lyricwiki.org/api.php?"
                "client=QuodLibet&func=getSong&artist=%s&song=%s&fmt=text" % (
                    urllib.quote(artist.encode('utf-8')),
                    urllib.quote(title.encode('utf-8'))))
            text = sock.read()
        except Exception, err:
            # Prefer the OS error string; fall back to a generic message.
            try:
                err = err.strerror.decode(const.ENCODING, 'replace')
            except:
                err = _("Unable to download lyrics.")
            GLib.idle_add(buffer.set_text, err)
            return
        sock.close()
        if text == 'Not found':
            GLib.idle_add(
                buffer.set_text, _("No lyrics found for this song."))
            return
        else:
            GLib.idle_add(buffer.set_text, text)
        GLib.idle_add(refresh.set_sensitive, True)

    def __save(self, save, lyricname, buffer, delete):
        # Ensure the containing directory exists (errors ignored -- it may
        # already exist).
        try:
            os.makedirs(os.path.dirname(lyricname))
        except EnvironmentError, err:
            pass
        try:
            f = file(lyricname, "w")
        except EnvironmentError, err:
            print_w(err.strerror.decode(const.ENCODING, "replace"))
        else:
            start, end = buffer.get_bounds()
            f.write(buffer.get_text(start, end, True))
            f.close()
            delete.set_sensitive(True)
            save.set_sensitive(False)

    def __delete(self, delete, lyricname, save):
        # Remove the lyric file, then try to prune its (possibly now empty)
        # directory; both failures are intentionally ignored.
        try:
            os.unlink(lyricname)
        except EnvironmentError:
            pass
        lyricname = os.path.dirname(lyricname)
        try:
            os.rmdir(lyricname)
        except EnvironmentError:
            pass
        delete.set_sensitive(False)
        save.set_sensitive(True)
# FIXME:
# - Too many buttons -- saving should be automatic?
# - Make purpose of 'Add' button clearer.
# - Indicate when the match was fuzzy in the buffer text.
import os
import threading
import urllib
from gi.repository import Gtk, GLib
from quodlibet import const
from quodlibet import qltk
from quodlibet import util
class LyricsPane(Gtk.VBox):
    """Pane that displays, edits, saves and deletes lyrics for one song.

    NOTE(review): this is Python 2 code (``except Exception, err`` and the
    ``file()`` builtin) -- it must not be run under Python 3 without porting.
    """

    def __init__(self, song):
        # Commented code in this method is due to Lyric Wiki's disappearance.
        # See issue 273.
        super(LyricsPane, self).__init__(spacing=12)
        self.set_border_width(12)
        view = Gtk.TextView()
        sw = Gtk.ScrolledWindow()
        sw.add(view)
        # Buttons: download (currently not packed into the button box --
        # see below), save, delete, and "add" (opens lyricwiki in a browser).
        refresh = qltk.Button(_("_Download"), Gtk.STOCK_CONNECT)
        save = Gtk.Button(stock=Gtk.STOCK_SAVE)
        delete = Gtk.Button(stock=Gtk.STOCK_DELETE)
        add = Gtk.Button(stock=Gtk.STOCK_EDIT)
        view.set_wrap_mode(Gtk.WrapMode.WORD)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        # On-disk path of the lyric file for this song.
        lyricname = song.lyric_filename
        buffer = view.get_buffer()
        refresh.connect('clicked', self.__refresh, add, buffer, song)
        save.connect('clicked', self.__save, lyricname, buffer, delete)
        delete.connect('clicked', self.__delete, lyricname, save)
        add.connect('clicked', self.__add, song)
        sw.set_shadow_type(Gtk.ShadowType.IN)
        self.pack_start(sw, True, True, 0)
        bbox = Gtk.HButtonBox()
        bbox.pack_start(save, True, True, 0)
        bbox.pack_start(delete, True, True, 0)
        #bbox.pack_start(refresh, True, True, 0)
        bbox.pack_start(add, True, True, 0)
        self.pack_start(bbox, False, True, 0)
        # Save only becomes sensitive once the buffer changes (see the
        # 'changed' hookup at the end of this method).
        save.set_sensitive(False)
        add.set_sensitive(True)
        if os.path.exists(lyricname):
            buffer.set_text(file(lyricname).read())
        else:
            #buffer.set_text(_("No lyrics found.\n\nYou can click the "
            #                 "Download button to have Quod Libet search "
            #                 "for lyrics online. You can also enter them "
            #                 "yourself and click save."))
            buffer.set_text(_("No lyrics found for this song."))
        buffer.connect_object('changed', save.set_sensitive, True)

    def __add(self, add, song):
        # Open the song's lyricwiki page so the user can add lyrics online.
        artist = song.comma('artist').encode('utf-8')
        util.website("http://lyricwiki.org/%s" % (urllib.quote(artist)))

    def __refresh(self, refresh, add, buffer, song):
        # Kick off the download on a daemon thread so the UI stays responsive.
        buffer.set_text(_("Searching for lyrics..."))
        refresh.set_sensitive(False)
        thread = threading.Thread(
            target=self.__search, args=(song, buffer, refresh, add))
        thread.setDaemon(True)
        thread.start()

    def __search(self, song, buffer, refresh, add):
        # Worker-thread body: all UI updates are routed via GLib.idle_add.
        artist = song.comma("artist")
        title = song.comma("title")
        try:
            sock = urllib.urlopen(
                "http://lyricwiki.org/api.php?"
                "client=QuodLibet&func=getSong&artist=%s&song=%s&fmt=text" % (
                    urllib.quote(artist.encode('utf-8')),
                    urllib.quote(title.encode('utf-8'))))
            text = sock.read()
        except Exception, err:
            # Prefer the OS error string; fall back to a generic message.
            try:
                err = err.strerror.decode(const.ENCODING, 'replace')
            except:
                err = _("Unable to download lyrics.")
            GLib.idle_add(buffer.set_text, err)
            return
        sock.close()
        if text == 'Not found':
            GLib.idle_add(
                buffer.set_text, _("No lyrics found for this song."))
            return
        else:
            GLib.idle_add(buffer.set_text, text)
        GLib.idle_add(refresh.set_sensitive, True)

    def __save(self, save, lyricname, buffer, delete):
        # Ensure the containing directory exists (errors ignored -- it may
        # already exist).
        try:
            os.makedirs(os.path.dirname(lyricname))
        except EnvironmentError, err:
            pass
        try:
            f = file(lyricname, "w")
        except EnvironmentError, err:
            print_w(err.strerror.decode(const.ENCODING, "replace"))
        else:
            start, end = buffer.get_bounds()
            f.write(buffer.get_text(start, end, True))
            f.close()
            delete.set_sensitive(True)
            save.set_sensitive(False)

    def __delete(self, delete, lyricname, save):
        # Remove the lyric file, then try to prune its (possibly now empty)
        # directory; both failures are intentionally ignored.
        try:
            os.unlink(lyricname)
        except EnvironmentError:
            pass
        lyricname = os.path.dirname(lyricname)
        try:
            os.rmdir(lyricname)
        except EnvironmentError:
            pass
        delete.set_sensitive(False)
        save.set_sensitive(True)
from urllib.parse import urljoin
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from ...text import create_slug
from ..magic import MisencodedCharField, MisencodedTextField
class UserProfile(models.Model):
    """Legacy user profile mapped onto the pre-existing ``uzivatele`` table.

    Column names are kept in Czech to match the legacy database schema;
    the ``Misencoded*`` field types handle the table's legacy encoding.
    """

    # Nullable link to django.contrib.auth's User: legacy rows may exist
    # before an auth User has been created and linked.
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
    jmeno_uzivatele = MisencodedCharField(max_length=20)  # given name
    nick_uzivatele = MisencodedCharField(unique=True, max_length=25)  # unique nickname
    prijmeni_uzivatele = MisencodedCharField(max_length=20)  # surname
    psw_uzivatele = MisencodedCharField(max_length=40)  # password (40 chars -- presumably a hex digest; verify)
    email_uzivatele = MisencodedCharField(max_length=50)
    pohlavi_uzivatele = MisencodedCharField(max_length=4, blank=True, null=True)  # gender
    vek_uzivatele = models.IntegerField(default=0)  # age
    kraj_uzivatele = MisencodedCharField(max_length=20)  # region
    # Chat preferences: colour, font size, reload interval, message count,
    # filter string and whether filtered messages are shown.
    chat_barva = MisencodedCharField(max_length=6)
    chat_pismo = models.IntegerField(default=12)
    chat_reload = models.IntegerField(default=15)
    chat_zprav = models.IntegerField(default=20)
    chat_filtr = MisencodedCharField(max_length=255, blank=True, null=True)
    chat_filtr_zobrazit = models.IntegerField(default=0)
    pospristup = models.DateTimeField(auto_now_add=True)  # NOTE(review): looks like "last access" -- confirm semantics
    level = MisencodedCharField(max_length=1)
    icq_uzivatele = models.IntegerField(default=0)
    vypsat_udaje = MisencodedCharField(max_length=15)  # which personal details to display
    ikonka_uzivatele = MisencodedCharField(max_length=25, blank=True, null=True)  # icon file name (see get_icon_url)
    popis_uzivatele = MisencodedCharField(max_length=255, blank=True, null=True)  # user description
    nova_posta = models.IntegerField(default=0)
    skin = MisencodedCharField(max_length=10)
    reputace = models.IntegerField(default=0)  # reputation score
    reputace_rozdel = models.PositiveIntegerField(default=0)
    status = MisencodedCharField(max_length=1)
    reg_schval_datum = models.DateTimeField(blank=True, null=True, auto_now_add=True)  # registration approval date
    indexhodnotitele = models.DecimalField(max_digits=4, decimal_places=2, default=-99.99)  # rater index; -99.99 = unset
    reload = MisencodedCharField(max_length=1)
    max_level = models.IntegerField(blank=True, null=True)
    api_key = MisencodedCharField(unique=True, max_length=40, blank=True, null=True)

    class Meta:
        db_table = 'uzivatele'

    def get_slug(self):
        """Return a URL slug derived from the nick, or 'neznamy' as fallback."""
        slug = create_slug(self.nick_uzivatele)
        if not slug:
            slug = 'neznamy'
            #TODO: log an error
        return slug

    def get_icon_url(self):
        """Return the absolute URL of the user's icon, or ``None`` if unset."""
        if not self.ikonka_uzivatele:
            return None
        else:
            return urljoin(
                settings.USER_ICON_MEDIA_ROOT_URL,
                self.ikonka_uzivatele
            )

    # Expose the getters as read-only properties.
    icon_url = property(get_icon_url)
    slug = property(get_slug)
# Lazily fetch-or-create the profile of an auth User via ``user.profile``.
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])


@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Keep an existing UserProfile in sync when its User is saved.

    Note that unlike the normal model, we are creating the User lazily
    (instead of UserProfile as usual). Hence, on creation, UserProfile is
    assumed to exist (and needs to be updated with proper relation
    manually), whereas afterwards profiles can be updated as usual.
    """
    if created:
        # YOU are responsible for properly linking User and UserProfile
        # outside of signal handling!
        # ALWAYS use .users.create_user
        pass
    else:
        instance.profile.save()
class LevelSystemParams(models.Model):
    """Key/value storage for level-system parameters (legacy table)."""

    parametr = MisencodedCharField(primary_key=True, max_length=40)  # parameter name (primary key)
    hodnota = MisencodedCharField(max_length=30)  # parameter value

    class Meta:
        db_table = 'level_parametry_2'
from urllib.parse import urljoin
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from ...text import create_slug
from ..magic import MisencodedCharField, MisencodedTextField
class UserProfile(models.Model):
    """Legacy user profile mapped onto the pre-existing ``uzivatele`` table.

    Column names are kept in Czech to match the legacy database schema;
    the ``Misencoded*`` field types handle the table's legacy encoding.
    """

    # Nullable link to django.contrib.auth's User: legacy rows may exist
    # before an auth User has been created and linked.
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
    jmeno_uzivatele = MisencodedCharField(max_length=20)  # given name
    nick_uzivatele = MisencodedCharField(unique=True, max_length=25)  # unique nickname
    prijmeni_uzivatele = MisencodedCharField(max_length=20)  # surname
    psw_uzivatele = MisencodedCharField(max_length=40)  # password (40 chars -- presumably a hex digest; verify)
    email_uzivatele = MisencodedCharField(max_length=50)
    pohlavi_uzivatele = MisencodedCharField(max_length=4, blank=True, null=True)  # gender
    vek_uzivatele = models.IntegerField(default=0)  # age
    kraj_uzivatele = MisencodedCharField(max_length=20)  # region
    # Chat preferences: colour, font size, reload interval, message count,
    # filter string and whether filtered messages are shown.
    chat_barva = MisencodedCharField(max_length=6)
    chat_pismo = models.IntegerField(default=12)
    chat_reload = models.IntegerField(default=15)
    chat_zprav = models.IntegerField(default=20)
    chat_filtr = MisencodedCharField(max_length=255, blank=True, null=True)
    chat_filtr_zobrazit = models.IntegerField(default=0)
    pospristup = models.DateTimeField(auto_now_add=True)  # NOTE(review): looks like "last access" -- confirm semantics
    level = MisencodedCharField(max_length=1)
    icq_uzivatele = models.IntegerField(default=0)
    vypsat_udaje = MisencodedCharField(max_length=15)  # which personal details to display
    ikonka_uzivatele = MisencodedCharField(max_length=25, blank=True, null=True)  # icon file name (see get_icon_url)
    popis_uzivatele = MisencodedCharField(max_length=255, blank=True, null=True)  # user description
    nova_posta = models.IntegerField(default=0)
    skin = MisencodedCharField(max_length=10)
    reputace = models.IntegerField(default=0)  # reputation score
    reputace_rozdel = models.PositiveIntegerField(default=0)
    status = MisencodedCharField(max_length=1)
    reg_schval_datum = models.DateTimeField(blank=True, null=True, auto_now_add=True)  # registration approval date
    indexhodnotitele = models.DecimalField(max_digits=4, decimal_places=2, default=-99.99)  # rater index; -99.99 = unset
    reload = MisencodedCharField(max_length=1)
    max_level = models.IntegerField(blank=True, null=True)
    api_key = MisencodedCharField(unique=True, max_length=40, blank=True, null=True)

    class Meta:
        db_table = 'uzivatele'

    def get_slug(self):
        """Return a URL slug derived from the nick, or 'neznamy' as fallback."""
        slug = create_slug(self.nick_uzivatele)
        if not slug:
            slug = 'neznamy'
            #TODO: log an error
        return slug

    def get_icon_url(self):
        """Return the absolute URL of the user's icon, or ``None`` if unset."""
        if not self.ikonka_uzivatele:
            return None
        else:
            return urljoin(
                settings.USER_ICON_MEDIA_ROOT_URL,
                self.ikonka_uzivatele
            )

    # Expose the getters as read-only properties.
    icon_url = property(get_icon_url)
    slug = property(get_slug)
# Lazily fetch-or-create the profile of an auth User via ``user.profile``.
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])


@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Keep an existing UserProfile in sync when its User is saved.

    Note that unlike the normal model, we are creating the User lazily
    (instead of UserProfile as usual). Hence, on creation, UserProfile is
    assumed to exist (and needs to be updated with proper relation
    manually), whereas afterwards profiles can be updated as usual.
    """
    if created:
        # YOU are responsible for properly linking User and UserProfile
        # outside of signal handling!
        # ALWAYS use .users.create_user
        pass
    else:
        instance.profile.save()
class LevelSystemParams(models.Model):
    """Key/value storage for level-system parameters (legacy table)."""

    parametr = MisencodedCharField(primary_key=True, max_length=40)  # parameter name (primary key)
    hodnota = MisencodedCharField(max_length=30)  # parameter value

    class Meta:
        db_table = 'level_parametry_2'
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
import queue
from tf_agents.environments import py_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import utils
from tf_agents.specs import array_spec
from tf_agents.environments import wrappers
from tf_agents.environments import suite_gym
from tf_agents.trajectories import time_step as ts
from game import Game
tf.compat.v1.enable_v2_behavior()
stack_size = 5
#action_key_up = ActionChains(self.driver).key_up("w")
class RaceGameEnv(py_environment.PyEnvironment):
    """TF-Agents PyEnvironment wrapping the browser racing ``Game``.

    Observations are 55x240x3 float arrays in [0, 1]; the reward is
    shaped from the in-game speed, and an episode terminates once the
    speed stays below 3 for ``stack_size`` consecutive steps.

    NOTE(review): the action spec allows actions 0..3, yet ``_step``
    also handles action 4 ('none') -- that branch is unreachable unless
    ``maximum`` is raised to 4; confirm the intended action count.
    """

    def __init__(self):
        self.game = Game()
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=3, name='action')
        self._observation_spec = array_spec.BoundedArraySpec((55, 240, 3), dtype=np.float64, minimum=0,
                                            maximum=1,name='observation')
        # Current screenshot/state as provided by the game.
        self._state = self.game.takess()
        self._episode_ended = False
        # Rolling window of the last ``stack_size`` speed readings,
        # used to detect a stuck car (game over).
        self._past_speed_queue = queue.Queue(stack_size)
        # "W" is a sentinel meaning "no key currently held to release".
        self.action_key_up = "W"
        print("INIT IS TRIGGERED")

    def action_spec(self):
        return self._action_spec

    def observation_spec(self):
        return self._observation_spec

    def _reset(self):
        # Restart the game and clear the speed history.
        self.game.resetGame()
        self._state = self.game.takess()
        self._episode_ended = False
        self._past_speed_queue = queue.Queue(stack_size)
        #print("RESET METHOD IS TRIGGERED")
        #print(self._state.shape)
        return ts.restart(self._state)

    def _step(self, action):
        # Release the previously held key, if any.
        if(self.action_key_up != 'W'):
            self.action_key_up.perform()
        if self._episode_ended:
            # The last action ended the episode. Ignore the current action and start
            # a new episode.
            return self.reset()

        # Make sure episodes don't go on forever.
        if action == 0:
            action_up = self.game.move('up')
        elif action == 1:
            action_up = self.game.move('left')
        elif action == 2:
            action_up = self.game.move('down')
        elif action == 3:
            action_up = self.game.move('right')
        elif action == 4:
            # Unreachable with the current action spec (maximum=3).
            action_up = self.game.move('none')
        #print("STEP METHOD IS TRIGGERED " , action)
        self._state = self.game.takess()
        speed = int(self.game.getSpead())
        self.action_key_up = action_up
        # Maintain the fixed-size speed window.
        if(self._past_speed_queue.full()):
            self._past_speed_queue.get()
        self._past_speed_queue.put(speed)
        temp_list = list(self._past_speed_queue.queue)
        # Reward: speed above 3 counts double; below 3 is a penalty.
        reward_val = speed -3
        if reward_val>0:
            reward_val = reward_val*2
        #print(temp_list)
        if(len(temp_list)>=stack_size and max(temp_list)<3):
            # Stuck for a full window -> terminate with a large penalty.
            #print("GAME OVER")
            self._episode_ended = True
            return ts.termination(self._state, reward=-50.0)
        elif (len(temp_list)>=stack_size):
            #reward_val = reward_val - 4
            reward_val = reward_val + int(sum(temp_list)/10) #Additional reward for better average speeds
        print('Action is = '+ str(action) +' :: Speed is = ' + str(speed) +' :: Reward is ='+str(reward_val))
        return ts.transition(self._state, reward=reward_val, discount=.7)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
import queue
from tf_agents.environments import py_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import utils
from tf_agents.specs import array_spec
from tf_agents.environments import wrappers
from tf_agents.environments import suite_gym
from tf_agents.trajectories import time_step as ts
from game import Game
tf.compat.v1.enable_v2_behavior()
stack_size = 5
#action_key_up = ActionChains(self.driver).key_up("w")
class RaceGameEnv(py_environment.PyEnvironment):
    """TF-Agents PyEnvironment wrapping the browser racing ``Game``.

    Observations are 55x240x3 float arrays in [0, 1]; the reward is
    shaped from the in-game speed, and an episode terminates once the
    speed stays below 3 for ``stack_size`` consecutive steps.

    NOTE(review): the action spec allows actions 0..3, yet ``_step``
    also handles action 4 ('none') -- that branch is unreachable unless
    ``maximum`` is raised to 4; confirm the intended action count.
    """

    def __init__(self):
        self.game = Game()
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=3, name='action')
        self._observation_spec = array_spec.BoundedArraySpec((55, 240, 3), dtype=np.float64, minimum=0,
                                            maximum=1,name='observation')
        # Current screenshot/state as provided by the game.
        self._state = self.game.takess()
        self._episode_ended = False
        # Rolling window of the last ``stack_size`` speed readings,
        # used to detect a stuck car (game over).
        self._past_speed_queue = queue.Queue(stack_size)
        # "W" is a sentinel meaning "no key currently held to release".
        self.action_key_up = "W"
        print("INIT IS TRIGGERED")

    def action_spec(self):
        return self._action_spec

    def observation_spec(self):
        return self._observation_spec

    def _reset(self):
        # Restart the game and clear the speed history.
        self.game.resetGame()
        self._state = self.game.takess()
        self._episode_ended = False
        self._past_speed_queue = queue.Queue(stack_size)
        #print("RESET METHOD IS TRIGGERED")
        #print(self._state.shape)
        return ts.restart(self._state)

    def _step(self, action):
        # Release the previously held key, if any.
        if(self.action_key_up != 'W'):
            self.action_key_up.perform()
        if self._episode_ended:
            # The last action ended the episode. Ignore the current action and start
            # a new episode.
            return self.reset()

        # Make sure episodes don't go on forever.
        if action == 0:
            action_up = self.game.move('up')
        elif action == 1:
            action_up = self.game.move('left')
        elif action == 2:
            action_up = self.game.move('down')
        elif action == 3:
            action_up = self.game.move('right')
        elif action == 4:
            # Unreachable with the current action spec (maximum=3).
            action_up = self.game.move('none')
        #print("STEP METHOD IS TRIGGERED " , action)
        self._state = self.game.takess()
        speed = int(self.game.getSpead())
        self.action_key_up = action_up
        # Maintain the fixed-size speed window.
        if(self._past_speed_queue.full()):
            self._past_speed_queue.get()
        self._past_speed_queue.put(speed)
        temp_list = list(self._past_speed_queue.queue)
        # Reward: speed above 3 counts double; below 3 is a penalty.
        reward_val = speed -3
        if reward_val>0:
            reward_val = reward_val*2
        #print(temp_list)
        if(len(temp_list)>=stack_size and max(temp_list)<3):
            # Stuck for a full window -> terminate with a large penalty.
            #print("GAME OVER")
            self._episode_ended = True
            return ts.termination(self._state, reward=-50.0)
        elif (len(temp_list)>=stack_size):
            #reward_val = reward_val - 4
            reward_val = reward_val + int(sum(temp_list)/10) #Additional reward for better average speeds
        print('Action is = '+ str(action) +' :: Speed is = ' + str(speed) +' :: Reward is ='+str(reward_val))
        return ts.transition(self._state, reward=reward_val, discount=.7)
from abc import ABC, abstractmethod
class Conta(ABC):
    """Abstract bank account holding an agency, account number and balance.

    Subclasses must implement :meth:`sacar`, because withdrawal limits
    differ between account types.
    """

    def __init__(self, agencia, conta, saldo):
        self._agencia = agencia
        self._conta = conta
        self._saldo = saldo

    @property
    def agencia(self):
        """Agency number (read-only)."""
        return self._agencia

    @property
    def conta(self):
        """Account number (read-only)."""
        return self._conta

    @property
    def saldo(self):
        """Current balance; assignment validates that the value is numeric."""
        return self._saldo

    @saldo.setter
    def saldo(self, valor):
        if isinstance(valor, (int, float)):
            self._saldo = valor
        else:
            raise ValueError('Saldo precisa ser númerico!')

    def depositar(self, valor):
        """Add ``valor`` to the balance and print the account details.

        Raises ``ValueError`` when ``valor`` is not numeric.
        """
        if isinstance(valor, (int, float)):
            self._saldo += valor
            self.detalhes()
        else:
            raise ValueError('Valor do depósito precisa ser númerico!')

    def detalhes(self):
        """Print agency, account and balance on a single line."""
        print(f'Agência: {self.agencia}', end=' ')
        print(f'Conta: {self.conta}', end=' ')
        print(f'Saldo: {self.saldo}')

    @abstractmethod
    def sacar(self, valor):
        """Withdraw ``valor``; limit rules are defined by each subclass."""
class ContaPoupanca(Conta):
    """Savings account: withdrawals are limited to the available balance."""

    def sacar(self, valor):
        """Withdraw ``valor`` if the balance covers it, else warn."""
        if valor > self.saldo:
            print('Saldo insuficiente!')
            return
        self.saldo -= valor
        self.detalhes()
class ContaCorrente(Conta):
    """Checking account: withdrawals may dip into an overdraft limit."""

    def __init__(self, agencia, conta, saldo, limite=100):
        super().__init__(agencia, conta, saldo)
        self._limite = limite

    @property
    def limite(self):
        """Overdraft allowance granted on top of the balance."""
        return self._limite

    def sacar(self, valor):
        """Withdraw ``valor`` if balance plus overdraft covers it, else warn."""
        disponivel = self.saldo + self.limite
        if disponivel < valor:
            print('Saldo insuficiente!')
            return
        self.saldo -= valor
        self.detalhes()
# Demo: savings account -- one deposit, then three withdrawals.
cp = ContaPoupanca(1111, 2222, 0)
cp.depositar(10)
for _valor in (5, 5, 1):
    cp.sacar(_valor)

print('\n#####################################################\n')

# Demo: checking account with a 500 overdraft limit.
cc = ContaCorrente(agencia=1111, conta=3333, saldo=0, limite=500)
cc.depositar(100)
cc.sacar(250)
cc.sacar(500)  # insufficient funds -- prints 'Saldo insuficiente!'
cc.depositar(1000)
from abc import ABC, abstractmethod
class Conta(ABC):
    """Abstract bank account holding an agency, account number and balance.

    Subclasses must implement :meth:`sacar`, because withdrawal limits
    differ between account types.
    """

    def __init__(self, agencia, conta, saldo):
        self._agencia = agencia
        self._conta = conta
        self._saldo = saldo

    @property
    def agencia(self):
        """Agency number (read-only)."""
        return self._agencia

    @property
    def conta(self):
        """Account number (read-only)."""
        return self._conta

    @property
    def saldo(self):
        """Current balance; assignment validates that the value is numeric."""
        return self._saldo

    @saldo.setter
    def saldo(self, valor):
        if isinstance(valor, (int, float)):
            self._saldo = valor
        else:
            raise ValueError('Saldo precisa ser númerico!')

    def depositar(self, valor):
        """Add ``valor`` to the balance and print the account details.

        Raises ``ValueError`` when ``valor`` is not numeric.
        """
        if isinstance(valor, (int, float)):
            self._saldo += valor
            self.detalhes()
        else:
            raise ValueError('Valor do depósito precisa ser númerico!')

    def detalhes(self):
        """Print agency, account and balance on a single line."""
        print(f'Agência: {self.agencia}', end=' ')
        print(f'Conta: {self.conta}', end=' ')
        print(f'Saldo: {self.saldo}')

    @abstractmethod
    def sacar(self, valor):
        """Withdraw ``valor``; limit rules are defined by each subclass."""
class ContaPoupanca(Conta):
    """Savings account: withdrawals are limited to the available balance."""

    def sacar(self, valor):
        """Withdraw ``valor`` if the balance covers it, else warn."""
        if valor > self.saldo:
            print('Saldo insuficiente!')
            return
        self.saldo -= valor
        self.detalhes()
class ContaCorrente(Conta):
    """Checking account: withdrawals may dip into an overdraft limit."""

    def __init__(self, agencia, conta, saldo, limite=100):
        super().__init__(agencia, conta, saldo)
        self._limite = limite

    @property
    def limite(self):
        """Overdraft allowance granted on top of the balance."""
        return self._limite

    def sacar(self, valor):
        """Withdraw ``valor`` if balance plus overdraft covers it, else warn."""
        disponivel = self.saldo + self.limite
        if disponivel < valor:
            print('Saldo insuficiente!')
            return
        self.saldo -= valor
        self.detalhes()
# Demo: savings account -- one deposit, then three withdrawals.
cp = ContaPoupanca(1111, 2222, 0)
cp.depositar(10)
for _valor in (5, 5, 1):
    cp.sacar(_valor)

print('\n#####################################################\n')

# Demo: checking account with a 500 overdraft limit.
cc = ContaCorrente(agencia=1111, conta=3333, saldo=0, limite=500)
cc.depositar(100)
cc.sacar(250)
cc.sacar(500)  # insufficient funds -- prints 'Saldo insuficiente!'
cc.depositar(1000) | 0.693992 | 0.180431 |
import os
import sys
import unittest
from selenium.common.exceptions import InvalidElementStateException
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir)))
import base_test
class ElementClearTest(base_test.WebDriverBaseTest):
    """WebDriver tests for ``clear()`` on writable, disabled and read-only
    <input>/<textarea> elements and on contenteditable areas.

    Fix: the read-only <textarea> test was named
    ``test_read_only_text_input_element_should_not_clear`` -- identical to
    the read-only <input> test -- so the later definition shadowed the
    earlier one and the input variant never ran. The textarea test now has
    a distinct name. ``assertEquals`` (deprecated alias) was also replaced
    by ``assertEqual``.
    """

    def test_writable_text_input_element_should_clear(self):
        # clear() must empty a writable <input>.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_writable_input_page.html"))
        e = self.driver.find_element_by_id("writableTextInput")
        e.clear()
        self.assertEqual("", e.get_attribute("value"))

    def test_disabled_text_input_element_should_not_clear(self):
        # clear() on a disabled <input> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_disabled_input_page.html"))
        e = self.driver.find_element_by_id("disabledTextInput")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_read_only_text_input_element_should_not_clear(self):
        # clear() on a read-only <input> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_readonly_input_page.html"))
        e = self.driver.find_element_by_id("readOnlyTextInput")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_writable_text_area_element_should_clear(self):
        # clear() must empty a writable <textarea>.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_writable_textarea_page.html"))
        e = self.driver.find_element_by_id("writableTextArea")
        e.clear()
        self.assertEqual("", e.get_attribute("value"))

    def test_disabled_text_area_element_should_not_clear(self):
        # clear() on a disabled <textarea> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_disabled_textarea_page.html"))
        e = self.driver.find_element_by_id("disabledTextArea")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_read_only_text_area_element_should_not_clear(self):
        # Renamed from ...text_input... so it no longer shadows the
        # read-only <input> test above.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_readonly_textarea_page.html"))
        e = self.driver.find_element_by_id("readOnlyTextArea")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_content_editable_area_should_clear(self):
        # clear() must empty a contenteditable element's text.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_contenteditable_page.html"))
        e = self.driver.find_element_by_id("contentEditableElement")
        e.clear()
        self.assertEqual("", e.text)
if __name__ == "__main__":
    unittest.main()
import os
import sys
import unittest
from selenium.common.exceptions import InvalidElementStateException
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir)))
import base_test
class ElementClearTest(base_test.WebDriverBaseTest):
    """WebDriver tests for ``clear()`` on writable, disabled and read-only
    <input>/<textarea> elements and on contenteditable areas.

    Fix: the read-only <textarea> test was named
    ``test_read_only_text_input_element_should_not_clear`` -- identical to
    the read-only <input> test -- so the later definition shadowed the
    earlier one and the input variant never ran. The textarea test now has
    a distinct name. ``assertEquals`` (deprecated alias) was also replaced
    by ``assertEqual``.
    """

    def test_writable_text_input_element_should_clear(self):
        # clear() must empty a writable <input>.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_writable_input_page.html"))
        e = self.driver.find_element_by_id("writableTextInput")
        e.clear()
        self.assertEqual("", e.get_attribute("value"))

    def test_disabled_text_input_element_should_not_clear(self):
        # clear() on a disabled <input> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_disabled_input_page.html"))
        e = self.driver.find_element_by_id("disabledTextInput")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_read_only_text_input_element_should_not_clear(self):
        # clear() on a read-only <input> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_readonly_input_page.html"))
        e = self.driver.find_element_by_id("readOnlyTextInput")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_writable_text_area_element_should_clear(self):
        # clear() must empty a writable <textarea>.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_writable_textarea_page.html"))
        e = self.driver.find_element_by_id("writableTextArea")
        e.clear()
        self.assertEqual("", e.get_attribute("value"))

    def test_disabled_text_area_element_should_not_clear(self):
        # clear() on a disabled <textarea> must raise.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_disabled_textarea_page.html"))
        e = self.driver.find_element_by_id("disabledTextArea")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_read_only_text_area_element_should_not_clear(self):
        # Renamed from ...text_input... so it no longer shadows the
        # read-only <input> test above.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_readonly_textarea_page.html"))
        e = self.driver.find_element_by_id("readOnlyTextArea")
        self.assertRaises(InvalidElementStateException, lambda: e.clear())

    def test_content_editable_area_should_clear(self):
        # clear() must empty a contenteditable element's text.
        self.driver.get(self.webserver.where_is(
            "user_input/res/element_clear_contenteditable_page.html"))
        e = self.driver.find_element_by_id("contentEditableElement")
        e.clear()
        self.assertEqual("", e.text)
if __name__ == "__main__":
    unittest.main()
from __future__ import annotations
# stdlib
import logging
import re
from typing import Optional, Union, List, Iterable
# externals
import pandas as pd
import uproot
# tdub
import tdub.config
from tdub.data import (
Region,
avoids_for,
categorize_branches,
branches_from,
selection_branches,
selection_as_numexpr,
)
log = logging.getLogger(__name__)
def raw_dataframe(
    files: str | list[str],
    tree: str = "WtLoop_nominal",
    weight_name: str = "weight_nominal",
    branches: Iterable[str] | None = None,
    drop_weight_sys: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """Construct a raw pandas flavored Dataframe with help from uproot.

    We call this dataframe "raw" because it hasn't been parsed by any
    other tdub.frames functionality (no selection performed, kinematic
    and weight branches won't be separated, etc.) -- just a pure raw
    dataframe from some ROOT files.

    Extra `kwargs` are fed to uproot's ``arrays()`` interface.

    Parameters
    ----------
    files : list(str) or str
        Single ROOT file or list of ROOT files.
    tree : str
        The tree name to turn into a dataframe.
    weight_name: str
        Weight branch (we make sure to grab it if you give something
        other than ``None`` to ``branches``).
    branches : list(str), optional
        List of branches to include as columns in the dataframe,
        default is ``None``, includes all branches.
    drop_weight_sys : bool
        Drop all weight systematics from the being grabbed.

    Returns
    -------
    pandas.DataFrame
        The pandas flavored DataFrame with all requested branches

    Examples
    --------
    >>> from tdub.data import quick_files
    >>> from tdub.frames import raw_dataframe
    >>> files = quick_files("/path/to/files")["ttbar"]
    >>> df = raw_dataframe(files)
    """
    if branches is not None:
        # Make sure the weight branch rides along with the user's request.
        branches = sorted(set(branches) | {weight_name}, key=str.lower)
    else:
        # No explicit request: read the full branch list from the files.
        branches = branches_from(files, tree)
    # NOTE(review): when an explicit branch list was given, weight_name was
    # just unioned in above, so this guard can only trigger for the
    # auto-discovered list -- confirm that is the intent.
    if weight_name not in branches:
        raise RuntimeError(f"{weight_name} not present in {tree}")
    if drop_weight_sys:
        # Symmetric difference removes every branch matching weight_sys*.
        weight_sys_re = re.compile(r"^weight_sys\w+")
        branches = sorted(
            set(branches) ^ set(filter(weight_sys_re.match, branches)), key=str.lower
        )
    if isinstance(files, str):
        files = [files]
    # One dataframe per file, concatenated row-wise.
    result = pd.concat(
        [
            uproot.open(f).get(tree).arrays(branches, library="pd", **kwargs)
            for f in files
        ]
    )
    # Raw frames carry no selection; downstream code inspects this attribute.
    result.selection_used = None
    return result
def iterative_selection(
    files: str | list[str],
    selection: str,
    tree: str = "WtLoop_nominal",
    weight_name: str = "weight_nominal",
    branches: Iterable[str] | None = None,
    keep_category: str | None = None,
    exclude_avoids: bool = False,
    use_campaign_weight: bool = False,
    use_tptrw: bool = False,
    use_trrw: bool = False,
    sample_frac: float | None = None,
    **kwargs,
) -> pd.DataFrame:
    """Build a selected dataframe via uproot's iterate.

    If we want to build a memory-hungry dataframe and apply a
    selection this helps us avoid crashing due to using all of our
    RAM. Constructing a dataframe with this function is useful when we
    want to grab many branches in a large dataset that won't fit in
    memory before the selection.

    The selection can be in either numexpr or ROOT form, we ensure
    that a ROOT style selection is converted to numexpr for use with
    :py:func:`pandas.eval`.

    Parameters
    ----------
    files : list(str) or str
        A single ROOT file or list of ROOT files.
    selection : str
        Selection string (numexpr or ROOT form accepted).
    tree : str
        Tree name to turn into a dataframe.
    weight_name: str
        Weight branch to preserve.
    branches : list(str), optional
        List of branches to include as columns in the dataframe,
        default is ``None`` (all branches).
    keep_category : str, optional
        If not ``None``, the selected dataframe(s) will only include
        columns which are part of the given category (see
        :py:func:`tdub.data.categorize_branches`). The weight branch
        is always kept.
    exclude_avoids : bool
        Exclude branches defined by :py:data:`tdub.config.AVOID_IN_CLF`.
    use_campaign_weight : bool
        Multiply the nominal weight by the campaign weight. this is
        potentially necessary if the samples were prepared without the
        campaign weight included in the product which forms the nominal
        weight.
    use_tptrw : bool
        Apply the top pt reweighting factor.
    use_trrw : bool
        Apply the top recursive reweighting factor.
    sample_frac : float, optional
        Sample a fraction of the available data.

    Returns
    -------
    pandas.DataFrame
        The final selected dataframe(s) from the files.

    Examples
    --------
    Creating a ``ttbar_df`` dataframe a single ``tW_df`` dataframe:

    >>> from tdub.frames import iterative_selection
    >>> from tdub.data import quick_files
    >>> from tdub.data import selection_for
    >>> qf = quick_files("/path/to/files")
    >>> ttbar_dfs = iterative_selection(qf["ttbar"], selection_for("2j2b"),
    ...                                 entrysteps="1 GB")
    >>> tW_df = iterative_selection(qf["tW_DR"], selection_for("2j2b"))

    Keep only kinematic branches after selection and ignore avoided columns:

    >>> tW_df = iterative_selection(qf["tW_DR"],
    ...                             selection_for("2j2b"),
    ...                             exclue_avoids=True,
    ...                             keep_category="kinematics")
    """
    # determine which branches will be used for selection only and
    # which branches we need for weights
    sel_branches = selection_branches(selection)
    weights_to_grab = {weight_name}
    if use_campaign_weight:
        weights_to_grab.add("weight_campaign")
        log.info("applying the campaign weight")
    if use_tptrw:
        weights_to_grab.add("weight_tptrw_tool")
        log.info("applying the top pt reweighting factor")
    if use_trrw:
        weights_to_grab.add("weight_trrw_tool")
        log.info("applying the top recursive reweighting factor")
    if sample_frac is not None:
        log.info(f"Sampling {100 * sample_frac}% of events")
    if branches is None:
        branches = set(branches_from(files, tree=tree))
    branches = set(branches)
    # Branches needed only by the selection, not requested by the caller;
    # these are read but dropped from the final frame.
    sel_only_branches = sel_branches - branches

    # determine which branches to keep after reading dataframes and
    # are necessary during reading.
    if keep_category is not None:
        branches_cated = categorize_branches(list(branches))
        keep_cat = set(branches_cated[keep_category])
        keep = keep_cat & branches
        read_branches = list(keep | weights_to_grab | sel_branches)
    else:
        keep = branches
        read_branches = list(branches | weights_to_grab | sel_branches)

    # drop avoided classifier variables
    if exclude_avoids:
        keep = keep - set(tdub.config.AVOID_IN_CLF)

    # always drop selection only branches
    keep = keep - sel_only_branches

    # always keep the requested weight (enforce here just in
    # case). sort into a list and move on to dataframes
    keep.add(weight_name)
    keep_cols = sorted(keep, key=str.lower)
    if isinstance(files, str):
        files = [files]
    numexpr_sel = selection_as_numexpr(selection)
    dfs = []
    for i, f in enumerate(files):
        df = uproot.open(f).get(tree).arrays(read_branches, library="pd", **kwargs)
        if sample_frac is not None:
            # Sample before weighting/selection; fixed seed for reproducibility.
            df = df.sample(frac=sample_frac, random_state=tdub.config.RANDOM_STATE)
        if use_campaign_weight:
            apply_weight_campaign(df)
        if use_tptrw:
            apply_weight_tptrw(df)
        if use_trrw:
            apply_weight_trrw(df)
        # Apply the selection, then slim to the final column set.
        idf = df.query(numexpr_sel)
        idf = idf[keep_cols]
        dfs.append(idf)
        log.debug(f"finished iteration {i}")

    # Concatenate per-file selections and record the selection used.
    result = pd.concat(dfs)
    result.selection_used = numexpr_sel
    return result
def satisfying_selection(*dfs: pd.DataFrame, selection: str) -> list[pd.DataFrame]:
    """Get subsets of dataframes that satisfy a selection.

    The selection string can be in either ROOT or numexpr form (a
    ROOT style string is converted to numexpr before use).

    Parameters
    ----------
    *dfs : sequence of :py:obj:`pandas.DataFrame`
        Dataframes to apply the selection to.
    selection : str
        Selection string (in numexpr or ROOT form).

    Returns
    -------
    list(pandas.DataFrame)
        Dataframes satisfying the selection string.

    Examples
    --------
    >>> from tdub.data import quick_files
    >>> from tdub.frames import raw_dataframe, satisfying_selection
    >>> qf = quick_files("/path/to/files")
    >>> df_tW_DR = raw_dataframe(qf["tW_DR"])
    >>> df_ttbar = raw_dataframe(qf["ttbar"])
    >>> low_bdt = "(bdt_response < 0.4)"
    >>> tW_DR_selected, ttbar_selected = satisfying_selection(
    ...     df_tW_DR, df_ttbar, selection=low_bdt
    ... )
    """
    nxsel = selection_as_numexpr(selection)
    selected = []
    for frame in dfs:
        passing = frame.query(nxsel)
        # record the selection on the result for bookkeeping
        passing.selection_used = nxsel
        selected.append(passing)
    return selected
def drop_cols(df: pd.DataFrame, *cols: str) -> None:
    """Drop some columns from a dataframe, in place.

    Convenience wrapper around :py:meth:`pandas.DataFrame.drop` that
    silently ignores any requested name not present in the dataframe.
    We augment :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : :py:obj:`pandas.DataFrame`
        Dataframe which we want to slim.
    *cols : sequence of strings
        Columns to remove.

    Examples
    --------
    >>> import pandas as pd
    >>> from tdub.frames import drop_cols
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_cols(df, "E_jet1", "mass_jet1")   # free function
    >>> df.drop_cols("mass_jet2")              # augmented df class
    """
    # only drop names that actually exist in the dataframe
    removable = list(set(cols) & set(df.columns))
    log.debug("Dropping columns:")
    for name in removable:
        log.debug(f" - {name}")
    df.drop(columns=removable, inplace=True)
def drop_avoid(df: pd.DataFrame, region: str | Region | None = None) -> None:
    """Drop columns that we avoid in classifiers.

    Uses :py:func:`tdub.frames.drop_cols` with a predefined set of
    columns (:py:data:`tdub.config.AVOID_IN_CLF`). We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe that you want to slim (modified in place).
    region : optional, str or tdub.data.Region
        Region to augment the list of dropped columns (see the region
        specific AVOID constants in the config module).

    Examples
    --------
    >>> from tdub.frames import drop_avoid
    >>> import pandas as pd
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_avoid(df)
    """
    # Copy before extending: the previous ``to_drop += avoids_for(region)``
    # extended tdub.config.AVOID_IN_CLF itself (list aliasing), so the
    # global avoid-list silently grew on every region-specific call.
    to_drop = list(tdub.config.AVOID_IN_CLF)
    if region is not None:
        to_drop.extend(avoids_for(region))
    drop_cols(df, *to_drop)
def drop_jet2(df: pd.DataFrame) -> None:
    """Drop all columns with jet2 properties, in place.

    In the 1j1b region there is no second jet, so every column built
    from jet2 kinematics is meaningless there. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe that we want to slim.

    Examples
    --------
    >>> from tdub.frames import drop_jet2
    >>> import pandas as pd
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_jet2(df)
    """
    drop_cols(df, *[c for c in df.columns if "jet2" in c])
def apply_weight(
    df: pd.DataFrame, weight_name: str, exclude: list[str] | None = None
) -> None:
    """Apply (multiply) a weight to all other weights in the DataFrame.

    This will multiply the nominal weight and all systematic weights
    in the DataFrame by the ``weight_name`` column, in place. We
    augment :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on.
    weight_name : str
        Column name to multiply all other weight columns by.
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight("weight_campaign")
    """
    sys_weight_cols = [c for c in df.columns if "weight_sys" in c]
    cols = ["weight_nominal"] + sys_weight_cols
    if exclude is not None:
        for entry in exclude:
            if entry in cols:
                cols.remove(entry)
    if weight_name in cols:
        # never multiply a weight column by itself.
        # (fix: Logger.warn is a deprecated alias of Logger.warning)
        log.warning(f"{weight_name} is in the columns list, dropping")
        cols.remove(weight_name)
    log.info(f"Applying {weight_name} to all weights in dataframe.")
    df.loc[:, cols] = df.loc[:, cols].multiply(df.loc[:, weight_name], axis="index")
def apply_weight_inverse(
    df: pd.DataFrame, weight_name: str, exclude: list[str] | None = None
) -> None:
    """Apply an inverse weight (via division) to all other weights in the DataFrame.

    This will divide the nominal weight and all systematic weights in
    the DataFrame by the ``weight_name`` column, in place. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on.
    weight_name : str
        Column name to divide all other weight columns by.
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_inverse("weight_tptrw_tool")
    """
    sys_weight_cols = [c for c in df.columns if "weight_sys" in c]
    cols = ["weight_nominal"] + sys_weight_cols
    if exclude is not None:
        for entry in exclude:
            if entry in cols:
                cols.remove(entry)
    if weight_name in cols:
        # never divide a weight column by itself.
        # (fix: Logger.warn is a deprecated alias of Logger.warning)
        log.warning(f"{weight_name} is in the columns list, dropping")
        cols.remove(weight_name)
    df.loc[:, cols] = df.loc[:, cols].divide(df.loc[:, weight_name], axis="index")
def apply_weight_campaign(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the campaign weight.

    Useful for samples produced without the campaign weight term
    already folded into the other weights; thin wrapper around
    :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_campaign()
    """
    apply_weight(df, "weight_campaign", exclude=exclude)
def apply_weight_tptrw(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the top pt reweight term.

    Useful for samples produced without the top pt reweighting term
    already folded into the other weights; thin wrapper around
    :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_tptrw()
    """
    # the no-reweight systematic must never receive the reweight factor
    skip = ["weight_sys_noreweight"]
    if exclude is not None:
        skip.extend(exclude)
    apply_weight(df, "weight_tptrw_tool", exclude=skip)
def apply_weight_trrw(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the top recursive reweight term.

    Useful for samples produced without the top recursive reweighting
    term already folded into the other weights; thin wrapper around
    :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_trrw()
    """
    # the no-reweight systematic must never receive the reweight factor
    skip = ["weight_sys_noreweight"]
    if exclude is not None:
        skip.extend(exclude)
    apply_weight(df, "weight_trrw_tool", exclude=skip)
# Augment pandas.DataFrame with the helpers defined in this module so
# they can be used as methods (e.g. ``df.drop_cols("x")``).
# (fix: stray dataset/table residue appended to the last line made it a
# syntax error.)
pd.DataFrame.drop_cols = drop_cols
pd.DataFrame.drop_avoid = drop_avoid
pd.DataFrame.drop_jet2 = drop_jet2
pd.DataFrame.apply_weight = apply_weight
pd.DataFrame.apply_weight_campaign = apply_weight_campaign
pd.DataFrame.apply_weight_tptrw = apply_weight_tptrw
pd.DataFrame.apply_weight_trrw = apply_weight_trrw
from __future__ import annotations
# stdlib
import logging
import re
from typing import Optional, Union, List, Iterable
# externals
import pandas as pd
import uproot
# tdub
import tdub.config
from tdub.data import (
Region,
avoids_for,
categorize_branches,
branches_from,
selection_branches,
selection_as_numexpr,
)
log = logging.getLogger(__name__)
def raw_dataframe(
    files: str | list[str],
    tree: str = "WtLoop_nominal",
    weight_name: str = "weight_nominal",
    branches: Iterable[str] | None = None,
    drop_weight_sys: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """Construct a raw pandas flavored Dataframe with help from uproot.

    "Raw" means untouched by the rest of the tdub.frames machinery:
    no selection is applied and kinematic/weight branches are not
    separated -- just a plain dataframe built straight from ROOT
    files. Extra ``kwargs`` are fed to uproot's ``arrays()``
    interface.

    Parameters
    ----------
    files : list(str) or str
        Single ROOT file or list of ROOT files.
    tree : str
        The tree name to turn into a dataframe.
    weight_name: str
        Weight branch (we make sure to grab it if you give something
        other than ``None`` to ``branches``).
    branches : list(str), optional
        List of branches to include as columns in the dataframe,
        default is ``None``, includes all branches.
    drop_weight_sys : bool
        Drop all weight systematics from being grabbed.

    Returns
    -------
    pandas.DataFrame
        The pandas flavored DataFrame with all requested branches.

    Examples
    --------
    >>> from tdub.data import quick_files
    >>> from tdub.frames import raw_dataframe
    >>> files = quick_files("/path/to/files")["ttbar"]
    >>> df = raw_dataframe(files)
    """
    if branches is None:
        branches = branches_from(files, tree)
    else:
        # always make sure the weight branch is grabbed
        branches = sorted(set(branches) | {weight_name}, key=str.lower)
    if weight_name not in branches:
        raise RuntimeError(f"{weight_name} not present in {tree}")
    if drop_weight_sys:
        # remove every branch matching ^weight_sys...
        sys_pattern = re.compile(r"^weight_sys\w+")
        branches = sorted(
            (b for b in set(branches) if not sys_pattern.match(b)), key=str.lower
        )
    file_list = [files] if isinstance(files, str) else files
    frames = [
        uproot.open(fname).get(tree).arrays(branches, library="pd", **kwargs)
        for fname in file_list
    ]
    result = pd.concat(frames)
    result.selection_used = None
    return result
def iterative_selection(
    files: str | list[str],
    selection: str,
    tree: str = "WtLoop_nominal",
    weight_name: str = "weight_nominal",
    branches: Iterable[str] | None = None,
    keep_category: str | None = None,
    exclude_avoids: bool = False,
    use_campaign_weight: bool = False,
    use_tptrw: bool = False,
    use_trrw: bool = False,
    sample_frac: float | None = None,
    **kwargs,
) -> pd.DataFrame:
    """Build a selected dataframe via file-by-file iteration with uproot.

    If we want to build a memory-hungry dataframe and apply a
    selection, this helps us avoid crashing due to using all of our
    RAM: each file is read, weighted, selected and slimmed
    individually before the surviving rows are concatenated.

    The selection can be in either numexpr or ROOT form; a ROOT style
    selection is converted to numexpr for use with
    :py:func:`pandas.eval`.

    Parameters
    ----------
    files : list(str) or str
        A single ROOT file or list of ROOT files.
    selection : str
        Selection string (numexpr or ROOT form accepted).
    tree : str
        Tree name to turn into a dataframe.
    weight_name: str
        Weight branch to preserve.
    branches : list(str), optional
        List of branches to include as columns in the dataframe,
        default is ``None`` (all branches).
    keep_category : str, optional
        If not ``None``, the selected dataframe(s) will only include
        columns which are part of the given category (see
        :py:func:`tdub.data.categorize_branches`). The weight branch
        is always kept.
    exclude_avoids : bool
        Exclude branches defined by :py:data:`tdub.config.AVOID_IN_CLF`.
    use_campaign_weight : bool
        Multiply the nominal weight by the campaign weight. This is
        potentially necessary if the samples were prepared without the
        campaign weight included in the product which forms the
        nominal weight.
    use_tptrw : bool
        Apply the top pt reweighting factor.
    use_trrw : bool
        Apply the top recursive reweighting factor.
    sample_frac : float, optional
        Sample a fraction of the available data.

    Returns
    -------
    pandas.DataFrame
        The final selected dataframe(s) from the files.

    Examples
    --------
    Creating a ``ttbar_df`` dataframe and a single ``tW_df`` dataframe:

    >>> from tdub.frames import iterative_selection
    >>> from tdub.data import quick_files
    >>> from tdub.data import selection_for
    >>> qf = quick_files("/path/to/files")
    >>> ttbar_dfs = iterative_selection(qf["ttbar"], selection_for("2j2b"),
    ...                                 entrysteps="1 GB")
    >>> tW_df = iterative_selection(qf["tW_DR"], selection_for("2j2b"))

    Keep only kinematic branches after selection and ignore avoided columns:

    >>> tW_df = iterative_selection(qf["tW_DR"],
    ...                             selection_for("2j2b"),
    ...                             exclude_avoids=True,
    ...                             keep_category="kinematics")
    """
    # determine which branches will be used for selection only and
    # which branches we need for weights
    sel_branches = selection_branches(selection)
    weights_to_grab = {weight_name}
    if use_campaign_weight:
        weights_to_grab.add("weight_campaign")
        log.info("applying the campaign weight")
    if use_tptrw:
        weights_to_grab.add("weight_tptrw_tool")
        log.info("applying the top pt reweighting factor")
    if use_trrw:
        weights_to_grab.add("weight_trrw_tool")
        log.info("applying the top recursive reweighting factor")
    if sample_frac is not None:
        log.info(f"Sampling {100 * sample_frac}% of events")
    if branches is None:
        branches = set(branches_from(files, tree=tree))
    branches = set(branches)
    # branches needed only to evaluate the selection (never kept)
    sel_only_branches = sel_branches - branches
    # determine which branches to keep after reading dataframes and
    # are necessary during reading.
    if keep_category is not None:
        branches_cated = categorize_branches(list(branches))
        keep_cat = set(branches_cated[keep_category])
        keep = keep_cat & branches
        read_branches = list(keep | weights_to_grab | sel_branches)
    else:
        keep = branches
        read_branches = list(branches | weights_to_grab | sel_branches)
    # drop avoided classifier variables
    if exclude_avoids:
        keep = keep - set(tdub.config.AVOID_IN_CLF)
    # always drop selection only branches
    keep = keep - sel_only_branches
    # always keep the requested weight (enforce here just in
    # case). sort into a list and move on to dataframes
    keep.add(weight_name)
    keep_cols = sorted(keep, key=str.lower)
    if isinstance(files, str):
        files = [files]
    numexpr_sel = selection_as_numexpr(selection)
    dfs = []
    # per-file loop: read -> (sample) -> weight -> select -> slim
    for i, f in enumerate(files):
        df = uproot.open(f).get(tree).arrays(read_branches, library="pd", **kwargs)
        if sample_frac is not None:
            df = df.sample(frac=sample_frac, random_state=tdub.config.RANDOM_STATE)
        # weights must be applied before the selection is evaluated
        if use_campaign_weight:
            apply_weight_campaign(df)
        if use_tptrw:
            apply_weight_tptrw(df)
        if use_trrw:
            apply_weight_trrw(df)
        idf = df.query(numexpr_sel)
        idf = idf[keep_cols]
        dfs.append(idf)
        log.debug(f"finished iteration {i}")
    result = pd.concat(dfs)
    result.selection_used = numexpr_sel
    return result
def satisfying_selection(*dfs: pd.DataFrame, selection: str) -> list[pd.DataFrame]:
    """Get subsets of dataframes that satisfy a selection.

    Accepts a selection string in either ROOT or numexpr form (ROOT
    form is converted to numexpr before querying).

    Parameters
    ----------
    *dfs : sequence of :py:obj:`pandas.DataFrame`
        Dataframes to apply the selection to.
    selection : str
        Selection string (in numexpr or ROOT form).

    Returns
    -------
    list(pandas.DataFrame)
        Dataframes satisfying the selection string.

    Examples
    --------
    >>> from tdub.data import quick_files
    >>> from tdub.frames import raw_dataframe, satisfying_selection
    >>> qf = quick_files("/path/to/files")
    >>> df_tW_DR = raw_dataframe(qf["tW_DR"])
    >>> df_ttbar = raw_dataframe(qf["ttbar"])
    >>> tW_DR_sel, ttbar_sel = satisfying_selection(
    ...     df_tW_DR, df_ttbar, selection="(bdt_response < 0.4)"
    ... )
    """
    query_string = selection_as_numexpr(selection)
    results: list[pd.DataFrame] = []
    for frame in dfs:
        subset = frame.query(query_string)
        # keep a record of the applied selection on each result
        subset.selection_used = query_string
        results.append(subset)
    return results
def drop_cols(df: pd.DataFrame, *cols: str) -> None:
    """Drop columns from a dataframe, ignoring names it does not have.

    Wrapper around :py:meth:`pandas.DataFrame.drop` (in place) that
    skips requested columns absent from ``df`` instead of raising. We
    augment :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : :py:obj:`pandas.DataFrame`
        Dataframe which we want to slim.
    *cols : sequence of strings
        Columns to remove.

    Examples
    --------
    >>> import pandas as pd
    >>> from tdub.frames import drop_cols
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_cols(df, "E_jet1", "mass_jet1")
    >>> df.drop_cols("mass_jet2")  # augmented df class
    """
    present = set(df.columns)
    # intersect first so pandas never sees a missing label
    doomed = [c for c in set(cols) if c in present]
    log.debug("Dropping columns:")
    for name in doomed:
        log.debug(f" - {name}")
    df.drop(columns=doomed, inplace=True)
def drop_avoid(df: pd.DataFrame, region: str | Region | None = None) -> None:
    """Drop columns that we avoid in classifiers.

    Uses :py:func:`tdub.frames.drop_cols` with a predefined set of
    columns (:py:data:`tdub.config.AVOID_IN_CLF`). We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe that you want to slim (modified in place).
    region : optional, str or tdub.data.Region
        Region to augment the list of dropped columns (see the region
        specific AVOID constants in the config module).

    Examples
    --------
    >>> from tdub.frames import drop_avoid
    >>> import pandas as pd
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_avoid(df)
    """
    # Copy before extending: ``to_drop += avoids_for(region)`` on the
    # bare config attribute mutated tdub.config.AVOID_IN_CLF in place,
    # permanently growing the global avoid-list with each call.
    to_drop = list(tdub.config.AVOID_IN_CLF)
    if region is not None:
        to_drop.extend(avoids_for(region))
    drop_cols(df, *to_drop)
def drop_jet2(df: pd.DataFrame) -> None:
    """Drop all columns with jet2 properties, in place.

    The 1j1b region has no second jet, so columns that depend on jet2
    kinematics carry no information there. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe that we want to slim.

    Examples
    --------
    >>> from tdub.frames import drop_jet2
    >>> import pandas as pd
    >>> df = pd.read_parquet("some_file.parquet")
    >>> drop_jet2(df)
    """
    jet2_like = filter(lambda c: "jet2" in c, df.columns)
    drop_cols(df, *jet2_like)
def apply_weight(
    df: pd.DataFrame, weight_name: str, exclude: list[str] | None = None
) -> None:
    """Apply (multiply) a weight to all other weights in the DataFrame.

    This will multiply the nominal weight and all systematic weights
    in the DataFrame by the ``weight_name`` column, in place. We
    augment :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on.
    weight_name : str
        Column name to multiply all other weight columns by.
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight("weight_campaign")
    """
    sys_weight_cols = [c for c in df.columns if "weight_sys" in c]
    cols = ["weight_nominal"] + sys_weight_cols
    if exclude is not None:
        for entry in exclude:
            if entry in cols:
                cols.remove(entry)
    if weight_name in cols:
        # never multiply a weight column by itself.
        # (fix: Logger.warn is a deprecated alias of Logger.warning)
        log.warning(f"{weight_name} is in the columns list, dropping")
        cols.remove(weight_name)
    log.info(f"Applying {weight_name} to all weights in dataframe.")
    df.loc[:, cols] = df.loc[:, cols].multiply(df.loc[:, weight_name], axis="index")
def apply_weight_inverse(
    df: pd.DataFrame, weight_name: str, exclude: list[str] | None = None
) -> None:
    """Apply an inverse weight (via division) to all other weights in the DataFrame.

    This will divide the nominal weight and all systematic weights in
    the DataFrame by the ``weight_name`` column, in place. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on.
    weight_name : str
        Column name to divide all other weight columns by.
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_inverse("weight_tptrw_tool")
    """
    sys_weight_cols = [c for c in df.columns if "weight_sys" in c]
    cols = ["weight_nominal"] + sys_weight_cols
    if exclude is not None:
        for entry in exclude:
            if entry in cols:
                cols.remove(entry)
    if weight_name in cols:
        # never divide a weight column by itself.
        # (fix: Logger.warn is a deprecated alias of Logger.warning)
        log.warning(f"{weight_name} is in the columns list, dropping")
        cols.remove(weight_name)
    df.loc[:, cols] = df.loc[:, cols].divide(df.loc[:, weight_name], axis="index")
def apply_weight_campaign(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the campaign weight.

    Intended for samples produced without the campaign weight term
    already applied to the other weights; delegates to
    :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_campaign()
    """
    apply_weight(df, "weight_campaign", exclude=exclude)
def apply_weight_tptrw(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the top pt reweight term.

    Intended for samples produced without the top pt reweighting term
    already applied to the other weights; delegates to
    :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_tptrw()
    """
    # the dedicated no-reweight systematic is always left untouched
    untouched = ["weight_sys_noreweight"] + list(exclude or [])
    apply_weight(df, "weight_tptrw_tool", exclude=untouched)
def apply_weight_trrw(df: pd.DataFrame, exclude: list[str] | None = None) -> None:
    """Multiply nominal and systematic weights by the top recursive reweight term.

    Intended for samples produced without the top recursive
    reweighting term already applied to the other weights; delegates
    to :py:func:`tdub.frames.apply_weight`. We augment
    :py:class:`pandas.DataFrame` with this function.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to operate on (modified in place).
    exclude : list(str), optional
        List of columns to exclude when determining the other weight
        columns to operate on.

    Examples
    --------
    >>> import tdub.frames
    >>> df = tdub.frames.raw_dataframe("/path/to/file.root")
    >>> df.apply_weight_trrw()
    """
    # the dedicated no-reweight systematic is always left untouched
    untouched = ["weight_sys_noreweight"] + list(exclude or [])
    apply_weight(df, "weight_trrw_tool", exclude=untouched)
# Augment pandas.DataFrame with the helpers defined in this module so
# they can be used as methods (e.g. ``df.drop_cols("x")``).
# (fix: stray dataset/table residue appended to the last line made it a
# syntax error.)
pd.DataFrame.drop_cols = drop_cols
pd.DataFrame.drop_avoid = drop_avoid
pd.DataFrame.drop_jet2 = drop_jet2
pd.DataFrame.apply_weight = apply_weight
pd.DataFrame.apply_weight_campaign = apply_weight_campaign
pd.DataFrame.apply_weight_tptrw = apply_weight_tptrw
pd.DataFrame.apply_weight_trrw = apply_weight_trrw
import json
import requests
from django.shortcuts import redirect, render
from django.core.paginator import Paginator
from django.core.exceptions import SuspiciousOperation
from django.contrib import messages
from django.shortcuts import HttpResponse
from .models import *
from .forms import *
from django.http import JsonResponse, response
from django.conf import settings
def search_person(request):
    """Return persons matching the posted search text as JSON (POST only).

    Matches the search string against commune, wilaya and blood type
    (case-insensitive substring match).
    """
    if request.method != 'POST':
        raise SuspiciousOperation('Invalid JSON')
    needle = json.loads(request.body).get('searchText')
    # union of the three substring matches
    matches = (
        Person.objects.filter(commune__icontains=needle)
        | Person.objects.filter(wilaya__icontains=needle)
        | Person.objects.filter(blood_type__icontains=needle)
    )
    return JsonResponse(list(matches.values()), safe=False)
def index(request):
    """Render the landing page."""
    return render(request, 'pages/home.html')
def search(request):
    """Render a paginated list of all persons (20 per page)."""
    result = Person.objects.all()
    result_paginator = Paginator(result, 20)
    page = result_paginator.get_page(request.GET.get('page'))
    context = {
        # fix: the key was "' count'" (leading space), which templates
        # cannot reference; reuse ``result`` instead of issuing a second
        # identical Person.objects.all() query.
        'count': result_paginator.count,
        'result': result,
        'page': page,
    }
    return render(request, 'pages/search.html', context)
def create(request):
    """Create a Person announcement after validating Google reCAPTCHA.

    On GET, simply renders the creation form. On POST, verifies the
    reCAPTCHA token with Google, then saves the new Person and flashes
    a success/error message.
    (fixes: the Person instance was previously built from request.POST
    even on GET requests; stray dataset/table residue on the final
    line made it a syntax error.)
    """
    if request.method == 'POST':
        data = Person(
            name=request.POST.get('username'),
            phone=request.POST.get('phone'),
            wilaya=request.POST.get('wilaya', None),
            commune=request.POST.get('commune', None),
            blood_type=request.POST.get('blood_type', None),
        )
        capatcha_token = request.POST.get('g-recaptcha-response')
        cap_data = {'secret': settings.CAPTCHA_SECRET, "response": capatcha_token}
        cap_server_response = requests.post(url=settings.CAPTCHA_URL, data=cap_data)
        cap_json = json.loads(cap_server_response.text)
        if cap_json['success'] == False:
            messages.error(request, ' الرجاء التحقق من reCAPTCHA ')
        else:
            data.save()
            messages.success(request, ' تم نشر إعلانك بنجاح ,بارك الله فيك ')
    return render(request, 'pages/create.html')
import requests
from django.shortcuts import redirect, render
from django.core.paginator import Paginator
from django.core.exceptions import SuspiciousOperation
from django.contrib import messages
from django.shortcuts import HttpResponse
from .models import *
from .forms import *
from django.http import JsonResponse, response
from django.conf import settings
def search_person(request):
    """POST endpoint returning matching persons as a JSON list.

    The posted ``searchText`` is matched (case-insensitive substring)
    against commune, wilaya and blood type.
    """
    if request.method == 'POST':
        term = json.loads(request.body).get('searchText')
        hits = (
            Person.objects.filter(commune__icontains=term)
            | Person.objects.filter(wilaya__icontains=term)
            | Person.objects.filter(blood_type__icontains=term)
        )
        return JsonResponse(list(hits.values()), safe=False)
    raise SuspiciousOperation('Invalid JSON')
def index(request):
    """Serve the home page template."""
    return render(request, 'pages/home.html')
def search(request):
    """Render a paginated listing of every Person, 20 per page."""
    persons = Person.objects.all()
    paginator = Paginator(persons, 20)
    page = paginator.get_page(request.GET.get('page'))
    # fix: the count key was "' count'" (leading space) and therefore
    # unreachable from templates; the queryset is also reused rather
    # than re-querying Person.objects.all() a second time.
    context = {
        'count': paginator.count,
        'result': persons,
        'page': page,
    }
    return render(request, 'pages/search.html', context)
return render(request, 'pages/search.html', context)
def create(request):
userName = request.POST.get('username')
userPhone = request.POST.get('phone')
userWilaya = request.POST.get('wilaya', None)
userCommune = request.POST.get('commune', None)
userBloodtype = request.POST.get('blood_type', None)
data = Person(name=userName, phone=userPhone,
wilaya=userWilaya, commune=userCommune, blood_type=userBloodtype)
if request.method == 'POST':
capatcha_token = request.POST.get('g-recaptcha-response')
cap_data = {'secret': settings.CAPTCHA_SECRET, "response": capatcha_token}
cap_server_response = requests.post(url=settings.CAPTCHA_URL, data=cap_data)
cap_json=json.loads(cap_server_response.text)
if cap_json['success']==False:
messages.error(request,' الرجاء التحقق من reCAPTCHA ')
else :
data.save()
messages.success(request,' تم نشر إعلانك بنجاح ,بارك الله فيك ')
return render(request, 'pages/create.html') | 0.261708 | 0.055056 |
from importlib import resources
import pandas as pd
def load_bio_kdd04(as_frame: bool = False):
    """Load and return the highly imbalanced binary classification Protein
    Homology Dataset from [KDD cup 2004](https://www.kdd.org/kdd-cup/view/kdd-cup-2004/Data).

    Only the ``bio_train.dat`` part of the dataset is included.

    * First element per line: BLOCK ID of the native sequence the example
      belongs to (integers 1-303, one per native sequence/query; assigned
      before the train/test split, so not consecutive in either file).
    * Second element: EXAMPLE ID uniquely describing the example (needed,
      together with the BLOCK ID, when submitting results).
    * Third element: class — 1 for proteins homologous to the native
      sequence, 0 for decoys ("?" in test examples).
    * Remaining 74 elements: feature values describing the match (e.g. a
      sequence-alignment score) between the native protein sequence and
      the sequence tested for homology.

    Parameters
    ----------
    as_frame : bool
        If True return a :py:class:`pandas.DataFrame`, otherwise a
        numpy array.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "bio_train.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)
    return frame if as_frame else frame.to_numpy()
def load_adult(as_frame: bool = False):
    """Load and return the highly imbalanced binary classification
    [adult income dataset](http://www.cs.toronto.edu/~delve/data/adult/desc.html).

    A detailed description is available
    [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html).

    Parameters
    ----------
    as_frame : bool
        If True return a :py:class:`pandas.DataFrame`, otherwise a
        numpy array.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "adult.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)
    return frame if as_frame else frame.to_numpy()
def load_ecoli(as_frame: bool = False):
    """Load the highly imbalanced multiclass E. coli protein-localization
    dataset from the
    [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/ecoli).

    Title: Protein Localization Sites. 336 instances, 8 attributes
    (7 predictive, 1 name), no missing values. The predicted attribute is
    the (non-numeric) localization site of the protein.

    Attribute information:

    1. Sequence Name: accession number for the SWISS-PROT database
    2. mcg: McGeoch's method for signal sequence recognition
    3. gvh: von Heijne's method for signal sequence recognition
    4. lip: von Heijne's Signal Peptidase II consensus sequence score
       (binary attribute)
    5. chg: presence of charge on N-terminus of predicted lipoproteins
       (binary attribute)
    6. aac: score of discriminant analysis of the amino acid content of
       outer membrane and periplasmic proteins
    7. alm1: score of the ALOM membrane-spanning-region prediction program
    8. alm2: score of ALOM after excluding putative cleavable signal
       regions from the sequence

    Class distribution (localization sites; see Nakai & Kanehisa for
    details):

    - cp  (cytoplasm)                                      143
    - im  (inner membrane without signal sequence)          77
    - pp  (perisplasm)                                      52
    - imU (inner membrane, uncleavable signal sequence)     35
    - om  (outer membrane)                                  20
    - omL (outer membrane lipoprotein)                       5
    - imL (inner membrane lipoprotein)                       2
    - imS (inner membrane, cleavable signal sequence)        2

    References: Horton & Nakai, "A Probabilistic Classification System for
    Predicting the Cellular Localization Sites of Proteins", Intelligent
    Systems in Molecular Biology, 109-115, St. Louis, USA 1996 (81%
    accuracy for E. coli with an ad hoc structured probability model);
    predecessor datasets are described in PROTEINS 11:95-110 (1991) and
    Genomics 14:897-911 (1992). Donated September 1996; see also the yeast
    database. http://www.imcb.osaka-u.ac.jp/nakai/psort.html

    Returns a ``pandas.DataFrame`` when ``as_frame`` is True, otherwise a
    numpy array via ``DataFrame.to_numpy()``.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "ecoli.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)

    return frame if as_frame else frame.to_numpy()
def load_california_housing(as_frame: bool = False):
    """Load the California housing regression dataset.

    Characteristics: 20640 instances; 8 numeric predictive attributes plus
    the target.

    Attributes (one row per 1990 U.S. census block group, the smallest
    geographical unit for which the U.S. Census Bureau publishes sample
    data -- typically 600 to 3,000 people):

    - MedInc     median income in block group
    - HouseAge   median house age in block group
    - AveRooms   average number of rooms per household
    - AveBedrms  average number of bedrooms per household
    - Population block group population
    - AveOccup   average number of household members
    - Latitude   block group latitude
    - Longitude  block group longitude

    The target is the median house value for California districts,
    expressed in hundreds of thousands of dollars ($100,000). Because the
    average rooms/bedrooms are per household, these columns may take
    surprisingly large values for block groups with few households and
    many empty houses, such as vacation resorts.

    Obtained from the StatLib repository:
    https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html

    Reference: Pace & Barry, Sparse Spatial Autoregressions, Statistics
    and Probability Letters, 33 (1997) 291-297.

    Returns a ``pandas.DataFrame`` when ``as_frame`` is True, otherwise a
    numpy array via ``DataFrame.to_numpy()``.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "california_housing.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)

    return frame if as_frame else frame.to_numpy()
def load_birds(as_frame: bool = False):
    """Load the multi-label classification bird dataset.

    References: http://mulan.sourceforge.net/datasets-mlc.html and
    "The 9th annual MLSP competition: New methods for acoustic
    classification of multiple simultaneous bird species in a noisy
    environment", in proc. 2013 IEEE International Workshop on Machine
    Learning for Signal Processing (MLSP).

    Returns a ``pandas.DataFrame`` when ``as_frame`` is True, otherwise a
    numpy array via ``DataFrame.to_numpy()``.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "birds_train.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)

    return frame if as_frame else frame.to_numpy()
def load_rf1(as_frame: bool = False):
    """Load the multi-target regression River Flow (RF1) dataset.

    RF1 concerns predicting flows in a river network 48 hours into the
    future at 8 different locations in the Mississippi River network in
    the United States. It is one of the multi-target regression problems
    listed in the literature survey by Borchani et al. Each row includes
    the most recent observation for each of the 8 sites as well as
    time-lagged observations from 6, 12, 18, 24, 36, 48 and 60 hours in
    the past, giving 64 attribute variables and 8 target variables in
    total. The data comprise over a year of hourly observations (over
    9000 points) collected from September 2011 to September 2012 by the
    US National Weather Service; 1000 points were randomly sampled for
    training and 2000 for evaluation.

    Returns a ``pandas.DataFrame`` when ``as_frame`` is True, otherwise a
    numpy array via ``DataFrame.to_numpy()``.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "rf1_train.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)

    return frame if as_frame else frame.to_numpy()
def load_womens_ecommerce(as_frame: bool = False):
    """Load the Women's Clothing E-Commerce reviews dataset.

    Real (anonymized) commercial data revolving around reviews written by
    customers; references to the company in the review text and body have
    been replaced with "retailer". 23486 rows, 10 feature variables, one
    row per customer review:

    - Clothing ID: integer categorical id of the piece being reviewed.
    - Age: positive integer, reviewer's age.
    - Title: string, title of the review.
    - Review Text: string, review body.
    - Rating: positive ordinal integer score granted by the customer,
      from 1 (worst) to 5 (best).
    - Recommended IND: binary; 1 = customer recommends the product,
      0 = does not.
    - Positive Feedback Count: positive integer, number of other
      customers who found this review positive.
    - Division Name: categorical, product high-level division.
    - Department Name: categorical, product department.
    - Class Name: categorical, product class.

    Returns a ``pandas.DataFrame`` when ``as_frame`` is True, otherwise a
    numpy array via ``DataFrame.to_numpy()``.
    """
    with resources.path(
        "pytorch_widedeep.datasets.data",
        "WomensClothingE-CommerceReviews.parquet.brotli",
    ) as fpath:
        frame = pd.read_parquet(fpath)

    return frame if as_frame else frame.to_numpy()
import pandas as pd
def load_bio_kdd04(as_frame: bool = False):
"""Load and return the higly imbalanced binary classification Protein Homology
Dataset from [KDD cup 2004](https://www.kdd.org/kdd-cup/view/kdd-cup-2004/Data).
This datasets include only bio_train.dat part of the dataset
* The first element of each line is a BLOCK ID that denotes to which native sequence
this example belongs. There is a unique BLOCK ID for each native sequence.
BLOCK IDs are integers running from 1 to 303 (one for each native sequence,
i.e. for each query). BLOCK IDs were assigned before the blocks were split
into the train and test sets, so they do not run consecutively in either file.
* The second element of each line is an EXAMPLE ID that uniquely describes
the example. You will need this EXAMPLE ID and the BLOCK ID when you submit results.
* The third element is the class of the example. Proteins that are homologous to
the native sequence are denoted by 1, non-homologous proteins (i.e. decoys) by 0.
Test examples have a "?" in this position.
* All following elements are feature values. There are 74 feature values in each line.
The features describe the match (e.g. the score of a sequence alignment) between
the native protein sequence and the sequence that is tested for homology.
"""
# header_list = ["EXAMPLE_ID", "BLOCK_ID", "target"] + [str(i) for i in range(4, 78)]
with resources.path(
"pytorch_widedeep.datasets.data", "bio_train.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_adult(as_frame: bool = False):
"""Load and return the higly imbalanced binary classification [adult income datatest](http://www.cs.toronto.edu/~delve/data/adult/desc.html).
you may find detailed description [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html)
"""
with resources.path(
"pytorch_widedeep.datasets.data", "adult.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_ecoli(as_frame: bool = False):
"""Load and return the higly imbalanced multiclass classification e.coli dataset
Dataset from [UCI Machine learning Repository](https://archive.ics.uci.edu/ml/datasets/ecoli).
1. Title: Protein Localization Sites
2. Creator and Maintainer:
<NAME>
Institue of Molecular and Cellular Biology
Osaka, University
1-3 Yamada-oka, Suita 565 Japan
<EMAIL>
http://www.imcb.osaka-u.ac.jp/nakai/psort.html
Donor: <NAME> (<EMAIL>)
Date: September, 1996
See also: yeast database
3. Past Usage.
Reference: "A Probablistic Classification System for Predicting the Cellular
Localization Sites of Proteins", Paul Horton & Kenta Nakai,
Intelligent Systems in Molecular Biology, 109-115.
St. Louis, USA 1996.
Results: 81% for E.coli with an ad hoc structured
probability model. Also similar accuracy for Binary Decision Tree and
Bayesian Classifier methods applied by the same authors in
unpublished results.
Predicted Attribute: Localization site of protein. ( non-numeric ).
4. The references below describe a predecessor to this dataset and its
development. They also give results (not cross-validated) for classification
by a rule-based expert system with that version of the dataset.
Reference: "Expert Sytem for Predicting Protein Localization Sites in
Gram-Negative Bacteria", <NAME> & <NAME>,
PROTEINS: Structure, Function, and Genetics 11:95-110, 1991.
Reference: "A Knowledge Base for Predicting Protein Localization Sites in
Eukaryotic Cells", <NAME> & <NAME>,
Genomics 14:897-911, 1992.
5. Number of Instances: 336 for the E.coli dataset and
6. Number of Attributes.
for E.coli dataset: 8 ( 7 predictive, 1 name )
7. Attribute Information.
1. Sequence Name: Accession number for the SWISS-PROT database
2. mcg: McGeoch's method for signal sequence recognition.
3. gvh: von Heijne's method for signal sequence recognition.
4. lip: von Heijne's Signal Peptidase II consensus sequence score.
Binary attribute.
5. chg: Presence of charge on N-terminus of predicted lipoproteins.
Binary attribute.
6. aac: score of discriminant analysis of the amino acid content of
outer membrane and periplasmic proteins.
7. alm1: score of the ALOM membrane spanning region prediction program.
8. alm2: score of ALOM program after excluding putative cleavable signal
regions from the sequence.
8. Missing Attribute Values: None.
9. Class Distribution. The class is the localization site. Please see Nakai & Kanehisa referenced above for more details.
cp (cytoplasm) 143
im (inner membrane without signal sequence) 77
pp (perisplasm) 52
imU (inner membrane, uncleavable signal sequence) 35
om (outer membrane) 20
omL (outer membrane lipoprotein) 5
imL (inner membrane lipoprotein) 2
imS (inner membrane, cleavable signal sequence) 2
"""
with resources.path(
"pytorch_widedeep.datasets.data", "ecoli.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_california_housing(as_frame: bool = False):
"""Load and return the higly imbalanced regression California housing dataset.
Characteristics:
Number of Instances: 20640
Number of Attributes: 8 numeric, predictive attributes and the target
Attribute Information:
- MedInc median income in block group
- HouseAge median house age in block group
- AveRooms average number of rooms per household
- AveBedrms average number of bedrooms per household
- Population block group population
- AveOccup average number of household members
- Latitude block group latitude
- Longitude block group longitude
This dataset was obtained from the StatLib repository.
https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html
The target variable is the median house value for California districts,
expressed in hundreds of thousands of dollars ($100,000).
This dataset was derived from the 1990 U.S. census, using one row per census
block group. A block group is the smallest geographical unit for which the U.S.
Census Bureau publishes sample data (a block group typically has a population
of 600 to 3,000 people).
An household is a group of people residing within a home. Since the average
number of rooms and bedrooms in this dataset are provided per household, these
columns may take surpinsingly large values for block groups with few households
and many empty houses, such as vacation resorts.
References
----------
<NAME> and <NAME>, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
with resources.path(
"pytorch_widedeep.datasets.data", "california_housing.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_birds(as_frame: bool = False):
"""Load and return the multi-label classification bird dataset.
References
----------
http://mulan.sourceforge.net/datasets-mlc.html
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME> <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"The 9th annual MLSP competition: New methods for acoustic classification of multiple
simultaneous bird species in a noisy environment", in proc. 2013 IEEE International Workshop
on Machine Learning for Signal Processing (MLSP)
"""
with resources.path(
"pytorch_widedeep.datasets.data", "birds_train.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_rf1(as_frame: bool = False):
"""Load and return the multi-target regression River Flow(RF1) dataset.
Characterisctics:
The river flow data set (RF1) concerns a prediction task in which flows in a river network are
predicted for 48 hours in the future at 8 different locations in the Mississippi River network
in the United States [18]. RF1 is one of the multi-target regression problems listed in the
literature survey on multi-target regression problems by Borchani et al. [2], and therefore
serves as a good test case for the active learning algorithm. Each row includes the most recent
observation for each of the 8 sites as well as time-lagged observations from 6, 12, 18, 24, 36,
48 and 60 hours in the past. Therefore, the data set consists in total of 64 attribute variables
and 8 target variables. The data set contains over 1 year of hourly observations (over 9000
data points) collected from September 2011 to September 2012 by the US National Weather
Service. From these 9000 data points, 1000 points have been randomly sampled for training
and 2000 for evaluation.
"""
with resources.path(
"pytorch_widedeep.datasets.data", "rf1_train.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy()
def load_womens_ecommerce(as_frame: bool = False):
"""
Context
This is a Women’s Clothing E-Commerce dataset revolving around the reviews written by customers.
Its nine supportive features offer a great environment to parse out the text through its multiple
dimensions. Because this is real commercial data, it has been anonymized, and references to the company
in the review text and body have been replaced with “retailer”.
Content
This dataset includes 23486 rows and 10 feature variables. Each row corresponds to a customer review,
and includes the variables:
Clothing ID: Integer Categorical variable that refers to the specific piece being reviewed.
Age: Positive Integer variable of the reviewers age.
Title: String variable for the title of the review.
Review Text: String variable for the review body.
Rating: Positive Ordinal Integer variable for the product score granted by the customer from
1 Worst, to 5 Best.
Recommended IND: Binary variable stating where the customer recommends the product where 1 is recommended,
0 is not recommended.
Positive Feedback Count: Positive Integer documenting the number of other customers who found this
review positive.
Division Name: Categorical name of the product high level division.
Department Name: Categorical name of the product department name.
Class Name: Categorical name of the product class name.
"""
with resources.path(
"pytorch_widedeep.datasets.data",
"WomensClothingE-CommerceReviews.parquet.brotli",
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy() | 0.777215 | 0.73053 |
from icarus.clients.superclient import SuperClient
import speech_recognition as sr
from gtts import gTTS
import os
import platform
from playsound import playsound
from icarus.logging import icarus_logger
try:
from icarus.clients.WakeWordEngines.porcupine import Porcupine
except OSError:
icarus_logger.warning('Tried using porcupine with windows')
PLING_MP3 = os.path.join(os.path.dirname(__file__), '../resources/pling.mp3')
class SpeechClient(SuperClient):
    """Voice client: blocks on a wake word, transcribes the following
    utterance with Google speech recognition, and answers via gTTS
    text-to-speech (optionally rendered as spoken morse code)."""

    sensitivity = None
    handle = None
    pa = None
    audio_stream = None
    wake_word_handler = None

    def __init__(self, skill_handler, persistence):
        # Porcupine / mpg123 are unavailable on Windows; raise OSError so the
        # caller's existing `except OSError` import guard handles it uniformly.
        if platform.system() == 'Windows':
            raise OSError
        super().__init__(skill_handler, persistence)

    def setup(self):
        # Created lazily (from run()) so constructing the client stays cheap.
        self.wake_word_handler = Porcupine()

    def run(self):
        """Main loop: wait for the wake word, then hand control to stt()."""
        self.setup()
        while True:
            self.wake_word_handler.monitor_audio(self.stt)

    @staticmethod
    def _play_init():
        """Play the short 'now listening' chime."""
        try:
            playsound(PLING_MP3)
        except ModuleNotFoundError:
            # arch has a problem with python-gobject, using mpg123 as fallback
            os.system(f"mpg123 {os.path.dirname(__file__)}/../resources/pling.mp3 >/dev/null 2>&1")

    def stt(self):
        """Record one utterance from the microphone and queue the recognized
        text as a new message; recognition errors are reported and swallowed."""
        r = sr.Recognizer()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source, 0.5)
            self._play_init()
            print("Speak:")
            audio = r.listen(source, timeout=2, phrase_time_limit=4)
            try:
                result = r.recognize_google(audio)
                print("You said " + result)
                self._queue_new_message(result)
            except sr.UnknownValueError:
                print("Could not understand audio")
            except sr.RequestError as e:
                print("Could not request results; {0}".format(e))

    def send(self, message: str, client_attr):
        """Speak *message* aloud; when the 'morse' config flag is 'true' the
        text is first converted to Beep/Beeeeeeep morse."""
        if self.persistence.get_config('SpeechClient', 'morse') == 'true':
            message = SpeechClient._message2morse(message)
        # Single definition of the temp mp3 path (was repeated five times).
        tts_path = f"{os.path.dirname(__file__)}/../resources/tts_message.mp3"
        tts = gTTS(text=message, lang='en')
        tts.save(tts_path)
        if platform.system().lower() == 'windows':
            playsound(tts_path)
        else:
            os.system(f"mpg123 {tts_path}")  # >/dev/null 2>&1")
        if os.path.isfile(tts_path):
            os.remove(tts_path)

    @staticmethod
    def _message2morse(message):
        """Translate *message* into spoken morse: dots become 'Beep', dashes
        'Beeeeeeep'; letters are space-separated, unknown characters dropped."""
        # Dictionary representing the morse code chart
        MORSE_CODE_DICT = { 'A':'.-', 'B':'-...',
                            'C':'-.-.', 'D':'-..', 'E':'.',
                            'F':'..-.', 'G':'--.', 'H':'....',
                            'I':'..', 'J':'.---', 'K':'-.-',
                            'L':'.-..', 'M':'--', 'N':'-.',
                            'O':'---', 'P':'.--.', 'Q':'--.-',
                            'R':'.-.', 'S':'...', 'T':'-',
                            'U':'..-', 'V':'...-', 'W':'.--',
                            'X':'-..-', 'Y':'-.--', 'Z':'--..',
                            '1':'.----', '2':'..---', '3':'...--',
                            '4':'....-', '5':'.....', '6':'-....',
                            '7':'--...', '8':'---..', '9':'----.',
                            '0':'-----', ', ':'--..--', '.':'.-.-.-',
                            '?':'..--..', '/':'-..-.', '-':'-....-',
                            '(':'-.--.', ')':'-.--.-'}
        morse = ''
        for letter in message.upper():
            if letter == ' ':
                morse += ' '
            elif letter in MORSE_CODE_DICT:
                morse += MORSE_CODE_DICT[letter] + ' '
            # characters with no morse encoding are silently dropped
        morse = morse.replace('.', "Beep")
        # BUG FIX: morse dashes are '-', not '_'; the previous
        # replace('_', "Beeeeeeep") never matched, so dashes were spoken
        # literally instead of as long beeps.
        morse = morse.replace('-', "Beeeeeeep")
        return morse
import speech_recognition as sr
from gtts import gTTS
import os
import platform
from playsound import playsound
from icarus.logging import icarus_logger
try:
from icarus.clients.WakeWordEngines.porcupine import Porcupine
except OSError:
icarus_logger.warning('Tried using porcupine with windows')
PLING_MP3 = os.path.join(os.path.dirname(__file__), '../resources/pling.mp3')
class SpeechClient(SuperClient):
sensitivity = None
handle = None
pa = None
audio_stream = None
wake_word_handler = None
def __init__(self, skill_handler, persistence):
if platform.system() == 'Windows':
raise OSError
super().__init__(skill_handler, persistence)
def setup(self):
self.wake_word_handler = Porcupine()
def run(self):
self.setup()
while True:
self.wake_word_handler.monitor_audio(self.stt)
@staticmethod
def _play_init():
try:
playsound(PLING_MP3)
except ModuleNotFoundError:
# arch has a problem with python-gobject, using mpg123 as fallback
os.system(f"mpg123 {os.path.dirname(__file__)}/../resources/pling.mp3 >/dev/null 2>&1")
def stt(self):
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source, 0.5)
self._play_init()
print("Speak:")
audio = r.listen(source, timeout=2, phrase_time_limit=4)
try:
result = r.recognize_google(audio)
print("You said " + result)
self._queue_new_message(result)
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results; {0}".format(e))
def send(self, message: str, client_attr):
if self.persistence.get_config('SpeechClient', 'morse') == 'true':
message = SpeechClient._message2morse(message)
tts = gTTS(text=message, lang='en')
tts.save(f"{os.path.dirname(__file__)}/../resources/tts_message.mp3")
if platform.system().lower() == 'windows':
playsound(f"{os.path.dirname(__file__)}/../resources/tts_message.mp3")
else:
os.system(f"mpg123 {os.path.dirname(__file__)}/../resources/tts_message.mp3") # >/dev/null 2>&1")
if os.path.isfile(f"{os.path.dirname(__file__)}/../resources/tts_message.mp3"):
os.remove(f"{os.path.dirname(__file__)}/../resources/tts_message.mp3")
@staticmethod
def _message2morse(message):
# Dictionary representing the morse code chart
MORSE_CODE_DICT = { 'A':'.-', 'B':'-...',
'C':'-.-.', 'D':'-..', 'E':'.',
'F':'..-.', 'G':'--.', 'H':'....',
'I':'..', 'J':'.---', 'K':'-.-',
'L':'.-..', 'M':'--', 'N':'-.',
'O':'---', 'P':'.--.', 'Q':'--.-',
'R':'.-.', 'S':'...', 'T':'-',
'U':'..-', 'V':'...-', 'W':'.--',
'X':'-..-', 'Y':'-.--', 'Z':'--..',
'1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....',
'7':'--...', '8':'---..', '9':'----.',
'0':'-----', ', ':'--..--', '.':'.-.-.-',
'?':'..--..', '/':'-..-.', '-':'-....-',
'(':'-.--.', ')':'-.--.-'}
morse = ''
for letter in message.upper():
if letter == ' ':
morse += ' '
elif letter in MORSE_CODE_DICT:
morse += MORSE_CODE_DICT[letter] + ' '
else:
morse += ''
morse = morse.replace('.', "Beep")
morse = morse.replace('_', "Beeeeeeep")
return morse | 0.272315 | 0.064772 |
#!/usr/bin/env python
import csv
import sys
import os
import datetime
import random
import re
import datetime
import logging
import openpyxl
from optparse import OptionParser
class GraphExcel:
    """Load a CSV or Excel (.xls/.xlsx) spreadsheet into a header row plus
    data rows, with helpers for generating random hex colours per column.

    Attributes set by loading: header (first row), data (remaining rows),
    rowSize (total row count) and colSize (column count).
    """

    def __init__(self, inputFilename, verbose=False):
        self.logger = logging.getLogger('graph_excel')
        self.rowSize = 0
        self.colSize = 0
        self.header = []
        self.data = []
        self.verbose = verbose
        lowered = inputFilename.lower()
        # Dispatch on the real file extension; endswith() is clearer and
        # safer than substring checks against a fixed-length suffix slice.
        if lowered.endswith('.csv'):
            self.logger.info('Processing CSV {0}'.format(inputFilename))
            self.processCSV(inputFilename)
        elif lowered.endswith(('.xls', '.xlsx')):
            self.logger.info('Processing Excel {0}'.format(inputFilename))
            self.processExcel(inputFilename)
        else:
            print('Require valid Excel or CSV input File')
            self.logger.error('Require valid Excel or CSV input File')

    def getHTML(self):
        # TODO: placeholder -- HTML rendering is not implemented yet.
        return 'test'

    def processCSV(self, inputFilename):
        """Read a CSV file; the first row becomes the header, the rest data."""
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(inputFilename, 'r') as inputFile:
            inputCSV = csv.reader(inputFile)
            index = 0
            for row in inputCSV:
                if index == 0:
                    self.header = row
                    self.colSize = len(self.header)
                else:
                    self.data.append(row)
                index += 1
        self.rowSize = index

    def processExcel(self, inputFilename):
        """Read the first worksheet of an Excel file as a list of lists.

        NOTE(review): rows skip None cells entirely, so ragged rows can end
        up shorter than colSize -- presumably intended; confirm with callers.
        """
        workbook = openpyxl.load_workbook(inputFilename, data_only=True)
        worksheet = workbook.worksheets[0]
        self.rowSize = worksheet.max_row
        self.colSize = worksheet.max_column
        for rowIndex in range(1, self.rowSize + 1):
            row = []
            for colIndex in range(1, self.colSize + 1):
                # Fetch the cell once instead of twice per iteration.
                element = worksheet.cell(row=rowIndex, column=colIndex).value
                if element is not None:
                    row.append(element)
            if rowIndex == 1:
                self.header = row
            else:
                self.data.append(row)

    def getRows(self):
        return self.rowSize

    def getCols(self):
        return self.colSize

    def getHeader(self):
        return self.header

    def getData(self):
        return self.data

    def getRandomColours(self):
        """Return one random hex colour string per header column."""
        return [self.getRandomHex() for _ in self.header]

    def getRandomHex(self):
        """Return a random 6-digit lowercase hex colour (without '#')."""
        digits = '0123456789abcdef'
        return ''.join(random.choice(digits) for _ in range(6))
def main(argv):
    """CLI entry point: validate the single filename argument and print the
    HTML generated from the spreadsheet."""
    parser = OptionParser(usage="Usage: Excel2HTML <input-filename>")
    parser.add_option("-v", "--verbose",
                      action="store_true",
                      dest="verboseFlag",
                      default=False,
                      help="Verbose output from the script")
    (options, filename) = parser.parse_args()
    if len(filename) != 1 or not os.path.isfile(filename[0]):
        # BUG FIX: print_help() writes to stdout and returns None; the old
        # ``print parser.print_help()`` printed the help followed by 'None'.
        parser.print_help()
        exit(1)
    # BUG FIX: the class is GraphExcel (Excel2HTML does not exist) and its
    # constructor signature is (inputFilename, verbose) -- the old call
    # passed the arguments to the wrong name in the wrong order.
    graphExcel = GraphExcel(filename[0], options.verboseFlag)
    print(graphExcel.getHTML())


if __name__ == "__main__":
    sys.exit(main(sys.argv))
#!/usr/bin/env python
import csv
import sys
import os
import datetime
import random
import re
import datetime
import logging
import openpyxl
from optparse import OptionParser
class GraphExcel:
def __init__(self, inputFilename, verbose=False):
self.logger = logging.getLogger('graph_excel')
self.rowSize = 0
self.colSize = 0
self.header = []
self.data = []
self.verbose = verbose
if '.csv' in inputFilename[-4:].lower():
self.logger.info('Processing CSV {0}'.format(inputFilename))
self.processCSV(inputFilename)
elif '.xls' in inputFilename[-4:].lower():
self.logger.info('Processing Excel {0}'.format(inputFilename))
self.processExcel(inputFilename)
elif '.xlsx' in inputFilename[-5:].lower():
self.logger.info('Processing Excel {0}'.format(inputFilename))
self.processExcel(inputFilename)
else:
print 'Require valid Excel or CSV input File'
self.logger.error('Require valid Excel or CSV input File')
def getHTML(self):
return 'test'
def processCSV(self, inputFilename):
inputFile = open(inputFilename, 'r')
inputCSV = csv.reader(inputFile)
index = 0
for row in inputCSV:
if index == 0:
self.header = row
self.colSize = len(self.header)
else:
self.data.append(row)
index += 1
self.rowSize = index
def processExcel(self, inputFilename):
# Read in the Excel file as a list of lists
workbook = openpyxl.load_workbook(inputFilename, data_only=True)
worksheet = workbook.worksheets[0]
self.rowSize = worksheet.max_row
self.colSize = worksheet.max_column
for rowIndex in xrange(1, self.rowSize+1):
row = []
for colIndex in xrange(1, self.colSize+1):
if worksheet.cell(row=rowIndex, column=colIndex).value is not None:
element = worksheet.cell(row=rowIndex, column=colIndex).value
row.append(element)
if rowIndex == 1:
self.header = row
else:
self.data.append(row)
def getRows(self):
return self.rowSize
def getCols(self):
return self.colSize
def getHeader(self):
return self.header
def getData(self):
return self.data
def getRandomColours(self):
coloursList = []
for element in self.header:
coloursList.append(self.getRandomHex())
return coloursList
def getRandomHex(self):
digits = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
randomColour = ''
for x in xrange(0,6):
index = random.randint(0,15)
randomColour += digits[index]
return randomColour
def main(argv):
parser = OptionParser(usage="Usage: Excel2HTML <input-filename>")
parser.add_option("-v", "--verbose",
action="store_true",
dest="verboseFlag",
default=False,
help="Verbose output from the script")
(options, filename) = parser.parse_args()
if len(filename) != 1 or not os.path.isfile(filename[0]) :
print parser.print_help()
exit(1)
excel2HTML = Excel2HTML(options.verboseFlag, filename[0])
print excel2HTML.getHTML()
if __name__ == "__main__":
sys.exit(main(sys.argv)) | 0.165458 | 0.10393 |
import time
from syndicate.commons.log_helper import get_logger
from syndicate.core import ClientError
from syndicate.core.helper import unpack_kwargs
from syndicate.core.resources.base_resource import BaseResource
from syndicate.core.resources.helper import build_description_obj
_LOG = get_logger('syndicate.core.resources.kinesis_resource')
class KinesisResource(BaseResource):
    """Syndicate resource handler that creates, describes and removes AWS
    Kinesis streams through the project's Kinesis connection wrapper."""

    def __init__(self, kin_conn) -> None:
        # kin_conn: connection object exposing get_stream / create_stream /
        # remove_stream (project wrapper around the AWS Kinesis API).
        self.kin_conn = kin_conn

    def create_kinesis_stream(self, args):
        """Create all described streams concurrently; returns a mapping of
        stream ARN -> description object."""
        return self.create_pool(self._create_kinesis_stream_from_meta, args)

    def remove_kinesis_streams(self, args):
        """Remove all described streams concurrently."""
        self.create_pool(self._remove_kinesis_stream, args)

    @unpack_kwargs
    def _remove_kinesis_stream(self, arn, config):
        """Remove one stream; an already-missing stream is not an error."""
        stream_name = config['resource_name']
        try:
            self.kin_conn.remove_stream(stream_name=stream_name)
            _LOG.info('Kinesis stream %s was removed.', stream_name)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                # FIX: Logger.warn is deprecated since Python 3.3; use warning.
                _LOG.warning('Kinesis stream %s is not found', stream_name)
            else:
                raise e

    @unpack_kwargs
    def _create_kinesis_stream_from_meta(self, name, meta):
        """Create one stream from its deployment meta, handling the cases
        where a stream with the same name already exists or is being deleted."""
        response = self.kin_conn.get_stream(name)
        if response:
            stream_status = response['StreamDescription']['StreamStatus']
            if stream_status == 'DELETING':
                # Give AWS time to finish deleting before re-creating the name.
                _LOG.debug('Waiting for deletion kinesis stream %s...', name)
                time.sleep(120)
            else:
                _LOG.warning('%s kinesis stream exists', name)
                return {
                    response['StreamARN']: build_description_obj(response,
                                                                 name, meta)
                }
        self.kin_conn.create_stream(stream_name=name,
                                    shard_count=meta['shard_count'])
        _LOG.info('Created kinesis stream %s.', name)
        return self.describe_kinesis_stream(name=name, meta=meta)

    def describe_kinesis_stream(self, name, meta):
        """Return {stream ARN: description object} for an existing stream."""
        response = self.kin_conn.get_stream(name)
        return {
            response['StreamARN']: build_description_obj(response, name, meta)
        }
from syndicate.commons.log_helper import get_logger
from syndicate.core import ClientError
from syndicate.core.helper import unpack_kwargs
from syndicate.core.resources.base_resource import BaseResource
from syndicate.core.resources.helper import build_description_obj
_LOG = get_logger('syndicate.core.resources.kinesis_resource')
class KinesisResource(BaseResource):
    """Resource handler for AWS Kinesis streams."""

    def __init__(self, kin_conn) -> None:
        self.kin_conn = kin_conn

    def create_kinesis_stream(self, args):
        # Fan per-stream creation out over the worker pool.
        return self.create_pool(self._create_kinesis_stream_from_meta, args)

    def remove_kinesis_streams(self, args):
        # Fan per-stream removal out over the worker pool.
        self.create_pool(self._remove_kinesis_stream, args)

    @unpack_kwargs
    def _remove_kinesis_stream(self, arn, config):
        """Remove one stream, tolerating streams that no longer exist."""
        stream_name = config['resource_name']
        try:
            self.kin_conn.remove_stream(stream_name=stream_name)
        except ClientError as e:
            code = e.response['Error']['Code']
            if code != 'ResourceNotFoundException':
                raise e
            _LOG.warn('Kinesis stream %s is not found', stream_name)
        else:
            _LOG.info('Kinesis stream %s was removed.', stream_name)

    @unpack_kwargs
    def _create_kinesis_stream_from_meta(self, name, meta):
        """Create (or adopt) one stream and return {arn: description}."""
        existing = self.kin_conn.get_stream(name)
        if existing:
            status = existing['StreamDescription']['StreamStatus']
            if status != 'DELETING':
                _LOG.warn('%s kinesis stream exists', name)
                return {
                    existing['StreamARN']: build_description_obj(
                        existing, name, meta)
                }
            # Stream is being torn down; wait before re-creating it.
            _LOG.debug('Waiting for deletion kinesis stream %s...', name)
            time.sleep(120)
        self.kin_conn.create_stream(stream_name=name,
                                    shard_count=meta['shard_count'])
        _LOG.info('Created kinesis stream %s.', name)
        return self.describe_kinesis_stream(name=name, meta=meta)

    def describe_kinesis_stream(self, name, meta):
        """Describe an existing stream as {arn: description}."""
        stream = self.kin_conn.get_stream(name)
        return {stream['StreamARN']: build_description_obj(stream, name, meta)}
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Snack
class ThingTests(TestCase):
    """End-to-end tests for the Snack model and its CRUD views."""

    def setUp(self):
        """Create one user and one snack shared by every test below."""
        self.user = get_user_model().objects.create(
            username="tester", email="<EMAIL>", password="<PASSWORD>"
        )
        self.snack = Snack.objects.create(
            name="banana", description="Healthy Fruits", purchaser=self.user,
        )

    def test_string_representation(self):
        # __str__ should render the snack's name.
        self.assertEqual(str(self.snack), "banana")

    def test_thing_content(self):
        self.assertEqual(f"{self.snack.name}", "banana")
        self.assertEqual(f"{self.snack.purchaser}", "tester")
        self.assertEqual(self.snack.description, "Healthy Fruits")

    def test_thing_list_view(self):
        response = self.client.get(reverse("snack_list"))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "banana")
        self.assertTemplateUsed(response, "snack_list.html")

    def test_thing_detail_view(self):
        # args=[1] targets the snack created in setUp (first primary key);
        # clearer than the original args="1" (a string iterated char-by-char).
        response = self.client.get(reverse("snack_detail", args=[1]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "snack_detail.html")

    def test_thing_create_view(self):
        response = self.client.post(
            reverse("snack_create"),
            {
                "name": "Apple slices with peanut butter",
                "description": "Apples and peanut butter are a match made in heaven — both nutritionally and flavor-wise.On one hand, apples are a fiber-rich fruit. On the other hand, peanuts provide healthy fats, plant-based protein, and fiber — pretty much all of the filling nutrients you should look for in a snack",
                "purchaser": self.user.id,
            },
            follow=True,
        )
        # The new snack gets pk 2 (pk 1 is created in setUp).
        self.assertRedirects(response, reverse("snack_detail", args=[2]))

    def test_thing_update_view_redirect(self):
        response = self.client.post(
            reverse("snack_update", args=[1]),
            {"name": "<NAME>", "description": "Any Thing",
             "purchaser": self.user.id},
        )
        self.assertRedirects(response, reverse("snack_detail", args=[1]))

    def test_thing_delete_view(self):
        response = self.client.get(reverse("snack_delete", args=[1]))
        self.assertEqual(response.status_code, 200)
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Snack
class ThingTests(TestCase) :
    """Tests for the Snack model and its list/detail/create/update/delete views."""

    def setUp(self):
        """Create one user and one snack shared by every test below."""
        self.user = get_user_model().objects.create(
            username = "tester", email = "<EMAIL>", password = "<PASSWORD>"
        )
        self.snack = Snack.objects.create(
            name="banana", description = "Healthy Fruits" , purchaser = self.user,
        )

    def test_string_representation(self):
        # __str__ should render the snack's name.
        self.assertEqual(str(self.snack), "banana")

    def test_thing_content(self):
        self.assertEqual(f"{self.snack.name}", "banana")
        self.assertEqual(f"{self.snack.purchaser}", "tester")
        self.assertEqual(self.snack.description, "Healthy Fruits")

    def test_thing_list_view(self):
        response = self.client.get(reverse("snack_list"))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "banana")
        self.assertTemplateUsed(response, "snack_list.html")

    def test_thing_detail_view(self):
        # args="1" is a one-character string, equivalent here to args=[1]:
        # it targets the snack created in setUp (first primary key).
        response = self.client.get(reverse("snack_detail", args="1"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "snack_detail.html")

    def test_thing_create_view(self):
        response = self.client.post(
            reverse("snack_create"),
            {
                "name": "Apple slices with peanut butter",
                "description": "Apples and peanut butter are a match made in heaven — both nutritionally and flavor-wise.On one hand, apples are a fiber-rich fruit. On the other hand, peanuts provide healthy fats, plant-based protein, and fiber — pretty much all of the filling nutrients you should look for in a snack",
                "purchaser": self.user.id,
            }, follow=True
        )
        # The new snack gets pk 2 (pk 1 is created in setUp).
        self.assertRedirects(response, reverse("snack_detail", args="2"))

    def test_thing_update_view_redirect(self):
        response = self.client.post(
            reverse("snack_update", args="1"),
            {"name" : "<NAME>", "description" : "Any Thing", "purchaser" : self.user.id}
        )
        self.assertRedirects(response, reverse("snack_detail", args="1"))

    def test_thing_delete_view(self):
        # GET on the delete view shows the confirmation page (200), it does
        # not delete the object.
        response = self.client.get(reverse("snack_delete", args="1"))
        self.assertEqual(response.status_code, 200)
"""Blind image watermarking via the 2-D FFT.

Scrambles the watermark with seeded row/column permutations, mirrors it so
both halves of the host spectrum carry the mark, adds it to the image
spectrum with strength ``alpha``, then demonstrates extraction.
"""
import numpy as np
import matplotlib.image as io
import matplotlib.pyplot as plt

# Embedding strength: larger alpha -> more robust but more visible mark.
alpha = 30
# Seeds for the row/column scrambling permutations (needed again to decode).
seedX = 998244353
seedY = 1000000007

# File configuration.  These names were previously only assigned inside
# commented-out string blocks, which left oriFile/waterMarkFile/outFile
# undefined and crashed with NameError at the imread() calls below.
# Alternative config (use with alpha = 5):
#   oriFile = "qrcode.png"; waterMarkFile = "testwm.png"; outFile = "watermarked.png"
oriFile = "test.png"
waterMarkFile = "website.png"
outFile = "test_wmed.png"

ori = io.imread(oriFile)
waterMark = io.imread(waterMarkFile)
# Keep RGB channels only (drop alpha if present).
ori = ori[:, :, :3]
waterMark = waterMark[:, :, :3]
oriN, oriM, oriA = ori.shape
watermarkN, watermarkM, watermarkR = waterMark.shape

# --- Randomize: pad the watermark to half the host height, then scramble ---
TH1 = np.zeros((oriN // 2, oriM, oriA))
TH1[0:watermarkN, 0:watermarkM, :] = waterMark
THN, THM, THR = TH1.shape
plt.imshow(TH1)
plt.show()

np.random.seed(seedX)
randX = np.random.permutation(np.arange(THN))
np.random.seed(seedY)
randY = np.random.permutation(np.arange(THM))
# One vectorized fancy-indexing step, equivalent to the original
# per-pixel triple loop: TH[i, j] = TH1[randX[i], randY[j]].
TH = TH1[np.ix_(randX, randY)]
plt.imshow(TH)
plt.show()

# --- Mirror: scrambled mark in the top half, 180-degree rotated copy in
# the bottom half, so the embedded spectrum is symmetric ---
_waterMark = np.zeros((oriN, oriM, oriA))
_waterMark[0:THN, 0:THM, :] = TH
_waterMark[oriN - THN:oriN, oriM - THM:oriM, :] = TH[::-1, ::-1, :]
plt.imshow(_waterMark)
plt.show()

# --- Embed: add the mark to each channel's spectrum, then invert ---
oriR = np.fft.fft2(ori[:, :, 0])
oriG = np.fft.fft2(ori[:, :, 1])
oriB = np.fft.fft2(ori[:, :, 2])
_waterMarkedR = oriR + alpha * _waterMark[:, :, 0]
_waterMarkedG = oriG + alpha * _waterMark[:, :, 1]
_waterMarkedB = oriB + alpha * _waterMark[:, :, 2]
waterMarkedR = np.fft.ifft2(_waterMarkedR)
waterMarkedG = np.fft.ifft2(_waterMarkedG)
waterMarkedB = np.fft.ifft2(_waterMarkedB)

# Assemble an RGBA image; the imaginary parts are numerical noise only.
waterMarked = np.zeros((oriN, oriM, oriA + 1))
waterMarked[:, :, 0] = np.real(waterMarkedR)
waterMarked[:, :, 1] = np.real(waterMarkedG)
waterMarked[:, :, 2] = np.real(waterMarkedB)
waterMarked[:, :, 3] = np.ones((oriN, oriM))
waterMarked = np.clip(waterMarked, 0, 1)
plt.imshow(waterMarked)
plt.show()
io.imsave(outFile, waterMarked)

# --- Decode immediately (sanity check; needs the original image) ---
_waterMarkedR = np.fft.fft2(waterMarkedR)
_waterMarkedG = np.fft.fft2(waterMarkedG)
_waterMarkedB = np.fft.fft2(waterMarkedB)
oriR = np.fft.fft2(ori[:, :, 0])
oriG = np.fft.fft2(ori[:, :, 1])
oriB = np.fft.fft2(ori[:, :, 2])
# Take the real part at assignment time (avoids numpy's ComplexWarning
# while producing the same values as the original real-cast afterwards).
_waterMark = np.zeros((oriN, oriM, oriA))
_waterMark[:, :, 0] = np.real((_waterMarkedR - oriR) / alpha)
_waterMark[:, :, 1] = np.real((_waterMarkedG - oriG) / alpha)
_waterMark[:, :, 2] = np.real((_waterMarkedB - oriB) / alpha)

# Undo the scrambling: the value recovered at (i, j) belongs at
# (randX[i], randY[j]) in the unscrambled watermark.
waterMarkOut = np.zeros((oriN, oriM, oriA))
waterMarkOut[np.ix_(randX, randY)] = _waterMark[0:THN, 0:THM, :]
# Rebuild the mirrored bottom half from the recovered top half.
waterMarkOut[oriN - THN:oriN, oriM - THM:oriM, :] = \
    waterMarkOut[0:THN, 0:THM, :][::-1, ::-1, :]
plt.imshow(waterMarkOut)
plt.show()
# NOTE(review): this copy of the script uses `np` throughout but never
# imports numpy — presumably `import numpy as np` was lost; confirm.
import matplotlib.image as io
import matplotlib.pyplot as plt

# Embedding strength for the spectral watermark.
alpha = 30
# Seeds for the row/column scrambling permutations (needed to decode).
seedX = 998244353
seedY = 1000000007
''' First alpha = 5
oriFile = "qrcode.png"
waterMarkFile = "testwm.png"
outFile = "watermarked.png"
'''
''' Second alpha = 30
oriFile = "test.png"
waterMarkFile = "website.png"
outFile = "test_wmed.png"
'''
# NOTE(review): oriFile/waterMarkFile/outFile are only assigned inside the
# string literals above, so the imread() calls below raise NameError unless
# one of the configurations is uncommented.
ori = io.imread(oriFile)
waterMark = io.imread(waterMarkFile)
# Keep RGB channels only (drop alpha if present).
ori = ori[:, :, :3]
waterMark = waterMark[:, :, :3]
oriN, oriM, oriA = ori.shape
watermarkN, watermarkM, watermarkR = waterMark.shape
# Randomlize
# Pad the watermark into a half-height canvas (TH1), then scramble it into TH.
TH = np.zeros((oriN // 2, oriM, oriA))
TH1 = np.zeros((oriN // 2, oriM, oriA))
TH1[0 : watermarkN, 0 : watermarkM, :] = waterMark.copy()
THN, THM, THR = TH.shape
plt.imshow(TH1)
plt.show()
randX = np.arange(THN)
np.random.seed(seedX)
randX = np.random.permutation(randX)
randY = np.arange(THM)
np.random.seed(seedY)
randY = np.random.permutation(randY)
# Per-pixel scramble: TH[i, j] takes the value at (randX[i], randY[j]).
for i in range(0, THN):
    for j in range(0, THM):
        for k in range(0, THR):
            TH[i][j][k] = TH1[randX[i]][randY[j]][k].copy()
plt.imshow(TH)
plt.show()
# Make symmetric
# Top half carries the scrambled mark; bottom half a 180-degree rotated copy.
_waterMark = np.zeros((oriN, oriM, oriA))
_waterMark[0 : THN, 0 : THM, :] = TH.copy()
for i in range(0, THN):
    for j in range(0, THM):
        for k in range(0, THR):
            _waterMark[oriN - i - 1][oriM - j - 1][k] = TH[i][j][k].copy()
plt.imshow(_waterMark)
plt.show()
# Do FFT
# Embed the mark additively in each channel's spectrum, then invert.
oriR = np.fft.fft2(ori[:, :, 0])
oriG = np.fft.fft2(ori[:, :, 1])
oriB = np.fft.fft2(ori[:, :, 2])
_waterMarkedR = oriR + alpha * _waterMark[:, :, 0]
_waterMarkedG = oriG + alpha * _waterMark[:, :, 1]
_waterMarkedB = oriB + alpha * _waterMark[:, :, 2]
waterMarkedR = np.fft.ifft2(_waterMarkedR)
waterMarkedG = np.fft.ifft2(_waterMarkedG)
waterMarkedB = np.fft.ifft2(_waterMarkedB)
# Assemble RGBA output; the imaginary parts are numerical noise only.
waterMarked = np.zeros((oriN, oriM, oriA + 1))
waterMarked[:, :, 0] = np.real(waterMarkedR).copy()
waterMarked[:, :, 1] = np.real(waterMarkedG).copy()
waterMarked[:, :, 2] = np.real(waterMarkedB).copy()
waterMarked[:, :, 3] = np.ones((oriN, oriM))
waterMarked = np.clip(waterMarked, 0, 1)
plt.imshow(waterMarked)
plt.show()
io.imsave(outFile, waterMarked)
# Decode instantly
# Sanity-check extraction: recover the mark from the difference of spectra.
_waterMarkedR = np.fft.fft2(waterMarkedR)
_waterMarkedG = np.fft.fft2(waterMarkedG)
_waterMarkedB = np.fft.fft2(waterMarkedB)
oriR = np.fft.fft2(ori[:, :, 0])
oriG = np.fft.fft2(ori[:, :, 1])
oriB = np.fft.fft2(ori[:, :, 2])
_waterMark = np.zeros((oriN, oriM, oriA))
_waterMark[:, :, 0] = (_waterMarkedR - oriR) / alpha
_waterMark[:, :, 1] = (_waterMarkedG - oriG) / alpha
_waterMark[:, :, 2] = (_waterMarkedB - oriB) / alpha
_waterMark = np.real(_waterMark)
# Undo the scrambling: the value recovered at (i, j) belongs at
# (randX[i], randY[j]) in the unscrambled watermark.
waterMarkOut = np.zeros((oriN, oriM, oriA))
for i in range(0, THN):
    for j in range(0, THM):
        for k in range(0, THR):
            waterMarkOut[randX[i], randY[j], k] = _waterMark[i, j, k]
# Mirror the recovered top half into the bottom half.
for i in range(0, THN):
    for j in range(0, THM):
        waterMarkOut[oriN - i - 1, oriM - j - 1, :] = waterMarkOut[i, j, :].copy()
plt.imshow(waterMarkOut)
plt.show()
"""Merge monthly AIS zone CSVs, enrich them, and write one CSV per month."""
from os import path
import os
from datetime import datetime
import pandas as pd

proc_dir = 'processed'
# Templates: ':dir', ':year', ':month', ':zone' are substituted below.
file = ':dir/:year/AIS_:year_:month_Zone:zone.csv'
file_out = ':dir/:year/AIS_:year_:month.csv'
vessel_file = 'vessel_types.csv'
flag_file = 'maritime_vessel_identification.csv'
years = ['2015']
months = ['01', '02', '03', '04', '05', '06',
          '07', '08', '09', '10', '11', '12']
# Zones go from 01 to 20 (zones 12 and 13 do not exist)
# Currently retrieving only the zones corresponding to the USA west coast
zones = ['06', '07', '08', '09', '10', '11']

vessels = pd.read_csv(vessel_file)
flags = pd.read_csv(flag_file)


def get_vessel_group(ntype):
    """Return the vessel type group for a numeric vessel type."""
    return vessels.loc[vessels['type_number'] == ntype,
                       'type_group'].values[0]


def get_vessel_desc(ntype):
    """Return the human-readable description for a numeric vessel type."""
    return vessels.loc[vessels['type_number'] == ntype,
                       'description'].values[0]


def _lookup_flag(mmsi, column):
    """Look up *column* in the flags table by the MMSI's 3-digit MID prefix.

    Returns '' when the MID is unknown.  Shared by get_vessel_country and
    get_vessel_flag, which previously duplicated this logic.
    """
    mid = int(str(mmsi)[:3])
    found = flags.loc[flags['mid'] == mid, column].values
    return found[0] if len(found) > 0 else ''


def get_vessel_country(mmsi):
    """Return the flag country name for an MMSI ('' if unknown)."""
    return _lookup_flag(mmsi, 'country')


def get_vessel_flag(mmsi):
    """Return the flag country code for an MMSI ('' if unknown)."""
    return _lookup_flag(mmsi, 'country_code')


if not path.isdir(proc_dir):
    os.mkdir(proc_dir)

for year in years:
    if not path.isdir(path.join(proc_dir, year)):
        os.mkdir(path.join(proc_dir, year))
    for month in months:
        print('Processing AIS for', month, '/', year)
        to_save = file_out.replace(':dir', proc_dir) \
                          .replace(':year', year) \
                          .replace(':month', month)
        data = None
        for zone in zones:
            print('    Processing AIS for Zone', zone)
            to_process = file.replace(':dir', 'AIS_ASCII_by_UTM_Month') \
                             .replace(':year', year) \
                             .replace(':month', month) \
                             .replace(':zone', zone)
            if not path.isfile(to_process):
                print('    File', to_process, 'not found!')
                continue
            df = pd.read_csv(to_process)
            new_df = pd.DataFrame()
            # VesselType may be missing; 0 is used as the "unknown" type.
            v_type = [0 if pd.isna(t) else int(t)
                      for t in df['VesselType']]
            new_df['MMSI'] = df['MMSI']
            new_df['IMO'] = df['IMO']
            new_df['DateTime'] = \
                [datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
                 for d in df['BaseDateTime'].values]
            new_df['Lat'] = df['LAT']
            new_df['Lon'] = df['LON']
            new_df['SOG'] = df['SOG']
            new_df['COG'] = df['COG']
            new_df['Heading'] = df['Heading']
            new_df['VesselName'] = df['VesselName']
            new_df['CallSign'] = df['CallSign']
            new_df['VesselType'] = v_type
            new_df['VesselTypeGroup'] = \
                [get_vessel_group(v) for v in v_type]
            new_df['VesselTypeDescription'] = \
                [get_vessel_desc(v) for v in v_type]
            new_df['Status'] = df['Status']
            new_df['Length'] = df['Length']
            new_df['Width'] = df['Width']
            new_df['Draft'] = df['Draft']
            new_df['Cargo'] = df['Cargo']
            new_df['Zone'] = zone
            new_df['Country'] = [get_vessel_country(mmsi)
                                 for mmsi in df['MMSI'].values]
            new_df['Flag'] = [get_vessel_flag(mmsi)
                              for mmsi in df['MMSI'].values]
            if data is not None:
                data = pd.concat([data, new_df], ignore_index=True)
            else:
                data = new_df
        if data is None:
            # No zone file existed for this month: nothing to sort or save.
            # (Previously this fell through and crashed with AttributeError
            # on data.sort_values.)
            print('    No data for this month, skipping')
            continue
        print('    Sorting records of processed file(s)')
        data.sort_values(by='DateTime', ascending=True, inplace=True)
        # data['IMO'] = data['IMO'].astype('int')
        data['VesselType'] = data['VesselType'].astype('int')
        print('    Saving file', to_save)
        data.to_csv(to_save, index=False)

print('\nDone!')
# Merge monthly AIS zone CSVs, enrich them, and write one CSV per month.
# NOTE(review): this copy uses `path.isdir`/`path.join` but does not import
# `path` — presumably `from os import path` was lost; confirm before running.
import os
from datetime import datetime
import pandas as pd

proc_dir = 'processed'
# Templates: ':dir', ':year', ':month', ':zone' are substituted below.
file = ':dir/:year/AIS_:year_:month_Zone:zone.csv'
file_out = ':dir/:year/AIS_:year_:month.csv'
vessel_file = 'vessel_types.csv'
flag_file = 'maritime_vessel_identification.csv'
years = ['2015']
months = ['01', '02', '03', '04', '05', '06',
          '07', '08', '09', '10', '11', '12']
# Zones go from 01 to 20 (zones 12 and 13 do not exist)
# Currently retrieving only the zones corresponding to the USA west coast
zones = ['06', '07', '08', '09', '10', '11']

vessels = pd.read_csv(vessel_file)
flags = pd.read_csv(flag_file)


def get_vessel_group(ntype):
    """Return the vessel type group for a numeric vessel type."""
    return vessels.loc[vessels['type_number'] == ntype,
                       'type_group'].values[0]


def get_vessel_desc(ntype):
    """Return the human-readable description for a numeric vessel type."""
    return vessels.loc[vessels['type_number'] == ntype,
                       'description'].values[0]


def get_vessel_country(mmsi):
    """Return the flag country for an MMSI's 3-digit MID ('' if unknown)."""
    mid = int(str(mmsi)[:3])
    f = flags.loc[flags['mid'] == mid, 'country'].values
    return f[0] if len(f) > 0 else ''


def get_vessel_flag(mmsi):
    """Return the flag country code for an MMSI's MID ('' if unknown)."""
    mid = int(str(mmsi)[:3])
    f = flags.loc[flags['mid'] == mid, 'country_code'].values
    return f[0] if len(f) > 0 else ''


if not path.isdir(proc_dir):
    os.mkdir(proc_dir)

for year in years:
    if not path.isdir(path.join(proc_dir, year)):
        os.mkdir(path.join(proc_dir, year))
    for month in months:
        print('Processing AIS for', month, '/', year)
        to_save = file_out.replace(':dir', proc_dir) \
                          .replace(':year', year) \
                          .replace(':month', month)
        # Accumulates the concatenation of all zone files for this month.
        # NOTE(review): stays None when no zone file exists, which makes
        # data.sort_values below raise AttributeError — confirm intent.
        data = None
        for zone in zones:
            print('    Processing AIS for Zone', zone)
            to_process = file.replace(':dir', 'AIS_ASCII_by_UTM_Month') \
                             .replace(':year', year) \
                             .replace(':month', month) \
                             .replace(':zone', zone)
            if not path.isfile(to_process):
                print('    File', to_process, 'not found!')
                continue
            df = pd.read_csv(to_process)
            new_df = pd.DataFrame()
            # VesselType may be missing; 0 is used as the "unknown" type.
            v_type = [0 if pd.isna(t) else int(t)
                      for t in df['VesselType']]
            new_df['MMSI'] = df['MMSI']
            new_df['IMO'] = df['IMO']
            new_df['DateTime'] = \
                [datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
                 for d in df['BaseDateTime'].values]
            new_df['Lat'] = df['LAT']
            new_df['Lon'] = df['LON']
            new_df['SOG'] = df['SOG']
            new_df['COG'] = df['COG']
            new_df['Heading'] = df['Heading']
            new_df['VesselName'] = df['VesselName']
            new_df['CallSign'] = df['CallSign']
            new_df['VesselType'] = v_type
            new_df['VesselTypeGroup'] = \
                [get_vessel_group(v) for v in v_type]
            new_df['VesselTypeDescription'] = \
                [get_vessel_desc(v) for v in v_type]
            new_df['Status'] = df['Status']
            new_df['Length'] = df['Length']
            new_df['Width'] = df['Width']
            new_df['Draft'] = df['Draft']
            new_df['Cargo'] = df['Cargo']
            new_df['Zone'] = zone
            new_df['Country'] = [get_vessel_country(mmsi)
                                 for mmsi in df['MMSI'].values]
            new_df['Flag'] = [get_vessel_flag(mmsi)
                              for mmsi in df['MMSI'].values]
            if data is not None:
                data = pd.concat([data, new_df], ignore_index=True)
            else:
                data = new_df
        print('    Sorting records of processed file(s)')
        data.sort_values(by='DateTime', ascending=True, inplace=True)
        # data['IMO'] = data['IMO'].astype('int')
        data['VesselType'] = data['VesselType'].astype('int')
        print('    Saving file', to_save)
        data.to_csv(to_save, index=False)

print('\nDone!')
from __future__ import absolute_import
from cssselect import GenericTranslator
from diazo import utils
from lxml import etree
from optparse import OptionParser
import logging
import sys
logger = logging.getLogger('diazo')
usage = __doc__


class LocationPathTranslator(GenericTranslator):
    """GenericTranslator variant that emits absolute location paths.

    The descendant combinator is rendered as '//' so the right-hand
    selector may match any descendant of the left-hand one.
    """

    def xpath_descendant_combinator(self, left, right):
        """Join *left* and *right* with the XPath descendant axis."""
        joined = left.join('//', right)
        return joined


_generic_translator = GenericTranslator()
_location_path_translator = LocationPathTranslator()
def convert_css_selectors(rules):
    """Convert css rules to xpath rules element tree in place.

    Every attribute in the css namespace is translated to an equivalent
    XPath expression and written back under the attribute's local name.
    Returns the (mutated) *rules* tree.
    """
    # XXX: There is a :root pseudo-class # NOQA: T000
    # http://www.w3.org/TR/css3-selectors/#root-pseudo
    # We may wish to add support to lxml.cssselect for it some day.
    # Select every element carrying at least one css-namespaced attribute.
    for element in rules.xpath(
        "//@*[namespace-uri()='{nsp}']/..".format(nsp=utils.namespaces['css']),
    ):
        tag_namespace = utils.namespace(element.tag)
        # Optional css:prefix attribute overrides the default XPath prefix.
        css_prefix = element.attrib.get(
            utils.fullname(
                utils.namespaces['css'],
                'prefix',
            ),
            None,
        )
        for name, value in element.attrib.items():
            # Skip attributes outside the css namespace.
            if not name.startswith(
                '{%s}' % utils.namespaces['css'],  # NOQA: S001
            ):
                continue
            localname = utils.localname(name)
            if localname == 'prefix':
                # Handled above; not itself a selector.
                continue
            if not value:
                # Empty selector maps to an empty xpath attribute.
                element.attrib[localname] = ''
                continue
            if (
                tag_namespace == utils.namespaces['diazo'] and
                localname in
                (
                    'content',
                    'content-children',
                    'if-content',
                    'if-not-content',
                ) or (
                    tag_namespace == utils.namespaces['xsl'] and
                    localname in ('match',)
                )
            ):
                # Content-selecting directives and xsl:match need absolute
                # location paths, so use the '//'-joining translator.
                prefix = css_prefix or '//'
                tr = _location_path_translator
            else:
                prefix = css_prefix or 'descendant-or-self::'
                tr = _generic_translator
            element.attrib[localname] = tr.css_to_xpath(value, prefix=prefix)
    return rules
def main():
    """Called from console script.

    Parses a single rules-file argument, converts its CSS selectors to
    XPath in place, and writes the result to --output (stdout by default).
    """
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-o',
        '--output',
        metavar='output.html',
        help='Output filename (instead of stdout)',
        dest='output',
        default=sys.stdout,
    )
    parser.add_option(
        '-p',
        '--pretty-print',
        action='store_true',
        help='Pretty print output',
        dest='pretty_print',
        default=False,
    )
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error('Invalid number of arguments')
    rules = etree.parse(args[0])
    convert_css_selectors(rules)
    rules.write(options.output, pretty_print=options.pretty_print)


if __name__ == '__main__':
    main()
from cssselect import GenericTranslator
from diazo import utils
from lxml import etree
from optparse import OptionParser
import logging
import sys
logger = logging.getLogger('diazo')
# CLI usage text comes from the module docstring.
usage = __doc__


class LocationPathTranslator(GenericTranslator):
    """CSS-to-XPath translator producing '//' descendant location paths."""

    def xpath_descendant_combinator(self, left, right):
        """right is a child, grand-child or further descendant of left"""
        return left.join('//', right)


_generic_translator = GenericTranslator()
_location_path_translator = LocationPathTranslator()
def convert_css_selectors(rules):
    """Convert css rules to xpath rules element tree in place.

    Returns the (mutated) *rules* tree.
    """
    # XXX: There is a :root pseudo-class # NOQA: T000
    # http://www.w3.org/TR/css3-selectors/#root-pseudo
    # We may wish to add support to lxml.cssselect for it some day.
    # Visit every element carrying at least one css-namespaced attribute.
    for element in rules.xpath(
        "//@*[namespace-uri()='{nsp}']/..".format(nsp=utils.namespaces['css']),
    ):
        tag_namespace = utils.namespace(element.tag)
        # Optional css:prefix attribute overrides the default XPath prefix.
        css_prefix = element.attrib.get(
            utils.fullname(
                utils.namespaces['css'],
                'prefix',
            ),
            None,
        )
        for name, value in element.attrib.items():
            if not name.startswith(
                '{%s}' % utils.namespaces['css'],  # NOQA: S001
            ):
                continue
            localname = utils.localname(name)
            if localname == 'prefix':
                continue
            if not value:
                element.attrib[localname] = ''
                continue
            if (
                tag_namespace == utils.namespaces['diazo'] and
                localname in
                (
                    'content',
                    'content-children',
                    'if-content',
                    'if-not-content',
                ) or (
                    tag_namespace == utils.namespaces['xsl'] and
                    localname in ('match',)
                )
            ):
                # These directives need absolute location paths ('//').
                prefix = css_prefix or '//'
                tr = _location_path_translator
            else:
                prefix = css_prefix or 'descendant-or-self::'
                tr = _generic_translator
            element.attrib[localname] = tr.css_to_xpath(value, prefix=prefix)
    return rules
def main():
    """Called from console script.

    Parses a single rules-file argument, converts its CSS selectors to
    XPath in place, and writes the result to --output (stdout by default).
    """
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-o',
        '--output',
        metavar='output.html',
        help='Output filename (instead of stdout)',
        dest='output',
        default=sys.stdout,
    )
    parser.add_option(
        '-p',
        '--pretty-print',
        action='store_true',
        help='Pretty print output',
        dest='pretty_print',
        default=False,
    )
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error('Invalid number of arguments')
    rules = etree.parse(args[0])
    convert_css_selectors(rules)
    rules.write(options.output, pretty_print=options.pretty_print)


if __name__ == '__main__':
    main()
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class contentinspectioncallout(base_resource) :
""" Configuration for Content Inspection callout resource. """
def __init__(self) :
self._name = None
self._type = None
self._profilename = None
self._servername = None
self._serverip = None
self._serverport = None
self._returntype = None
self._resultexpr = None
self._comment = None
self._hits = None
self._undefhits = None
self._undefreason = None
self.___count = None
@property
def name(self) :
r"""Name for the Content Inspection callout. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as an expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, stringmap, or callout.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the Content Inspection callout. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as an expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, stringmap, or callout.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def type(self) :
r"""Type of the Content Inspection callout. It must be one of the following:
* ICAP - Sends ICAP request to the configured ICAP server.<br/>Possible values = ICAP.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
r"""Type of the Content Inspection callout. It must be one of the following:
* ICAP - Sends ICAP request to the configured ICAP server.<br/>Possible values = ICAP
"""
try :
self._type = type
except Exception as e:
raise e
@property
def profilename(self) :
r"""Name of the Content Inspection profile. The type of the configured profile must match the type specified using -type argument.<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._profilename
except Exception as e:
raise e
@profilename.setter
def profilename(self, profilename) :
r"""Name of the Content Inspection profile. The type of the configured profile must match the type specified using -type argument.<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._profilename = profilename
except Exception as e:
raise e
@property
def servername(self) :
r"""Name of the load balancing or content switching virtual server or service to which the Content Inspection request is issued. Mutually exclusive with server IP address and port parameters. The service type must be TCP or SSL_TCP. If there are vservers and services with the same name, then vserver is selected.<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._servername
except Exception as e:
raise e
@servername.setter
def servername(self, servername) :
r"""Name of the load balancing or content switching virtual server or service to which the Content Inspection request is issued. Mutually exclusive with server IP address and port parameters. The service type must be TCP or SSL_TCP. If there are vservers and services with the same name, then vserver is selected.<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._servername = servername
except Exception as e:
raise e
@property
def serverip(self) :
r"""IP address of Content Inspection server. Mutually exclusive with the server name parameter.<br/>Minimum length = 1.
"""
try :
return self._serverip
except Exception as e:
raise e
@serverip.setter
def serverip(self, serverip) :
r"""IP address of Content Inspection server. Mutually exclusive with the server name parameter.<br/>Minimum length = 1
"""
try :
self._serverip = serverip
except Exception as e:
raise e
@property
def serverport(self) :
r"""Port of the Content Inspection server.<br/>Default value: 1344<br/>Minimum length = 1<br/>Maximum length = 65535.
"""
try :
return self._serverport
except Exception as e:
raise e
@serverport.setter
def serverport(self, serverport) :
r"""Port of the Content Inspection server.<br/>Default value: 1344<br/>Minimum length = 1<br/>Maximum length = 65535
"""
try :
self._serverport = serverport
except Exception as e:
raise e
@property
def returntype(self) :
r"""Type of data that the target callout agent returns in response to the callout.
Available settings function as follows:
* TEXT - Treat the returned value as a text string.
* NUM - Treat the returned value as a number.
* BOOL - Treat the returned value as a Boolean value.
Note: You cannot change the return type after it is set.<br/>Possible values = BOOL, NUM, TEXT.
"""
try :
return self._returntype
except Exception as e:
raise e
@returntype.setter
def returntype(self, returntype) :
r"""Type of data that the target callout agent returns in response to the callout.
Available settings function as follows:
* TEXT - Treat the returned value as a text string.
* NUM - Treat the returned value as a number.
* BOOL - Treat the returned value as a Boolean value.
Note: You cannot change the return type after it is set.<br/>Possible values = BOOL, NUM, TEXT
"""
try :
self._returntype = returntype
except Exception as e:
raise e
@property
def resultexpr(self) :
r"""Expression that extracts the callout results from the response sent by the CI callout agent. Must be a response based expression, that is, it must begin with ICAP.RES. The operations in this expression must match the return type. For example, if you configure a return type of TEXT, the result expression must be a text based expression, as in the following example: icap.res.header("ISTag").<br/>Minimum length = 1.
"""
try :
return self._resultexpr
except Exception as e:
raise e
@resultexpr.setter
def resultexpr(self, resultexpr) :
r"""Expression that extracts the callout results from the response sent by the CI callout agent. Must be a response based expression, that is, it must begin with ICAP.RES. The operations in this expression must match the return type. For example, if you configure a return type of TEXT, the result expression must be a text based expression, as in the following example: icap.res.header("ISTag").<br/>Minimum length = 1
"""
try :
self._resultexpr = resultexpr
except Exception as e:
raise e
@property
def comment(self) :
r"""Any comments to preserve information about this Content Inspection callout.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
r"""Any comments to preserve information about this Content Inspection callout.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def hits(self) :
r"""Total hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def undefhits(self) :
r"""Total undefs.
"""
try :
return self._undefhits
except Exception as e:
raise e
@property
def undefreason(self) :
r"""Reason for last undef.<br/>Possible values = Failed to add service, Vserver not found, Not a HTTP or SSL vserver, Generated callout request is invalid, Content-Length header not found in callout request, Not enough space to put Content-Length value, Config incomplete, Server is DOWN, Creating callout connection failed, No memory to generate callout request packets, No memory to create callout task, No memory to create callout async, Callout request expression undef, No callout response expression, Skipped callout response eval, Callout response pixl init undef, Callout response expression undef.
"""
try :
return self._undefreason
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(contentinspectioncallout_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.contentinspectioncallout
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = contentinspectioncallout()
addresource.name = resource.name
addresource.type = resource.type
addresource.profilename = resource.profilename
addresource.servername = resource.servername
addresource.serverip = resource.serverip
addresource.serverport = resource.serverport
addresource.returntype = resource.returntype
addresource.resultexpr = resource.resultexpr
addresource.comment = resource.comment
return addresource
@classmethod
def add(cls, client, resource):
    """Add contentinspectioncallout resource(s).

    Accepts a single resource object or a list of them; a list is sent as
    one bulk request.  (An empty list now yields an empty bulk request
    instead of the original's NameError on the unbound local.)
    """
    if not isinstance(resource, list):
        return cls.filter_add_parameters(resource).add_resource(client)
    addresources = [cls.filter_add_parameters(r) for r in resource]
    return cls.add_bulk_request(client, addresources)
@classmethod
def filter_delete_parameters(cls, resource):
    """Copy only the delete-operation parameter (name) of *resource* onto a
    fresh contentinspectioncallout."""
    deleteresource = contentinspectioncallout()
    deleteresource.name = resource.name
    return deleteresource
@classmethod
def delete(cls, client, resource):
    """Delete contentinspectioncallout resource(s).

    *resource* may be a name string, a resource object, or a list of either
    kind; lists are sent as one bulk request.
    """
    if not isinstance(resource, list):
        if not isinstance(resource, contentinspectioncallout):
            # Caller passed the resource name directly.
            deleteresource = contentinspectioncallout()
            deleteresource.name = resource
        else:
            deleteresource = cls.filter_delete_parameters(resource)
        return deleteresource.delete_resource(client)
    deleteresources = []
    if resource and len(resource) > 0:
        if not isinstance(resource[0], cls):
            # List of plain names.
            for name in resource:
                obj = contentinspectioncallout()
                obj.name = name
                deleteresources.append(obj)
        else:
            # List of resource objects.  BUG FIX: the original filtered the
            # whole list (`resource`) for every slot instead of `resource[i]`
            # — compare the parallel loop in update().
            deleteresources = [cls.filter_delete_parameters(r) for r in resource]
    return cls.delete_bulk_request(client, deleteresources)
@classmethod
def filter_update_parameters(cls, resource):
    """Copy only the update-operation parameters of *resource* onto a fresh
    contentinspectioncallout (note: 'type' is not updatable)."""
    updateresource = contentinspectioncallout()
    updateresource.name = resource.name
    updateresource.servername = resource.servername
    updateresource.serverip = resource.serverip
    updateresource.serverport = resource.serverport
    updateresource.profilename = resource.profilename
    updateresource.returntype = resource.returntype
    updateresource.resultexpr = resource.resultexpr
    updateresource.comment = resource.comment
    return updateresource
@classmethod
def update(cls, client, resource):
    """Update contentinspectioncallout resource(s).

    Accepts a single resource object or a list of them; a list is sent as
    one bulk request.
    """
    if not isinstance(resource, list):
        return cls.filter_update_parameters(resource).update_resource(client)
    updateresources = [cls.filter_update_parameters(r) for r in resource]
    return cls.update_bulk_request(client, updateresources)
@classmethod
def unset(cls, client, resource, args):
    """Unset the properties named in *args* on contentinspectioncallout
    resource(s).

    *resource* may be a name string, a resource object, or a list of either
    kind; lists are sent as one bulk request.
    """
    if not isinstance(resource, list):
        unsetresource = contentinspectioncallout()
        if not isinstance(resource, contentinspectioncallout):
            # Caller passed the resource name directly.
            unsetresource.name = resource
        else:
            unsetresource.name = resource.name
        return unsetresource.unset_resource(client, args)
    unsetresources = []
    if resource and len(resource) > 0:
        if not isinstance(resource[0], cls):
            # List of plain names.
            for name in resource:
                obj = contentinspectioncallout()
                obj.name = name
                unsetresources.append(obj)
        else:
            # List of resource objects: only the identifier is needed.
            for r in resource:
                obj = contentinspectioncallout()
                obj.name = r.name
                unsetresources.append(obj)
    return cls.unset_bulk_request(client, unsetresources, args)
@classmethod
def get(cls, client, name="", option_=""):
    """Fetch contentinspectioncallout resource(s) configured on the appliance.

    With no *name*: fetch all.  With a single name: fetch that one.  With a
    list of names: fetch one per entry.  Passing resource objects instead of
    names raises Exception.  (An empty name list now returns [] instead of
    the original's NameError on the unbound local.)
    """
    if not name:
        obj = contentinspectioncallout()
        return obj.get_resources(client, option_)
    if not isinstance(name, list):
        if isinstance(name, cls):
            raise Exception('Invalid parameter name:{0}'.format(type(name)))
        obj = contentinspectioncallout()
        obj.name = name
        return obj.get_resource(client, option_)
    response = []
    if name and len(name) > 0:
        if isinstance(name[0], cls):
            raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
        for single_name in name:
            obj = contentinspectioncallout()
            obj.name = single_name
            response.append(obj.get_resource(client, option_))
    return response
@classmethod
def get_filtered(cls, client, filter_):
    """Fetch the contentinspectioncallout resources matching *filter_*.

    The filter string is attribute:value pairs, e.g. "port:80,servicetype:HTTP".
    """
    obj = contentinspectioncallout()
    option_ = options()
    option_.filter = filter_
    return obj.getfiltered(client, option_)
@classmethod
def count(cls, client):
    """Return the number of contentinspectioncallout resources configured on
    the appliance (0 when the count request returns nothing)."""
    obj = contentinspectioncallout()
    option_ = options()
    option_.count = True
    response = obj.get_resources(client, option_)
    if response:
        # '___count' is set by the payload formatter via setattr (a string
        # key is not name-mangled), so it must be read through __dict__.
        return response[0].__dict__['___count']
    return 0
@classmethod
def count_filtered(cls, client, filter_):
    """Return the number of contentinspectioncallout resources matching
    *filter_* (attribute:value pairs, e.g. "port:80,servicetype:HTTP")."""
    obj = contentinspectioncallout()
    option_ = options()
    option_.count = True
    option_.filter = filter_
    response = obj.getfiltered(client, option_)
    if response:
        # '___count' is set via setattr by the payload formatter; read it
        # through __dict__ to bypass name mangling.
        return response[0].__dict__['___count']
    return 0
class Type:
    """Allowed values for the 'type' attribute."""
    ICAP = "ICAP"

class Returntype:
    """Allowed values for the 'returntype' attribute."""
    BOOL = "BOOL"
    NUM = "NUM"
    TEXT = "TEXT"

class Undefreason:
    """Possible values reported by the read-only 'undefreason' attribute."""
    Failed_to_add_service = "Failed to add service"
    Vserver_not_found = "Vserver not found"
    Not_a_HTTP_or_SSL_vserver = "Not a HTTP or SSL vserver"
    Generated_callout_request_is_invalid = "Generated callout request is invalid"
    Content_Length_header_not_found_in_callout_request = "Content-Length header not found in callout request"
    Not_enough_space_to_put_Content_Length_value = "Not enough space to put Content-Length value"
    Config_incomplete = "Config incomplete"
    Server_is_DOWN = "Server is DOWN"
    Creating_callout_connection_failed = "Creating callout connection failed"
    No_memory_to_generate_callout_request_packets = "No memory to generate callout request packets"
    No_memory_to_create_callout_task = "No memory to create callout task"
    No_memory_to_create_callout_async = "No memory to create callout async"
    Callout_request_expression_undef = "Callout request expression undef"
    No_callout_response_expression = "No callout response expression"
    Skipped_callout_response_eval = "Skipped callout response eval"
    Callout_response_pixl_init_undef = "Callout response pixl init undef"
    Callout_response_expression_undef = "Callout response expression undef"
class contentinspectioncallout_response(base_response):
    """Nitro response wrapper carrying an array of contentinspectioncallout
    resources plus the standard response metadata."""
    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate the resource array to the expected response length.
        # (The original assigned an empty list first and immediately
        # overwrote it, with dataset-dump residue fused onto the last line.)
        self.contentinspectioncallout = [contentinspectioncallout() for _ in range(length)]
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class contentinspectioncallout(base_resource):
    """Configuration for Content Inspection callout resource."""
    def __init__(self):
        # Writable configuration attributes (exposed via properties below).
        self._name = None
        self._type = None
        self._profilename = None
        self._servername = None
        self._serverip = None
        self._serverport = None
        self._returntype = None
        self._resultexpr = None
        self._comment = None
        # Read-only statistics returned by the appliance.
        self._hits = None
        self._undefhits = None
        self._undefreason = None
        self.___count = None
@property
def name(self):
    """Name for the Content Inspection callout.  Not case sensitive; must be
    a valid NetScaler identifier (letter/underscore start, alphanumerics and
    underscores only) and not clash with reserved words or existing named
    expressions.  Minimum length = 1."""
    return self._name

@name.setter
def name(self, name):
    self._name = name

@property
def type(self):
    """Type of the Content Inspection callout.
    * ICAP - sends an ICAP request to the configured ICAP server.
    Possible values = ICAP."""
    return self._type

@type.setter
def type(self, type):
    self._type = type

@property
def profilename(self):
    """Name of the Content Inspection profile; its type must match the
    'type' attribute.  Length 1-127."""
    return self._profilename

@profilename.setter
def profilename(self, profilename):
    self._profilename = profilename

@property
def servername(self):
    """LB/CS virtual server or service receiving the callout.  Mutually
    exclusive with serverip/serverport; service type must be TCP or SSL_TCP.
    Length 1-127."""
    return self._servername

@servername.setter
def servername(self, servername):
    self._servername = servername

@property
def serverip(self):
    """IP address of the Content Inspection server.  Mutually exclusive with
    servername.  Minimum length = 1."""
    return self._serverip

@serverip.setter
def serverip(self, serverip):
    self._serverip = serverip

@property
def serverport(self):
    """Port of the Content Inspection server.  Default value: 1344;
    range 1-65535."""
    return self._serverport

@serverport.setter
def serverport(self, serverport):
    self._serverport = serverport

@property
def returntype(self):
    """Type of data the callout agent returns: TEXT, NUM or BOOL.  Cannot be
    changed after it is set.  Possible values = BOOL, NUM, TEXT."""
    return self._returntype

@returntype.setter
def returntype(self, returntype):
    self._returntype = returntype

@property
def resultexpr(self):
    """Response-based (ICAP.RES) expression extracting the callout result;
    its operations must match returntype, e.g. icap.res.header("ISTag").
    Minimum length = 1."""
    return self._resultexpr

@resultexpr.setter
def resultexpr(self, resultexpr):
    self._resultexpr = resultexpr

@property
def comment(self):
    """Any comments to preserve information about this callout."""
    return self._comment

@comment.setter
def comment(self, comment):
    self._comment = comment

@property
def hits(self):
    """Total hits (read-only statistic)."""
    return self._hits

@property
def undefhits(self):
    """Total undefs (read-only statistic)."""
    return self._undefhits

@property
def undefreason(self):
    """Reason for the last undef (read-only); see the Undefreason constants
    for the possible values."""
    return self._undefreason
def _get_nitro_response(self, service, response):
    """Convert a raw nitro response string into resource objects.

    Returns the array of contentinspectioncallout objects for GET requests.
    Raises nitro_exception when the response reports an error; on errorcode
    444 (expired session) the session is cleared before raising.
    """
    result = service.payload_formatter.string_to_resource(
        contentinspectioncallout_response, response, self.__class__.__name__)
    if result.errorcode != 0:
        if result.errorcode == 444:
            # 444 signals an expired/invalid session: drop it before raising.
            service.clear_session(self)
        if result.severity:
            # Only severity "ERROR" is fatal; warnings pass through.
            if result.severity == "ERROR":
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        else:
            raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
    return result.contentinspectioncallout
def _get_object_name(self):
    """Return the object identifier (the callout name) as a string, or None
    if the name has not been set."""
    return str(self.name) if self.name is not None else None
@classmethod
def filter_add_parameters(cls, resource):
    """Copy only the add-operation parameters of *resource* onto a fresh
    contentinspectioncallout, so the add payload carries no extra fields."""
    addresource = contentinspectioncallout()
    addresource.name = resource.name
    addresource.type = resource.type
    addresource.profilename = resource.profilename
    addresource.servername = resource.servername
    addresource.serverip = resource.serverip
    addresource.serverport = resource.serverport
    addresource.returntype = resource.returntype
    addresource.resultexpr = resource.resultexpr
    addresource.comment = resource.comment
    return addresource
@classmethod
def add(cls, client, resource):
    """Add contentinspectioncallout resource(s).

    Accepts a single resource object or a list of them; a list is sent as
    one bulk request.  (An empty list now yields an empty bulk request
    instead of the original's NameError on the unbound local.)
    """
    if not isinstance(resource, list):
        return cls.filter_add_parameters(resource).add_resource(client)
    addresources = [cls.filter_add_parameters(r) for r in resource]
    return cls.add_bulk_request(client, addresources)
@classmethod
def filter_delete_parameters(cls, resource):
    """Copy only the delete-operation parameter (name) of *resource* onto a
    fresh contentinspectioncallout."""
    deleteresource = contentinspectioncallout()
    deleteresource.name = resource.name
    return deleteresource
@classmethod
def delete(cls, client, resource):
    """Delete contentinspectioncallout resource(s).

    *resource* may be a name string, a resource object, or a list of either
    kind; lists are sent as one bulk request.
    """
    if not isinstance(resource, list):
        if not isinstance(resource, contentinspectioncallout):
            # Caller passed the resource name directly.
            deleteresource = contentinspectioncallout()
            deleteresource.name = resource
        else:
            deleteresource = cls.filter_delete_parameters(resource)
        return deleteresource.delete_resource(client)
    deleteresources = []
    if resource and len(resource) > 0:
        if not isinstance(resource[0], cls):
            # List of plain names.
            for name in resource:
                obj = contentinspectioncallout()
                obj.name = name
                deleteresources.append(obj)
        else:
            # List of resource objects.  BUG FIX: the original filtered the
            # whole list (`resource`) for every slot instead of `resource[i]`
            # — compare the parallel loop in update().
            deleteresources = [cls.filter_delete_parameters(r) for r in resource]
    return cls.delete_bulk_request(client, deleteresources)
@classmethod
def filter_update_parameters(cls, resource):
    """Copy only the update-operation parameters of *resource* onto a fresh
    contentinspectioncallout (note: 'type' is not updatable)."""
    updateresource = contentinspectioncallout()
    updateresource.name = resource.name
    updateresource.servername = resource.servername
    updateresource.serverip = resource.serverip
    updateresource.serverport = resource.serverport
    updateresource.profilename = resource.profilename
    updateresource.returntype = resource.returntype
    updateresource.resultexpr = resource.resultexpr
    updateresource.comment = resource.comment
    return updateresource
@classmethod
def update(cls, client, resource):
    """Update contentinspectioncallout resource(s).

    Accepts a single resource object or a list of them; a list is sent as
    one bulk request.
    """
    if not isinstance(resource, list):
        return cls.filter_update_parameters(resource).update_resource(client)
    updateresources = [cls.filter_update_parameters(r) for r in resource]
    return cls.update_bulk_request(client, updateresources)
@classmethod
def unset(cls, client, resource, args):
    """Unset the properties named in *args* on contentinspectioncallout
    resource(s).

    *resource* may be a name string, a resource object, or a list of either
    kind; lists are sent as one bulk request.
    """
    if not isinstance(resource, list):
        unsetresource = contentinspectioncallout()
        if not isinstance(resource, contentinspectioncallout):
            # Caller passed the resource name directly.
            unsetresource.name = resource
        else:
            unsetresource.name = resource.name
        return unsetresource.unset_resource(client, args)
    unsetresources = []
    if resource and len(resource) > 0:
        if not isinstance(resource[0], cls):
            # List of plain names.
            for name in resource:
                obj = contentinspectioncallout()
                obj.name = name
                unsetresources.append(obj)
        else:
            # List of resource objects: only the identifier is needed.
            for r in resource:
                obj = contentinspectioncallout()
                obj.name = r.name
                unsetresources.append(obj)
    return cls.unset_bulk_request(client, unsetresources, args)
@classmethod
def get(cls, client, name="", option_=""):
    """Fetch contentinspectioncallout resource(s) configured on the appliance.

    With no *name*: fetch all.  With a single name: fetch that one.  With a
    list of names: fetch one per entry.  Passing resource objects instead of
    names raises Exception.  (An empty name list now returns [] instead of
    the original's NameError on the unbound local.)
    """
    if not name:
        obj = contentinspectioncallout()
        return obj.get_resources(client, option_)
    if not isinstance(name, list):
        if isinstance(name, cls):
            raise Exception('Invalid parameter name:{0}'.format(type(name)))
        obj = contentinspectioncallout()
        obj.name = name
        return obj.get_resource(client, option_)
    response = []
    if name and len(name) > 0:
        if isinstance(name[0], cls):
            raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
        for single_name in name:
            obj = contentinspectioncallout()
            obj.name = single_name
            response.append(obj.get_resource(client, option_))
    return response
@classmethod
def get_filtered(cls, client, filter_):
    """Fetch the contentinspectioncallout resources matching *filter_*.

    The filter string is attribute:value pairs, e.g. "port:80,servicetype:HTTP".
    """
    obj = contentinspectioncallout()
    option_ = options()
    option_.filter = filter_
    return obj.getfiltered(client, option_)
@classmethod
def count(cls, client):
    """Return the number of contentinspectioncallout resources configured on
    the appliance (0 when the count request returns nothing)."""
    obj = contentinspectioncallout()
    option_ = options()
    option_.count = True
    response = obj.get_resources(client, option_)
    if response:
        # '___count' is set by the payload formatter via setattr (a string
        # key is not name-mangled), so it must be read through __dict__.
        return response[0].__dict__['___count']
    return 0
@classmethod
def count_filtered(cls, client, filter_):
    """Return the number of contentinspectioncallout resources matching
    *filter_* (attribute:value pairs, e.g. "port:80,servicetype:HTTP")."""
    obj = contentinspectioncallout()
    option_ = options()
    option_.count = True
    option_.filter = filter_
    response = obj.getfiltered(client, option_)
    if response:
        # '___count' is set via setattr by the payload formatter; read it
        # through __dict__ to bypass name mangling.
        return response[0].__dict__['___count']
    return 0
class Type:
    """Allowed values for the 'type' attribute."""
    ICAP = "ICAP"

class Returntype:
    """Allowed values for the 'returntype' attribute."""
    BOOL = "BOOL"
    NUM = "NUM"
    TEXT = "TEXT"

class Undefreason:
    """Possible values reported by the read-only 'undefreason' attribute."""
    Failed_to_add_service = "Failed to add service"
    Vserver_not_found = "Vserver not found"
    Not_a_HTTP_or_SSL_vserver = "Not a HTTP or SSL vserver"
    Generated_callout_request_is_invalid = "Generated callout request is invalid"
    Content_Length_header_not_found_in_callout_request = "Content-Length header not found in callout request"
    Not_enough_space_to_put_Content_Length_value = "Not enough space to put Content-Length value"
    Config_incomplete = "Config incomplete"
    Server_is_DOWN = "Server is DOWN"
    Creating_callout_connection_failed = "Creating callout connection failed"
    No_memory_to_generate_callout_request_packets = "No memory to generate callout request packets"
    No_memory_to_create_callout_task = "No memory to create callout task"
    No_memory_to_create_callout_async = "No memory to create callout async"
    Callout_request_expression_undef = "Callout request expression undef"
    No_callout_response_expression = "No callout response expression"
    Skipped_callout_response_eval = "Skipped callout response eval"
    Callout_response_pixl_init_undef = "Callout response pixl init undef"
    Callout_response_expression_undef = "Callout response expression undef"
class contentinspectioncallout_response(base_response):
    """Nitro response wrapper carrying an array of contentinspectioncallout
    resources plus the standard response metadata."""
    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate the resource array to the expected response length.
        # (The original assigned an empty list first and immediately
        # overwrote it, with dataset-dump residue fused onto the last line.)
        self.contentinspectioncallout = [contentinspectioncallout() for _ in range(length)]
from typing import List
import numpy
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.multiple_choice_qa.question_answer_instance import IndexedQuestionAnswerInstance
from deep_qa.data.instances.multiple_choice_qa.question_answer_instance import QuestionAnswerInstance
from ....common.test_case import DeepQaTestCase
class TestQuestionAnswerInstance(DeepQaTestCase):
    def instance_to_line(self, question: str, answers: List[str], label: int, index=None):
        """Build the tab-separated line format parsed by
        QuestionAnswerInstance.read_from_line:
        [index\\t]question\\tanswer1###answer2...\\tlabel."""
        line = ''
        if index is not None:
            line += str(index) + '\t'
        line += question
        line += '\t'
        line += '###'.join(answers)
        line += '\t'
        line += str(label)
        return line

    def test_read_from_line_handles_three_column(self):
        question = "what is the answer"
        answers = ['a', 'b', 'c']
        label = 1
        line = self.instance_to_line(question, answers, label)
        instance = QuestionAnswerInstance.read_from_line(line)
        assert instance.question_text == question
        assert instance.answer_options == answers
        # BUG FIX: compare ints with ==, not `is` — the identity check only
        # passed by virtue of CPython's small-integer caching.
        assert instance.label == label
        assert instance.index is None

    def test_read_from_line_handles_four_column(self):
        question = "what is the answer"
        answers = ['a', 'b', 'c']
        label = 1
        index = 23
        line = self.instance_to_line(question, answers, label, index)
        instance = QuestionAnswerInstance.read_from_line(line)
        assert instance.question_text == question
        assert instance.answer_options == answers
        # BUG FIX: == instead of `is` for int comparisons (see above).
        assert instance.label == label
        assert instance.index == index

    def test_words_includes_question_and_answers(self):
        instance = QuestionAnswerInstance("a b c", ["d", "e f"], 1)
        assert instance.words() == {'words': ['a', 'b', 'c', 'd', 'e', 'f']}

    def test_to_indexed_instance_converts_correctly(self):
        instance = QuestionAnswerInstance("a A b", ["d", "e f D"], 1)
        data_indexer = DataIndexer()
        a_index = data_indexer.add_word_to_index("a")
        d_index = data_indexer.add_word_to_index("d")
        oov_index = data_indexer.get_word_index(data_indexer._oov_token)  # pylint: disable=protected-access
        indexed_instance = instance.to_indexed_instance(data_indexer)
        # Tokens are lowercased before lookup, so "A" maps to a_index and
        # unseen words fall back to the OOV index.
        assert indexed_instance.question_indices == [a_index, a_index, oov_index]
        assert len(indexed_instance.option_indices) == 2
        assert indexed_instance.option_indices[0] == [d_index]
        assert indexed_instance.option_indices[1] == [oov_index, oov_index, d_index]
class TestIndexedQuestionAnswerInstance(DeepQaTestCase):
    def setUp(self):
        super(TestIndexedQuestionAnswerInstance, self).setUp()
        # Question of 3 tokens; 3 answer options of lengths 2, 1 and 2;
        # correct option index 1.
        self.instance = IndexedQuestionAnswerInstance([1, 2, 3],
                                                      [[2, 3], [4], [5, 6]],
                                                      1)

    def test_get_padding_lengths_returns_three_correct_lengths(self):
        assert self.instance.get_padding_lengths() == {
                'num_sentence_words': 3,
                'answer_length': 2,
                'num_options': 3
                }

    def test_pad_calls_pad_on_all_options(self):
        self.instance.pad({'num_sentence_words': 2, 'answer_length': 2, 'num_options': 3})
        assert self.instance.question_indices == [2, 3]
        assert self.instance.option_indices[0] == [2, 3]
        assert self.instance.option_indices[1] == [0, 4]
        assert self.instance.option_indices[2] == [5, 6]

    def test_pad_adds_empty_options_when_necessary(self):
        self.instance.pad({'num_sentence_words': 1, 'answer_length': 1, 'num_options': 4})
        assert self.instance.question_indices == [3]
        assert self.instance.option_indices[0] == [3]
        assert self.instance.option_indices[1] == [4]
        assert self.instance.option_indices[2] == [6]
        assert self.instance.option_indices[3] == [0]
        assert len(self.instance.option_indices) == 4

    def test_pad_removes_options_when_necessary(self):
        self.instance.pad({'num_sentence_words': 1, 'answer_length': 1, 'num_options': 1})
        assert self.instance.question_indices == [3]
        assert self.instance.option_indices[0] == [3]
        assert len(self.instance.option_indices) == 1

    def test_as_training_data_produces_correct_numpy_arrays(self):
        self.instance.pad({'num_sentence_words': 3, 'answer_length': 2, 'num_options': 3})
        inputs, label = self.instance.as_training_data()
        assert numpy.all(label == numpy.asarray([0, 1, 0]))
        assert numpy.all(inputs[0] == numpy.asarray([1, 2, 3]))
        assert numpy.all(inputs[1] == numpy.asarray([[2, 3], [0, 4], [5, 6]]))

# Import retained from the following concatenated file fragment, which was
# fused onto this line by the dataset dump and is needed by the List
# annotation in the next class.
from typing import List
import numpy
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.multiple_choice_qa.question_answer_instance import IndexedQuestionAnswerInstance
from deep_qa.data.instances.multiple_choice_qa.question_answer_instance import QuestionAnswerInstance
from ....common.test_case import DeepQaTestCase
class TestQuestionAnswerInstance(DeepQaTestCase):
    def instance_to_line(self, question: str, answers: List[str], label: int, index=None):
        """Build the tab-separated line format parsed by
        QuestionAnswerInstance.read_from_line:
        [index\\t]question\\tanswer1###answer2...\\tlabel."""
        line = ''
        if index is not None:
            line += str(index) + '\t'
        line += question
        line += '\t'
        line += '###'.join(answers)
        line += '\t'
        line += str(label)
        return line

    def test_read_from_line_handles_three_column(self):
        question = "what is the answer"
        answers = ['a', 'b', 'c']
        label = 1
        line = self.instance_to_line(question, answers, label)
        instance = QuestionAnswerInstance.read_from_line(line)
        assert instance.question_text == question
        assert instance.answer_options == answers
        # BUG FIX: compare ints with ==, not `is` — the identity check only
        # passed by virtue of CPython's small-integer caching.
        assert instance.label == label
        assert instance.index is None

    def test_read_from_line_handles_four_column(self):
        question = "what is the answer"
        answers = ['a', 'b', 'c']
        label = 1
        index = 23
        line = self.instance_to_line(question, answers, label, index)
        instance = QuestionAnswerInstance.read_from_line(line)
        assert instance.question_text == question
        assert instance.answer_options == answers
        # BUG FIX: == instead of `is` for int comparisons (see above).
        assert instance.label == label
        assert instance.index == index

    def test_words_includes_question_and_answers(self):
        instance = QuestionAnswerInstance("a b c", ["d", "e f"], 1)
        assert instance.words() == {'words': ['a', 'b', 'c', 'd', 'e', 'f']}

    def test_to_indexed_instance_converts_correctly(self):
        instance = QuestionAnswerInstance("a A b", ["d", "e f D"], 1)
        data_indexer = DataIndexer()
        a_index = data_indexer.add_word_to_index("a")
        d_index = data_indexer.add_word_to_index("d")
        oov_index = data_indexer.get_word_index(data_indexer._oov_token)  # pylint: disable=protected-access
        indexed_instance = instance.to_indexed_instance(data_indexer)
        # Tokens are lowercased before lookup, so "A" maps to a_index and
        # unseen words fall back to the OOV index.
        assert indexed_instance.question_indices == [a_index, a_index, oov_index]
        assert len(indexed_instance.option_indices) == 2
        assert indexed_instance.option_indices[0] == [d_index]
        assert indexed_instance.option_indices[1] == [oov_index, oov_index, d_index]
class TestIndexedQuestionAnswerInstance(DeepQaTestCase):
    def setUp(self):
        super(TestIndexedQuestionAnswerInstance, self).setUp()
        # Question of 3 tokens; 3 answer options of lengths 2, 1 and 2;
        # correct option index 1.
        self.instance = IndexedQuestionAnswerInstance([1, 2, 3],
                                                      [[2, 3], [4], [5, 6]],
                                                      1)

    def test_get_padding_lengths_returns_three_correct_lengths(self):
        assert self.instance.get_padding_lengths() == {
                'num_sentence_words': 3,
                'answer_length': 2,
                'num_options': 3
                }

    def test_pad_calls_pad_on_all_options(self):
        self.instance.pad({'num_sentence_words': 2, 'answer_length': 2, 'num_options': 3})
        assert self.instance.question_indices == [2, 3]
        assert self.instance.option_indices[0] == [2, 3]
        assert self.instance.option_indices[1] == [0, 4]
        assert self.instance.option_indices[2] == [5, 6]

    def test_pad_adds_empty_options_when_necessary(self):
        self.instance.pad({'num_sentence_words': 1, 'answer_length': 1, 'num_options': 4})
        assert self.instance.question_indices == [3]
        assert self.instance.option_indices[0] == [3]
        assert self.instance.option_indices[1] == [4]
        assert self.instance.option_indices[2] == [6]
        assert self.instance.option_indices[3] == [0]
        assert len(self.instance.option_indices) == 4

    def test_pad_removes_options_when_necessary(self):
        self.instance.pad({'num_sentence_words': 1, 'answer_length': 1, 'num_options': 1})
        assert self.instance.question_indices == [3]
        assert self.instance.option_indices[0] == [3]
        assert len(self.instance.option_indices) == 1

    def test_as_training_data_produces_correct_numpy_arrays(self):
        self.instance.pad({'num_sentence_words': 3, 'answer_length': 2, 'num_options': 3})
        inputs, label = self.instance.as_training_data()
        assert numpy.all(label == numpy.asarray([0, 1, 0]))
        assert numpy.all(inputs[0] == numpy.asarray([1, 2, 3]))
        assert numpy.all(inputs[1] == numpy.asarray([[2, 3], [0, 4], [5, 6]]))
import attr
import json
import numpy as np
from text2qdmr.datasets.utils.extract_values import GroundingKey, ValueUnit
from text2qdmr.datasets.qdmr import QDMRStepArg
def to_dict_with_sorted_values(d, key=None):
return {k: sorted(v, key=key) for k, v in d.items()}
def to_dict_with_set_values(d):
result = {}
for k, v in d.items():
hashable_v = []
for v_elem in v:
if isinstance(v_elem, list):
hashable_v.append(tuple(v_elem))
else:
hashable_v.append(v_elem)
result[k] = set(hashable_v)
return result
def tuplify(x):
if not isinstance(x, (tuple, list)):
return x
return tuple(tuplify(elem) for elem in x)
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, GroundingKey) or isinstance(obj, ValueUnit):
return obj.__dict__
elif isinstance(obj, QDMRStepArg) and obj.arg_type == 'ref':
return attr.asdict(obj)
elif isinstance(obj, QDMRStepArg) and obj.arg_type == 'grounding':
dict_obj = attr.asdict(obj)
if isinstance(obj, GroundingKey):
dict_obj['arg'] = obj.arg.__dict__
return dict_obj
elif attr.has(obj):
return attr.asdict(obj)
elif isinstance(obj, list):
obj = tuple(obj)
return super().default(obj)
class ComplexDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
if isinstance(obj, dict) and 'type' in obj and 'keys' in obj:
return GroundingKey(grounding_type=obj['type'], keys=obj['keys'])
elif isinstance(obj, dict) and 'value' in obj and 'from_qdmr' in obj:
return ValueUnit(**obj)
elif isinstance(obj, dict) and 'arg_type' in obj and 'arg' in obj:
obj['arg'] = self.object_hook(obj['arg'])
return QDMRStepArg(**obj)
return obj | text2qdmr/utils/serialization.py | import attr
import json
import numpy as np
from text2qdmr.datasets.utils.extract_values import GroundingKey, ValueUnit
from text2qdmr.datasets.qdmr import QDMRStepArg
def to_dict_with_sorted_values(d, key=None):
return {k: sorted(v, key=key) for k, v in d.items()}
def to_dict_with_set_values(d):
result = {}
for k, v in d.items():
hashable_v = []
for v_elem in v:
if isinstance(v_elem, list):
hashable_v.append(tuple(v_elem))
else:
hashable_v.append(v_elem)
result[k] = set(hashable_v)
return result
def tuplify(x):
if not isinstance(x, (tuple, list)):
return x
return tuple(tuplify(elem) for elem in x)
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, GroundingKey) or isinstance(obj, ValueUnit):
return obj.__dict__
elif isinstance(obj, QDMRStepArg) and obj.arg_type == 'ref':
return attr.asdict(obj)
elif isinstance(obj, QDMRStepArg) and obj.arg_type == 'grounding':
dict_obj = attr.asdict(obj)
if isinstance(obj, GroundingKey):
dict_obj['arg'] = obj.arg.__dict__
return dict_obj
elif attr.has(obj):
return attr.asdict(obj)
elif isinstance(obj, list):
obj = tuple(obj)
return super().default(obj)
class ComplexDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
if isinstance(obj, dict) and 'type' in obj and 'keys' in obj:
return GroundingKey(grounding_type=obj['type'], keys=obj['keys'])
elif isinstance(obj, dict) and 'value' in obj and 'from_qdmr' in obj:
return ValueUnit(**obj)
elif isinstance(obj, dict) and 'arg_type' in obj and 'arg' in obj:
obj['arg'] = self.object_hook(obj['arg'])
return QDMRStepArg(**obj)
return obj | 0.531696 | 0.208108 |
from autoarray.plot import inversion_plotters, fit_interferometer_plotters
from autogalaxy.plot.mat_wrap import lensing_mat_plot, lensing_include, lensing_visuals
from autogalaxy.plot import plane_plotters
from autogalaxy.fit import fit_interferometer
class FitInterferometerPlotter(
fit_interferometer_plotters.AbstractFitInterferometerPlotter
):
def __init__(
self,
fit: fit_interferometer.FitInterferometer,
mat_plot_1d: lensing_mat_plot.MatPlot1D = lensing_mat_plot.MatPlot1D(),
visuals_1d: lensing_visuals.Visuals1D = lensing_visuals.Visuals1D(),
include_1d: lensing_include.Include1D = lensing_include.Include1D(),
mat_plot_2d: lensing_mat_plot.MatPlot2D = lensing_mat_plot.MatPlot2D(),
visuals_2d: lensing_visuals.Visuals2D = lensing_visuals.Visuals2D(),
include_2d: lensing_include.Include2D = lensing_include.Include2D(),
):
super().__init__(
fit=fit,
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
)
@property
def plane(self):
return self.fit.plane
@property
def visuals_with_include_2d(self):
visuals_2d = super(FitInterferometerPlotter, self).visuals_with_include_2d
return visuals_2d + visuals_2d.__class__()
def plane_plotter_from(self, plane):
return plane_plotters.PlanePlotter(
plane=plane,
grid=self.fit.interferometer.grid,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
@property
def inversion_plotter(self):
return inversion_plotters.InversionPlotter(
inversion=self.fit.inversion,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
def subplot_fit_real_space(self):
if self.fit.inversion is None:
plane_plotter = self.plane_plotter_from(plane=self.plane)
plane_plotter.subplot(
image=True, plane_image=True, auto_filename="subplot_fit_real_space"
)
elif self.fit.inversion is not None:
self.inversion_plotter.subplot(
reconstructed_image=True,
reconstruction=True,
auto_filename="subplot_fit_real_space",
) | autogalaxy/plot/fit_interferometer_plotters.py | from autoarray.plot import inversion_plotters, fit_interferometer_plotters
from autogalaxy.plot.mat_wrap import lensing_mat_plot, lensing_include, lensing_visuals
from autogalaxy.plot import plane_plotters
from autogalaxy.fit import fit_interferometer
class FitInterferometerPlotter(
fit_interferometer_plotters.AbstractFitInterferometerPlotter
):
def __init__(
self,
fit: fit_interferometer.FitInterferometer,
mat_plot_1d: lensing_mat_plot.MatPlot1D = lensing_mat_plot.MatPlot1D(),
visuals_1d: lensing_visuals.Visuals1D = lensing_visuals.Visuals1D(),
include_1d: lensing_include.Include1D = lensing_include.Include1D(),
mat_plot_2d: lensing_mat_plot.MatPlot2D = lensing_mat_plot.MatPlot2D(),
visuals_2d: lensing_visuals.Visuals2D = lensing_visuals.Visuals2D(),
include_2d: lensing_include.Include2D = lensing_include.Include2D(),
):
super().__init__(
fit=fit,
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
)
@property
def plane(self):
return self.fit.plane
@property
def visuals_with_include_2d(self):
visuals_2d = super(FitInterferometerPlotter, self).visuals_with_include_2d
return visuals_2d + visuals_2d.__class__()
def plane_plotter_from(self, plane):
return plane_plotters.PlanePlotter(
plane=plane,
grid=self.fit.interferometer.grid,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
@property
def inversion_plotter(self):
return inversion_plotters.InversionPlotter(
inversion=self.fit.inversion,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
def subplot_fit_real_space(self):
if self.fit.inversion is None:
plane_plotter = self.plane_plotter_from(plane=self.plane)
plane_plotter.subplot(
image=True, plane_image=True, auto_filename="subplot_fit_real_space"
)
elif self.fit.inversion is not None:
self.inversion_plotter.subplot(
reconstructed_image=True,
reconstruction=True,
auto_filename="subplot_fit_real_space",
) | 0.875814 | 0.44734 |
from re import match
from fit.record import TIMESTAMP_FIELD_NUM, TIMESTAMP_MASK, \
TIMESTAMP_FIELD_NAME
from fit.record.fields import Fields
from fit.types import Type
from fit.types.array import Array
from fit.types.dynamic import Dynamic
from fit.types.extended import LocalDateTime
from fit.utils import get_known
class Meta(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
class FieldProxy(object):
def __init__(self, number, key):
self.number = number
self.key = key
def __get__(self, instance, owner):
main_key = instance._get_name(self.number)
value = instance._data.get(main_key, None)
if value is None:
return None
field = instance._get_type(self.number)
if self.key != main_key: # Subfield
dynamic_field = instance._meta.model[self.number]
referred_key = dynamic_field.referred
referred_value = instance[referred_key]
subfield = dynamic_field.get_subfield(referred_value)
if not subfield or self.key != subfield.name:
return None
return instance._meta.subfields[self.key]._load(value)
return field._load(value)
def __set__(self, instance, value):
if value is None:
return self.__delete__(instance)
field = instance._get_type(self.number)
main_key = instance._get_name(self.number)
if self.key != main_key: # Subfield
dynamic_field = instance._meta.model[self.number]
referred_key = dynamic_field.referred
referred_value = instance[referred_key]
subfield = dynamic_field.get_subfield(referred_value)
if self.key != subfield.name:
raise AttributeError("Irrelevant subfield '%s'" % self.key)
data = instance._meta.subfields[self.key]._save(value)
else:
data = field._save(value)
instance._data[main_key] = data
def __delete__(self, instance):
instance._data[self.key] = None
class MessageMeta(type):
def __new__(mcs, name, bases, attrs):
meta = Meta(attrs.pop("_meta", {}))
meta.model = meta.get("model", {})
meta.names = meta.get("names", {})
meta.subfields = meta.get("subfields", {})
inherit = True
if "_meta" in attrs:
inherit = meta.get("inherit", True)
for base in bases:
if hasattr(base, "_meta") and inherit:
meta.model.update(base._meta.get("model", {}))
meta.names.update(base._meta.get("names", {}))
meta.subfields.update(base._meta.get("subfields", {}))
for key, value in attrs.items():
if isinstance(value, Dynamic):
for subfield in value.variants.values():
subfield.type = subfield.type or value.base.__class__
meta.subfields[subfield.name] = subfield.type(
value.number, **subfield.kwargs)
if isinstance(value, Type):
meta.model[value.number] = value
meta.names[value.number] = key
for key in meta.names.values():
attrs.pop(key, None)
attrs['_meta'] = meta
instance = super(MessageMeta, mcs).__new__(mcs, name, bases, attrs)
for number, key in meta.names.items():
setattr(instance, key, FieldProxy(number, key))
for name, subfield in meta.subfields.items():
setattr(instance, name, FieldProxy(subfield.number, name))
return instance
class Message(object):
__metaclass__ = MessageMeta
_meta = Meta()
msg_type = None
def __init__(self, definition=None, **data):
if not definition:
from fit.record.definition import Definition
from fit.record.header import DefinitionHeader
definition = Definition(DefinitionHeader(self.msg_type))
definition.fields = Fields(self._meta.model.values())
definition.number = self.msg_type
self._data = {}
self._definition = definition
self._unknowns = {}
for key, value in data.items():
self[key] = value
def __repr__(self):
data = {}
for field in self.definition.fields:
name = self._get_name(field.number)
field = self._get_type(field.number)
field_name = name
if name.startswith("unknown_"):
field_name = "%s[%d]" % (
field.__class__.__name__,
field.number
)
data[field_name] = "%s%s" % (
getattr(self, name),
getattr(field, "units", None) or ""
)
normal_part = (' %s' % ' '.join(
"%s=%s" % (key, value)
for key, value in data.items()
)).rstrip()
dynamic_part = (' %s' % ' '.join(
"%s=%s" % (key, self[key])
for key in self._meta.subfields.keys()
if self[key] is not None
)).rstrip()
return '<%s.%s[%d]%s%s>' % (
self.__module__.split(".")[-1],
self.__class__.__name__,
self.msg_type,
normal_part, dynamic_part
)
def __setitem__(self, key, value):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
setattr(self, key, value)
def __getitem__(self, key):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
return getattr(self, key)
def __delitem__(self, key):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
delattr(self, key)
def __contains__(self, key):
if isinstance(key, int):
key = self._get_name(key)
return hasattr(self, key)
def __iter__(self):
for field in self.definition.fields:
yield self[field.number]
def __len__(self):
return len(self.definition.fields)
def _get_name(self, number):
if number not in self._meta.names:
return "unknown_%d" % number
return self._meta.names[number]
def _get_type(self, number):
if number not in self._meta.model:
return self._unknowns[number]
return self._meta.model[number]
def _get_number(self, name):
if name in self._meta.subfields:
return None
for number, other in self._meta.names.items():
if name == other:
return number
if match(r"unknown_\d+", name):
number = int(name.split("_")[-1])
if number in self._unknowns:
return number
raise KeyError(name)
@property
def definition(self):
fields = Fields(
field for number, field in self._meta.model.items()
if getattr(self, self._get_name(number)) is not None
)
for number, field in self._unknowns.items():
if getattr(self, self._get_name(number)) is not None:
fields.append(field)
for field in fields:
if isinstance(field, Array):
field.size = field.value_type.size * len(
getattr(self, self._get_name(field.number)))
self._definition.fields = fields
return self._definition
def read(self, read_buffer, model):
for field in model:
unknown = None
if field.number not in self._meta.names:
self._unknowns[field.number] = field
unknown = self._get_name(field.number)
resolved = self._get_type(field.number)
resolved.size = field.size
self._data[self._get_name(field.number)] = field.read(
read_buffer, architecture=self._definition.architecture)
if unknown:
setattr(self, unknown, self._data[unknown])
def write(self, index, model=None):
from fit.record.header import DataHeader
model = model or self.definition.fields
write_buffer = DataHeader(index).write()
for field in model:
value = self[self._get_name(field.number)]
data = field._save(value)
write_buffer += field.write(data)
return write_buffer
def process_timestamp(self, timestamp, offset):
if TIMESTAMP_FIELD_NUM in self: # Message already has TS field
timestamp = self._data.get(
self._get_name(TIMESTAMP_FIELD_NUM), timestamp)
offset = timestamp & TIMESTAMP_MASK
# Current message isn't compressed TS data
if not hasattr(self._definition.header, "offset"):
return timestamp, offset
# Compressed TS data message: calculate new TS and update model
timestamp += (self._definition.header.offset - offset) & TIMESTAMP_MASK
offset = self._definition.header.offset
self._meta.names[TIMESTAMP_FIELD_NUM] = TIMESTAMP_FIELD_NAME
ts_field = self._meta.model[TIMESTAMP_FIELD_NUM] = \
LocalDateTime(TIMESTAMP_FIELD_NUM)
setattr(self, TIMESTAMP_FIELD_NAME, ts_field._load(timestamp))
return timestamp, offset
KNOWN = get_known(__name__, Message, key="msg_type")
def register(message_cls):
if not issubclass(message_cls, Message):
raise ValueError(
"%s should be subclass of Message" % message_cls.__name__)
if not isinstance(message_cls.msg_type, int):
raise ValueError(
"%s should have defined message type" % message_cls.__name__)
if not message_cls._meta.model:
raise ValueError(
"%s should have not empty model" % message_cls.__name__)
KNOWN[message_cls.msg_type] = message_cls | fit/messages/__init__.py | from re import match
from fit.record import TIMESTAMP_FIELD_NUM, TIMESTAMP_MASK, \
TIMESTAMP_FIELD_NAME
from fit.record.fields import Fields
from fit.types import Type
from fit.types.array import Array
from fit.types.dynamic import Dynamic
from fit.types.extended import LocalDateTime
from fit.utils import get_known
class Meta(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
class FieldProxy(object):
def __init__(self, number, key):
self.number = number
self.key = key
def __get__(self, instance, owner):
main_key = instance._get_name(self.number)
value = instance._data.get(main_key, None)
if value is None:
return None
field = instance._get_type(self.number)
if self.key != main_key: # Subfield
dynamic_field = instance._meta.model[self.number]
referred_key = dynamic_field.referred
referred_value = instance[referred_key]
subfield = dynamic_field.get_subfield(referred_value)
if not subfield or self.key != subfield.name:
return None
return instance._meta.subfields[self.key]._load(value)
return field._load(value)
def __set__(self, instance, value):
if value is None:
return self.__delete__(instance)
field = instance._get_type(self.number)
main_key = instance._get_name(self.number)
if self.key != main_key: # Subfield
dynamic_field = instance._meta.model[self.number]
referred_key = dynamic_field.referred
referred_value = instance[referred_key]
subfield = dynamic_field.get_subfield(referred_value)
if self.key != subfield.name:
raise AttributeError("Irrelevant subfield '%s'" % self.key)
data = instance._meta.subfields[self.key]._save(value)
else:
data = field._save(value)
instance._data[main_key] = data
def __delete__(self, instance):
instance._data[self.key] = None
class MessageMeta(type):
def __new__(mcs, name, bases, attrs):
meta = Meta(attrs.pop("_meta", {}))
meta.model = meta.get("model", {})
meta.names = meta.get("names", {})
meta.subfields = meta.get("subfields", {})
inherit = True
if "_meta" in attrs:
inherit = meta.get("inherit", True)
for base in bases:
if hasattr(base, "_meta") and inherit:
meta.model.update(base._meta.get("model", {}))
meta.names.update(base._meta.get("names", {}))
meta.subfields.update(base._meta.get("subfields", {}))
for key, value in attrs.items():
if isinstance(value, Dynamic):
for subfield in value.variants.values():
subfield.type = subfield.type or value.base.__class__
meta.subfields[subfield.name] = subfield.type(
value.number, **subfield.kwargs)
if isinstance(value, Type):
meta.model[value.number] = value
meta.names[value.number] = key
for key in meta.names.values():
attrs.pop(key, None)
attrs['_meta'] = meta
instance = super(MessageMeta, mcs).__new__(mcs, name, bases, attrs)
for number, key in meta.names.items():
setattr(instance, key, FieldProxy(number, key))
for name, subfield in meta.subfields.items():
setattr(instance, name, FieldProxy(subfield.number, name))
return instance
class Message(object):
__metaclass__ = MessageMeta
_meta = Meta()
msg_type = None
def __init__(self, definition=None, **data):
if not definition:
from fit.record.definition import Definition
from fit.record.header import DefinitionHeader
definition = Definition(DefinitionHeader(self.msg_type))
definition.fields = Fields(self._meta.model.values())
definition.number = self.msg_type
self._data = {}
self._definition = definition
self._unknowns = {}
for key, value in data.items():
self[key] = value
def __repr__(self):
data = {}
for field in self.definition.fields:
name = self._get_name(field.number)
field = self._get_type(field.number)
field_name = name
if name.startswith("unknown_"):
field_name = "%s[%d]" % (
field.__class__.__name__,
field.number
)
data[field_name] = "%s%s" % (
getattr(self, name),
getattr(field, "units", None) or ""
)
normal_part = (' %s' % ' '.join(
"%s=%s" % (key, value)
for key, value in data.items()
)).rstrip()
dynamic_part = (' %s' % ' '.join(
"%s=%s" % (key, self[key])
for key in self._meta.subfields.keys()
if self[key] is not None
)).rstrip()
return '<%s.%s[%d]%s%s>' % (
self.__module__.split(".")[-1],
self.__class__.__name__,
self.msg_type,
normal_part, dynamic_part
)
def __setitem__(self, key, value):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
setattr(self, key, value)
def __getitem__(self, key):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
return getattr(self, key)
def __delitem__(self, key):
if isinstance(key, int):
key = self._get_name(key)
self._get_number(key)
delattr(self, key)
def __contains__(self, key):
if isinstance(key, int):
key = self._get_name(key)
return hasattr(self, key)
def __iter__(self):
for field in self.definition.fields:
yield self[field.number]
def __len__(self):
return len(self.definition.fields)
def _get_name(self, number):
if number not in self._meta.names:
return "unknown_%d" % number
return self._meta.names[number]
def _get_type(self, number):
if number not in self._meta.model:
return self._unknowns[number]
return self._meta.model[number]
def _get_number(self, name):
if name in self._meta.subfields:
return None
for number, other in self._meta.names.items():
if name == other:
return number
if match(r"unknown_\d+", name):
number = int(name.split("_")[-1])
if number in self._unknowns:
return number
raise KeyError(name)
@property
def definition(self):
fields = Fields(
field for number, field in self._meta.model.items()
if getattr(self, self._get_name(number)) is not None
)
for number, field in self._unknowns.items():
if getattr(self, self._get_name(number)) is not None:
fields.append(field)
for field in fields:
if isinstance(field, Array):
field.size = field.value_type.size * len(
getattr(self, self._get_name(field.number)))
self._definition.fields = fields
return self._definition
def read(self, read_buffer, model):
for field in model:
unknown = None
if field.number not in self._meta.names:
self._unknowns[field.number] = field
unknown = self._get_name(field.number)
resolved = self._get_type(field.number)
resolved.size = field.size
self._data[self._get_name(field.number)] = field.read(
read_buffer, architecture=self._definition.architecture)
if unknown:
setattr(self, unknown, self._data[unknown])
def write(self, index, model=None):
from fit.record.header import DataHeader
model = model or self.definition.fields
write_buffer = DataHeader(index).write()
for field in model:
value = self[self._get_name(field.number)]
data = field._save(value)
write_buffer += field.write(data)
return write_buffer
def process_timestamp(self, timestamp, offset):
if TIMESTAMP_FIELD_NUM in self: # Message already has TS field
timestamp = self._data.get(
self._get_name(TIMESTAMP_FIELD_NUM), timestamp)
offset = timestamp & TIMESTAMP_MASK
# Current message isn't compressed TS data
if not hasattr(self._definition.header, "offset"):
return timestamp, offset
# Compressed TS data message: calculate new TS and update model
timestamp += (self._definition.header.offset - offset) & TIMESTAMP_MASK
offset = self._definition.header.offset
self._meta.names[TIMESTAMP_FIELD_NUM] = TIMESTAMP_FIELD_NAME
ts_field = self._meta.model[TIMESTAMP_FIELD_NUM] = \
LocalDateTime(TIMESTAMP_FIELD_NUM)
setattr(self, TIMESTAMP_FIELD_NAME, ts_field._load(timestamp))
return timestamp, offset
KNOWN = get_known(__name__, Message, key="msg_type")
def register(message_cls):
if not issubclass(message_cls, Message):
raise ValueError(
"%s should be subclass of Message" % message_cls.__name__)
if not isinstance(message_cls.msg_type, int):
raise ValueError(
"%s should have defined message type" % message_cls.__name__)
if not message_cls._meta.model:
raise ValueError(
"%s should have not empty model" % message_cls.__name__)
KNOWN[message_cls.msg_type] = message_cls | 0.732592 | 0.115088 |
load("@io_bazel_rules_docker//container:load.bzl", "container_load")
BUILD_BAZEL = """
java_import(
name = "server",
jars = ["buildfarm-server_deploy.jar"],
visibility = ["//visibility:public"],
)
java_import(
name = "worker",
jars = ["buildfarm-worker_deploy.jar"],
visibility = ["//visibility:public"],
)
"""
def modify(repository_ctx, filename, directive):
args = ["sed", "-i", directive, filename]
result = repository_ctx.execute(args)
if result.return_code:
fail("%r failed: %s" % (args, result.stderr))
def _buildfarm_repository_impl(repository_ctx):
commit = repository_ctx.attr.commit
url = repository_ctx.attr.remote.format(
commit = commit,
)
# Download and unarchive it!
repository_ctx.download_and_extract(url,
stripPrefix = "-".join(["bazel-buildfarm", commit]),
)
# Apply mods if requested
for filename, commands in repository_ctx.attr.modifications.items():
for command in commands:
modify(repository_ctx, filename, command)
result = repository_ctx.execute(["bazel", "build",
"//src/main/java/build/buildfarm:buildfarm-server_deploy.jar",
"//src/main/java/build/buildfarm:buildfarm-worker_deploy.jar",
], quiet = False)
if result.return_code:
fail("bazel build failed: %s" % result.stderr)
result = repository_ctx.execute(["cp", "bazel-bin/src/main/java/build/buildfarm/buildfarm-server_deploy.jar", "."])
if result.return_code:
fail("copy failed: %s" % result.stderr)
result = repository_ctx.execute(["cp", "bazel-bin/src/main/java/build/buildfarm/buildfarm-worker_deploy.jar", "."])
if result.return_code:
fail("copy failed: %s" % result.stderr)
repository_ctx.file("BUILD.bazel", BUILD_BAZEL)
buildfarm_repository = repository_rule(
implementation = _buildfarm_repository_impl,
attrs = {
"remote": attr.string(
default = "https://github.com/bazelbuild/bazel-buildfarm/archive/{commit}.tar.gz",
),
"commit": attr.string(
mandatory = True,
),
"modifications": attr.string_list_dict(
doc = "Optional sed modifications to apply",
),
}
) | farm/workspace.bzl | load("@io_bazel_rules_docker//container:load.bzl", "container_load")
BUILD_BAZEL = """
java_import(
name = "server",
jars = ["buildfarm-server_deploy.jar"],
visibility = ["//visibility:public"],
)
java_import(
name = "worker",
jars = ["buildfarm-worker_deploy.jar"],
visibility = ["//visibility:public"],
)
"""
def modify(repository_ctx, filename, directive):
args = ["sed", "-i", directive, filename]
result = repository_ctx.execute(args)
if result.return_code:
fail("%r failed: %s" % (args, result.stderr))
def _buildfarm_repository_impl(repository_ctx):
commit = repository_ctx.attr.commit
url = repository_ctx.attr.remote.format(
commit = commit,
)
# Download and unarchive it!
repository_ctx.download_and_extract(url,
stripPrefix = "-".join(["bazel-buildfarm", commit]),
)
# Apply mods if requested
for filename, commands in repository_ctx.attr.modifications.items():
for command in commands:
modify(repository_ctx, filename, command)
result = repository_ctx.execute(["bazel", "build",
"//src/main/java/build/buildfarm:buildfarm-server_deploy.jar",
"//src/main/java/build/buildfarm:buildfarm-worker_deploy.jar",
], quiet = False)
if result.return_code:
fail("bazel build failed: %s" % result.stderr)
result = repository_ctx.execute(["cp", "bazel-bin/src/main/java/build/buildfarm/buildfarm-server_deploy.jar", "."])
if result.return_code:
fail("copy failed: %s" % result.stderr)
result = repository_ctx.execute(["cp", "bazel-bin/src/main/java/build/buildfarm/buildfarm-worker_deploy.jar", "."])
if result.return_code:
fail("copy failed: %s" % result.stderr)
repository_ctx.file("BUILD.bazel", BUILD_BAZEL)
buildfarm_repository = repository_rule(
implementation = _buildfarm_repository_impl,
attrs = {
"remote": attr.string(
default = "https://github.com/bazelbuild/bazel-buildfarm/archive/{commit}.tar.gz",
),
"commit": attr.string(
mandatory = True,
),
"modifications": attr.string_list_dict(
doc = "Optional sed modifications to apply",
),
}
) | 0.468061 | 0.078008 |
import unittest
from LuckyNumbers import count
from typing import Dict, List
class TestLuckyNumbers(unittest.TestCase):
"""
All the unit tests of the lucky numbers program.
"""
def test_count_0(self):
"""
Tests the simplest case, with 0.
"""
self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')
def test_count_5(self):
"""
Test the fast method with 5.
"""
value: int = 5
result: int = 0
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_9(self):
"""
Test the fast method with 9.
"""
value: int = 9
result: int = 2
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_10(self):
"""
Test the fast method with 10.
"""
value: int = 10
result: int = 2
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_20(self):
"""
Test the fast method with 20.
"""
value: int = 20
result: int = 4
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_35(self):
"""
Test the fast method with 35.
"""
value: int = 35
result: int = 6
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_66(self):
"""
Test the fast method with 66.
"""
value: int = 66
result: int = 18
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_72(self):
"""
Test the fast method with 72.
"""
value: int = 72
result: int = 21
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_173(self):
"""
Test the fast method with 173.
"""
value: int = 173
result: int = 55
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_459(self):
"""
Test the fast method with 459.
"""
value: int = 459
result: int = 148
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_666(self):
"""
Test the fast method with 666.
"""
value: int = 666
result: int = 264
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_770(self):
"""
Test the fast method with 770.
"""
value: int = 770
result: int = 306
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_1719(self):
"""
Test the fast method with 2645.
"""
value: int = 2645
result: int = 1113
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_2645(self):
"""
Test the fast method with 1719.
"""
value: int = 1719
result: int = 723
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_7000(self):
"""
Test the fast method with 700.
"""
value: int = 7000
result: int = 3333
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_000(self):
"""
Test the fast method with 361000.
"""
value: int = 361_000
result: int = 187_995
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_070(self):
"""
Test the fast method with 361070.
"""
value: int = 361_070
result: int = 188_058
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_077(self):
"""
Test the fast method with 361077.
"""
value: int = 361_077
result: int = 188_065
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_080(self):
"""
Test the fast method with 361080.
"""
value: int = 361_080
result: int = 188_067
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_087(self):
"""
Test the fast method with 361087.
"""
value: int = 361_087
result: int = 188_067
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_700_000(self):
"""
Test the fast method with 700000.
"""
value: int = 700_000
result: int = 374_421
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_770_000(self):
"""
Test the fast method with 770000.
"""
value: int = 770_000
result: int = 410_562
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_773_000(self):
"""
Test the fast method with 773000.
"""
value: int = 773_000
result: int = 411_864
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_773_904(self):
"""
Test the fast method with 773904.
"""
value: int = 773_904
result: int = 412_264
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_800_000(self):
"""
Test the fast method with 800000.
"""
value: int = 800_000
result: int = 426_983
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_880_000(self):
"""
Test the fast method with 880000.
"""
value: int = 880_000
result: int = 472_910
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_883_904(self):
"""
Test the fast method with 883904.
"""
value: int = 883_904
result: int = 475_749
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_6_645_243(self):
"""
Test the fast method with 6645243.
"""
value: int = 6_645_243
result: int = 3_615_948
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_1_000_000_000_000_000_000(self):
"""
Test the fast method with 1000000000000000000.
"""
value: int = 1_000_000_000_000_000_000
result: int = 264160473575034274
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
if __name__ == '__main__':
unittest.main() | Communautaires/[UT] The lucky number.py | import unittest
from LuckyNumbers import count
from typing import Dict, List
class TestLuckyNumbers(unittest.TestCase):
"""
All the unit tests of the lucky numbers program.
"""
def test_count_0(self):
"""
Tests the simplest case, with 0.
"""
self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')
def test_count_5(self):
"""
Test the fast method with 5.
"""
value: int = 5
result: int = 0
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_9(self):
"""
Test the fast method with 9.
"""
value: int = 9
result: int = 2
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_10(self):
"""
Test the fast method with 10.
"""
value: int = 10
result: int = 2
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_20(self):
"""
Test the fast method with 20.
"""
value: int = 20
result: int = 4
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_35(self):
"""
Test the fast method with 35.
"""
value: int = 35
result: int = 6
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_66(self):
"""
Test the fast method with 66.
"""
value: int = 66
result: int = 18
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_72(self):
"""
Test the fast method with 72.
"""
value: int = 72
result: int = 21
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_173(self):
"""
Test the fast method with 173.
"""
value: int = 173
result: int = 55
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_459(self):
"""
Test the fast method with 459.
"""
value: int = 459
result: int = 148
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_666(self):
"""
Test the fast method with 666.
"""
value: int = 666
result: int = 264
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_770(self):
"""
Test the fast method with 770.
"""
value: int = 770
result: int = 306
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_1719(self):
    """
    Test the fast method with 1719.

    Fix: the value/result pair (2645, 1113) had been swapped with
    test_count_2645; this test now checks the value its name promises,
    using the 1719 -> 723 pair already present in this suite.
    """
    value: int = 1719
    result: int = 723  # expected count of lucky numbers in [0, 1719]
    self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_2645(self):
    """
    Test the fast method with 2645.

    Fix: the value/result pair (1719, 723) had been swapped with
    test_count_1719; this test now checks the value its name promises,
    using the 2645 -> 1113 pair already present in this suite.
    """
    value: int = 2645
    result: int = 1113  # expected count of lucky numbers in [0, 2645]
    self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_7000(self):
"""
Test the fast method with 7000.
"""
value: int = 7000
result: int = 3333
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_000(self):
"""
Test the fast method with 361000.
"""
value: int = 361_000
result: int = 187_995
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_070(self):
"""
Test the fast method with 361070.
"""
value: int = 361_070
result: int = 188_058
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_077(self):
"""
Test the fast method with 361077.
"""
value: int = 361_077
result: int = 188_065
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_080(self):
"""
Test the fast method with 361080.
"""
value: int = 361_080
result: int = 188_067
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_361_087(self):
"""
Test the fast method with 361087.
"""
value: int = 361_087
result: int = 188_067
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_700_000(self):
"""
Test the fast method with 700000.
"""
value: int = 700_000
result: int = 374_421
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_770_000(self):
"""
Test the fast method with 770000.
"""
value: int = 770_000
result: int = 410_562
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_773_000(self):
"""
Test the fast method with 773000.
"""
value: int = 773_000
result: int = 411_864
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_773_904(self):
"""
Test the fast method with 773904.
"""
value: int = 773_904
result: int = 412_264
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_800_000(self):
"""
Test the fast method with 800000.
"""
value: int = 800_000
result: int = 426_983
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_880_000(self):
"""
Test the fast method with 880000.
"""
value: int = 880_000
result: int = 472_910
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_883_904(self):
"""
Test the fast method with 883904.
"""
value: int = 883_904
result: int = 475_749
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_6_645_243(self):
"""
Test the fast method with 6645243.
"""
value: int = 6_645_243
result: int = 3_615_948
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
def test_count_1_000_000_000_000_000_000(self):
"""
Test the fast method with 1000000000000000000.
"""
value: int = 1_000_000_000_000_000_000
result: int = 264160473575034274
self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')
if __name__ == '__main__':
unittest.main() | 0.787768 | 0.836087 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CreateUser(Document):
# Frappe DocType controller: mirrors each "Create User" record into the
# core "User" doctype every time the record is saved.
def before_save(self):
# CHECK SITE LIMIT OR DELETE USER WHEN USER RECORD
# [CREATE USER] WHEN USER IS NOT CREATED IN [USER] ------ ??????????????
# Create the backing core User, or update it if it already exists.
create_user(self.enabled, self.email, self.first_name, self.password, self.role_profile )
# update password when password is set -----------------------------
# Only push a password when one was actually provided (not None/empty).
if(self.password is not None):
if(self.password != ""):
update_password(self.email, self.password)
@frappe.whitelist()
def create_user(enabled, email, first_name, password, role_profile ):
    """Create the core User identified by *email*, or update it in place.

    Fix: the original queried ``frappe.db.exists`` twice (once for the
    update path, once negated for the create path); a single if/else
    saves one database round-trip and makes the branches mutually
    exclusive and explicit.
    """
    if frappe.db.exists("User", email):
        # Record already present -- synchronise its fields instead of inserting.
        update_user_record(enabled, email, first_name, password, role_profile)
    else:
        user = frappe.get_doc({
            "doctype": "User",
            "email": email,
            "first_name": first_name,
            "send_welcome_email": 0,
            "enabled": enabled
        })
        user.insert(ignore_permissions=True)
        frappe.db.commit()
        # Newly created users also receive their role profile.
        set_role_profile(email, role_profile)
        frappe.db.commit()
@frappe.whitelist()
def update_password(email, password):
    """Set a new password on the core User identified by *email*."""
    user_doc = frappe.get_doc("User", email)
    user_doc.new_password = password
    user_doc.save(ignore_permissions=True)
    frappe.db.commit()
@frappe.whitelist()
def set_role_profile(email, role_profile):
    """Assign *role_profile* to the User, then promote it to a System User."""
    user_doc = frappe.get_doc("User", email)
    user_doc.role_profile_name = role_profile
    user_doc.save(ignore_permissions=True)
    frappe.db.commit()
    # NOTE(review): presumably role profiles only apply to system users,
    # hence the follow-up promotion -- confirm against the site's setup.
    set_to_system_user(email)
    frappe.db.commit()
@frappe.whitelist()
def set_to_system_user(email):
    """Switch the User identified by *email* to the "System User" type."""
    user_doc = frappe.get_doc("User", email)
    user_doc.user_type = "System User"
    user_doc.save(ignore_permissions=True)
    frappe.db.commit()
# TO UPDATE USER
@frappe.whitelist()
def update_user_record(enabled, email, first_name, password, role_profile):
    """Synchronise an existing core User with the given field values.

    Fix: only assign ``new_password`` when a non-empty password was
    supplied, matching the guard used in ``CreateUser.before_save``.
    The original wrote ``None``/"" into ``new_password`` unconditionally.
    """
    doc = frappe.get_doc("User", email)
    doc.enabled = enabled
    doc.email = email
    doc.first_name = first_name
    if password:  # consistent with the password guard in before_save
        doc.new_password = password
    doc.role_profile_name = role_profile
    doc.module_profile = "No Module Allowed"
    doc.save(ignore_permissions=True)
    frappe.db.commit()
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CreateUser(Document):
def before_save(self):
# CHECK SITE LIMIT OR DELETE USER WHEN USER RECORD
# [CREATE USER] WHEN USER IS NOT CREATED IN [USER] ------ ??????????????
create_user(self.enabled, self.email, self.first_name, self.password, self.role_profile )
# update password when password is set -----------------------------
if(self.password is not None):
if(self.password != ""):
update_password(self.email, self.password)
@frappe.whitelist()
def create_user(enabled, email, first_name, password, role_profile ):
if frappe.db.exists("User", email):
update_user_record(enabled, email, first_name, password, role_profile)
if not frappe.db.exists("User", email):
user = frappe.get_doc({
"doctype": "User",
"email": email,
"first_name": first_name,
"send_welcome_email": 0,
"enabled": enabled
})
user.insert(ignore_permissions=True)
frappe.db.commit()
set_role_profile(email, role_profile)
frappe.db.commit()
@frappe.whitelist()
def update_password(email, password):
doc = frappe.get_doc("User", email)
doc.new_password = password
doc.save(ignore_permissions=True)
frappe.db.commit()
@frappe.whitelist()
def set_role_profile(email, role_profile):
doc = frappe.get_doc("User", email)
doc.role_profile_name = role_profile
doc.save(ignore_permissions=True)
frappe.db.commit()
set_to_system_user(email)
frappe.db.commit()
@frappe.whitelist()
def set_to_system_user(email):
doc = frappe.get_doc("User", email)
doc.user_type = "System User"
doc.save(ignore_permissions=True)
frappe.db.commit()
# TO UPDATE USER
@frappe.whitelist()
def update_user_record(enabled, email, first_name, password, role_profile):
doc = frappe.get_doc("User", email)
doc.enabled = enabled
doc.email = email
doc.first_name = first_name
doc.new_password = password
doc.role_profile_name = role_profile
doc.module_profile ="No Module Allowed"
doc.save(ignore_permissions=True)
frappe.db.commit() | 0.253676 | 0.074198 |
from provider.aws.all.command import All
from provider.aws.common_aws import generate_session, aws_verbose
from provider.aws.iot.command import Iot
from provider.aws.limit.command import Limit
from provider.aws.policy.command import Policy
from provider.aws.security.command import Security
from provider.aws.vpc.command import Vpc
from shared.common import (
exit_critical,
message_handler,
BaseCommand,
)
DEFAULT_REGION = "us-east-1"
DEFAULT_PARTITION_CODE = "aws"
def get_partition(session, region_name):
    """Return the partition code (e.g. "aws", "aws-cn") containing *region_name*.

    Falls back to the default partition when the region is absent from the
    botocore endpoint data.

    Fix: stop scanning once the region is located (dict membership test plus
    ``break``) instead of iterating every remaining partition/region pair;
    region names are unique per partition in the endpoints data.
    """
    partition_code = DEFAULT_PARTITION_CODE  # assume default even if region not found
    partition_name = "AWS Standard"

    # pylint: disable=protected-access
    loader = session._session.get_component("data_loader")
    endpoints = loader.load_data("endpoints")

    for partition in endpoints["partitions"]:
        # "regions" maps region name -> metadata, so membership is O(1).
        if region_name in partition["regions"]:
            partition_code = partition["partition"]
            partition_name = partition["partitionName"]
            break

    if partition_code != DEFAULT_PARTITION_CODE:
        message_handler(
            "Found non-default partition: {} ({})".format(
                partition_code, partition_name
            ),
            "HEADER",
        )
    return partition_code
def check_region_profile(arg_region_name, profile_region_name):
    """Abort with a critical error when no region is available at all.

    A region may come either from the CLI argument or from the AWS profile
    configuration; when both are missing the tool cannot proceed.
    """
    if arg_region_name is not None or profile_region_name is not None:
        return
    exit_critical("Neither region parameter nor region config were passed")
def check_region(region_parameter, region_name, session, partition_code):
"""
Region us-east-1 as a default region here, if not aws partition, just return asked region
This is just to list aws regions, doesn't matter default region
"""
# Non-standard partitions (e.g. China, GovCloud) cannot be enumerated via the
# us-east-1 endpoint, so trust the caller-supplied region as-is.
if partition_code != "aws":
return [region_name]
# AllRegions=True also returns opt-in regions, not just enabled ones.
client = session.client("ec2", region_name=DEFAULT_REGION)
valid_region_names = [
region["RegionName"]
for region in client.describe_regions(AllRegions=True)["Regions"]
]
if region_parameter != "all":
# A single region was requested: validate it, then narrow the list to it.
if region_name not in valid_region_names:
message = "There is no region named: {0}".format(region_name)
exit_critical(message)
else:
valid_region_names = [region_name]
return valid_region_names
def aws_main(args) -> BaseCommand:
"""Build and return the command object selected by ``args.command``.

Validates the AWS session, resolves the region list and (when present)
the threshold argument before dispatching on the sub-command name.
"""
# Check if verbose mode is enabled
if args.verbose:
aws_verbose()
# aws profile check
# Some sub-commands define no region argument at all; "in args" probes the
# argparse namespace for the attribute.
if "region_name" not in args:
session = generate_session(profile_name=args.profile_name, region_name=None)
else:
session = generate_session(
profile_name=args.profile_name, region_name=args.region_name
)
# Force credential resolution early so failures surface before any work.
session.get_credentials()
region_name = session.region_name
partition_code = get_partition(session, region_name)
if "region_name" not in args:
region_names = [DEFAULT_REGION]
else:
# checking region configuration
check_region_profile(
arg_region_name=args.region_name, profile_region_name=region_name
)
# assuming region parameter precedes region configuration
if args.region_name is not None:
region_name = args.region_name
# get regions
region_names = check_region(
region_parameter=args.region_name,
region_name=region_name,
session=session,
partition_code=partition_code,
)
# Threshold arrives as a string; isdigit() also rejects negatives and
# non-numeric input before the int() range check.
if "threshold" in args:
if args.threshold is not None:
if args.threshold.isdigit() is False:
exit_critical("Threshold must be between 0 and 100")
else:
if int(args.threshold) < 0 or int(args.threshold) > 100:
exit_critical("Threshold must be between 0 and 100")
# Dispatch on the sub-command name; each branch builds its command object.
if args.command == "aws-vpc":
command = Vpc(
vpc_id=args.vpc_id,
region_names=region_names,
session=session,
partition_code=partition_code,
)
elif args.command == "aws-policy":
command = Policy(
region_names=region_names, session=session, partition_code=partition_code
)
elif args.command == "aws-iot":
command = Iot(
thing_name=args.thing_name,
region_names=region_names,
session=session,
partition_code=partition_code,
)
elif args.command == "aws-all":
command = All(
region_names=region_names, session=session, partition_code=partition_code
)
elif args.command == "aws-limit":
command = Limit(
region_names=region_names,
session=session,
threshold=args.threshold,
partition_code=partition_code,
)
elif args.command == "aws-security":
command = Security(
region_names=region_names,
session=session,
commands=args.commands,
partition_code=partition_code,
)
else:
raise NotImplementedError("Unknown command")
return command | cloudiscovery/provider/aws/command.py | from provider.aws.all.command import All
from provider.aws.common_aws import generate_session, aws_verbose
from provider.aws.iot.command import Iot
from provider.aws.limit.command import Limit
from provider.aws.policy.command import Policy
from provider.aws.security.command import Security
from provider.aws.vpc.command import Vpc
from shared.common import (
exit_critical,
message_handler,
BaseCommand,
)
DEFAULT_REGION = "us-east-1"
DEFAULT_PARTITION_CODE = "aws"
def get_partition(session, region_name):
    """Return the partition code (e.g. "aws", "aws-cn") containing *region_name*.

    Falls back to the default partition when the region is absent from the
    botocore endpoint data.

    Fix: stop scanning once the region is located (dict membership test plus
    ``break``) instead of iterating every remaining partition/region pair;
    region names are unique per partition in the endpoints data.
    """
    partition_code = DEFAULT_PARTITION_CODE  # assume default even if region not found
    partition_name = "AWS Standard"

    # pylint: disable=protected-access
    loader = session._session.get_component("data_loader")
    endpoints = loader.load_data("endpoints")

    for partition in endpoints["partitions"]:
        # "regions" maps region name -> metadata, so membership is O(1).
        if region_name in partition["regions"]:
            partition_code = partition["partition"]
            partition_name = partition["partitionName"]
            break

    if partition_code != DEFAULT_PARTITION_CODE:
        message_handler(
            "Found non-default partition: {} ({})".format(
                partition_code, partition_name
            ),
            "HEADER",
        )
    return partition_code
def check_region_profile(arg_region_name, profile_region_name):
    """Abort with a critical error when no region is available at all.

    A region may come either from the CLI argument or from the AWS profile
    configuration; when both are missing the tool cannot proceed.
    """
    if arg_region_name is not None or profile_region_name is not None:
        return
    exit_critical("Neither region parameter nor region config were passed")
def check_region(region_parameter, region_name, session, partition_code):
"""
Region us-east-1 as a default region here, if not aws partition, just return asked region
This is just to list aws regions, doesn't matter default region
"""
if partition_code != "aws":
return [region_name]
client = session.client("ec2", region_name=DEFAULT_REGION)
valid_region_names = [
region["RegionName"]
for region in client.describe_regions(AllRegions=True)["Regions"]
]
if region_parameter != "all":
if region_name not in valid_region_names:
message = "There is no region named: {0}".format(region_name)
exit_critical(message)
else:
valid_region_names = [region_name]
return valid_region_names
def aws_main(args) -> BaseCommand:
# Check if verbose mode is enabled
if args.verbose:
aws_verbose()
# aws profile check
if "region_name" not in args:
session = generate_session(profile_name=args.profile_name, region_name=None)
else:
session = generate_session(
profile_name=args.profile_name, region_name=args.region_name
)
session.get_credentials()
region_name = session.region_name
partition_code = get_partition(session, region_name)
if "region_name" not in args:
region_names = [DEFAULT_REGION]
else:
# checking region configuration
check_region_profile(
arg_region_name=args.region_name, profile_region_name=region_name
)
# assuming region parameter precedes region configuration
if args.region_name is not None:
region_name = args.region_name
# get regions
region_names = check_region(
region_parameter=args.region_name,
region_name=region_name,
session=session,
partition_code=partition_code,
)
if "threshold" in args:
if args.threshold is not None:
if args.threshold.isdigit() is False:
exit_critical("Threshold must be between 0 and 100")
else:
if int(args.threshold) < 0 or int(args.threshold) > 100:
exit_critical("Threshold must be between 0 and 100")
if args.command == "aws-vpc":
command = Vpc(
vpc_id=args.vpc_id,
region_names=region_names,
session=session,
partition_code=partition_code,
)
elif args.command == "aws-policy":
command = Policy(
region_names=region_names, session=session, partition_code=partition_code
)
elif args.command == "aws-iot":
command = Iot(
thing_name=args.thing_name,
region_names=region_names,
session=session,
partition_code=partition_code,
)
elif args.command == "aws-all":
command = All(
region_names=region_names, session=session, partition_code=partition_code
)
elif args.command == "aws-limit":
command = Limit(
region_names=region_names,
session=session,
threshold=args.threshold,
partition_code=partition_code,
)
elif args.command == "aws-security":
command = Security(
region_names=region_names,
session=session,
commands=args.commands,
partition_code=partition_code,
)
else:
raise NotImplementedError("Unknown command")
return command | 0.434701 | 0.106319 |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import egg.core as core
from egg.core import ConsoleLogger
from egg.core.callbacks import WandbLogger
from egg.zoo.population_game.data import get_dataloader
from egg.zoo.population_game.game_callbacks import (
BestStatsTracker,
DistributedSamplerEpochSetter,
)
from egg.zoo.population_game.games import build_game
from egg.zoo.population_game.LARC import LARC
from egg.zoo.population_game.utils import add_weight_decay, get_common_opts
def main(params):
"""Train the EGG population game end to end from CLI parameters.

Builds the dataloader, game, SGD optimizer with cosine LR schedule
(optionally wrapped in LARC), and runs the core Trainer.
"""
opts = get_common_opts(params=params)
print(f"{opts}\n")
# Batches are split in two halves (sender/receiver views), hence even size.
assert (
not opts.batch_size % 2
), f"Batch size must be multiple of 2. Found {opts.batch_size} instead"
print(
f"Running a distruted training is set to: {opts.distributed_context.is_distributed}. "
f"World size is {opts.distributed_context.world_size}. "
f"Using batch of size {opts.batch_size} on {opts.distributed_context.world_size} device(s)\n"
f"Applying augmentations: {opts.use_augmentations} with image size: {opts.image_size}.\n"
)
# Drop into the debugger on request (single-process runs only).
if not opts.distributed_context.is_distributed and opts.pdb:
breakpoint()
train_loader = get_dataloader(
dataset_dir=opts.dataset_dir,
dataset_name=opts.dataset_name,
image_size=opts.image_size,
batch_size=opts.batch_size,
num_workers=opts.num_workers,
is_distributed=opts.distributed_context.is_distributed,
seed=opts.random_seed,
use_augmentations=opts.use_augmentations,
return_original_image=opts.return_original_image,
)
game = build_game(opts)
# Weight decay is applied selectively; batch-norm params are skipped.
model_parameters = add_weight_decay(game, opts.weight_decay, skip_name="bn")
optimizer = torch.optim.SGD(
model_parameters,
lr=opts.lr,
momentum=0.9,
)
optimizer_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=opts.n_epochs
)
# LARC adaptively clips per-layer learning rates (useful for large batches).
if opts.use_larc:
optimizer = LARC(optimizer, trust_coefficient=0.001, clip=False, eps=1e-8)
callbacks = [
ConsoleLogger(as_json=True, print_train_loss=True),
BestStatsTracker(),
WandbLogger(opts)
]
if opts.distributed_context.is_distributed:
# Re-seed the distributed sampler every epoch for proper shuffling.
callbacks.append(DistributedSamplerEpochSetter())
trainer = core.Trainer(
game=game,
optimizer=optimizer,
optimizer_scheduler=optimizer_scheduler,
train_data=train_loader,
callbacks=callbacks,
)
trainer.train(n_epochs=opts.n_epochs)
print("| FINISHED JOB")
if __name__ == "__main__":
# NOTE(review): anomaly detection slows autograd considerably; presumably
# left on for debugging -- consider gating it behind a CLI flag.
torch.autograd.set_detect_anomaly(True)
import sys
main(sys.argv[1:]) | egg/zoo/population_game/train.py |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import egg.core as core
from egg.core import ConsoleLogger
from egg.core.callbacks import WandbLogger
from egg.zoo.population_game.data import get_dataloader
from egg.zoo.population_game.game_callbacks import (
BestStatsTracker,
DistributedSamplerEpochSetter,
)
from egg.zoo.population_game.games import build_game
from egg.zoo.population_game.LARC import LARC
from egg.zoo.population_game.utils import add_weight_decay, get_common_opts
def main(params):
opts = get_common_opts(params=params)
print(f"{opts}\n")
assert (
not opts.batch_size % 2
), f"Batch size must be multiple of 2. Found {opts.batch_size} instead"
print(
f"Running a distruted training is set to: {opts.distributed_context.is_distributed}. "
f"World size is {opts.distributed_context.world_size}. "
f"Using batch of size {opts.batch_size} on {opts.distributed_context.world_size} device(s)\n"
f"Applying augmentations: {opts.use_augmentations} with image size: {opts.image_size}.\n"
)
if not opts.distributed_context.is_distributed and opts.pdb:
breakpoint()
train_loader = get_dataloader(
dataset_dir=opts.dataset_dir,
dataset_name=opts.dataset_name,
image_size=opts.image_size,
batch_size=opts.batch_size,
num_workers=opts.num_workers,
is_distributed=opts.distributed_context.is_distributed,
seed=opts.random_seed,
use_augmentations=opts.use_augmentations,
return_original_image=opts.return_original_image,
)
game = build_game(opts)
model_parameters = add_weight_decay(game, opts.weight_decay, skip_name="bn")
optimizer = torch.optim.SGD(
model_parameters,
lr=opts.lr,
momentum=0.9,
)
optimizer_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=opts.n_epochs
)
if opts.use_larc:
optimizer = LARC(optimizer, trust_coefficient=0.001, clip=False, eps=1e-8)
callbacks = [
ConsoleLogger(as_json=True, print_train_loss=True),
BestStatsTracker(),
WandbLogger(opts)
]
if opts.distributed_context.is_distributed:
callbacks.append(DistributedSamplerEpochSetter())
trainer = core.Trainer(
game=game,
optimizer=optimizer,
optimizer_scheduler=optimizer_scheduler,
train_data=train_loader,
callbacks=callbacks,
)
trainer.train(n_epochs=opts.n_epochs)
print("| FINISHED JOB")
if __name__ == "__main__":
torch.autograd.set_detect_anomaly(True)
import sys
main(sys.argv[1:]) | 0.766468 | 0.338979 |
import os
import shutil
from time import sleep
import pytest
import yaml
from ap.topic_model.v1.TopicModelBase_pb2 import (
DocId,
Document,
DocumentPack,
ParallelDocIds,
)
from ap.topic_model.v1.TopicModelTrain_pb2 import (
AddDocumentsToModelRequest,
AddDocumentsToModelResponse,
StartTrainTopicModelRequest,
StartTrainTopicModelResponse,
TrainTopicModelStatusRequest,
TrainTopicModelStatusResponse,
)
@pytest.fixture(scope="module")
def grpc_add_to_server():
# Registration hook consumed by the pytest-grpc plugin to wire the
# servicer into the in-process test server.
from ap.topic_model.v1.TopicModelTrain_pb2_grpc import (
add_TopicModelTrainServiceServicer_to_server,
)
return add_TopicModelTrainServiceServicer_to_server
@pytest.fixture(scope="module")
def models_dir(tmpdir_factory):
# Fresh temporary directory that receives trained model artefacts.
return tmpdir_factory.mktemp("models")
@pytest.fixture(scope="module")
def data_dir(tmpdir_factory):
    """Temporary data directory pre-populated with classes.yaml and a dictionary."""
    directory = tmpdir_factory.mktemp("data")

    languages = "ru en cs de es fr it ja kk ky nl pl pt tr zh".split()
    # Every language modality gets class weight 1.
    weights = {f"@{language}": 1 for language in languages}
    with open(os.path.join(directory, "classes.yaml"), "w") as handle:
        yaml.safe_dump(weights, handle)

    shutil.copy(
        "./tests/data/dictionary.txt", os.path.join(directory, "dictionary.txt")
    )
    return directory
@pytest.fixture(scope="module")
def test_conf():
    """Hyper-parameter set shared by the training-service tests."""
    return dict(
        num_epochs_full=3,
        num_epochs_update=2,
        num_topics=100,
        num_bg_topics=100,
        tau=0.2,
        gamma=0,
        max_dictionary_size=10,
    )
@pytest.fixture(scope="module")
def grpc_servicer(test_conf, models_dir, data_dir, bpe_models):
# The service implementation under test; `bpe_models` is supplied by an
# external fixture (presumably conftest.py -- not visible here).
from ap.train.server import TopicModelTrainServiceImpl
return TopicModelTrainServiceImpl(bpe_models, test_conf, models_dir, data_dir)
@pytest.fixture(scope="module")
def grpc_stub_cls(grpc_channel):
# Client stub class; pytest-grpc uses it to build the `grpc_stub` fixture.
from ap.topic_model.v1.TopicModelTrain_pb2_grpc import TopicModelTrainServiceStub
return TopicModelTrainServiceStub
@pytest.fixture(scope="function")
def clean_data(data_dir):
    """Empty data_dir/vw_new so each test starts from a clean queue."""
    new_docs_dir = os.path.join(data_dir, "vw_new")
    for entry in os.listdir(new_docs_dir):
        os.remove(os.path.join(new_docs_dir, entry))
@pytest.mark.usefixtures("clean_data")
def test_add_documents(models_dir, data_dir, grpc_stub):
# Two docs, one parallel group: expect OK status and two lines queued
# in the vw_new staging file.
docs = [
Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="en"),
Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
]
parallel_docs = ParallelDocIds(Ids=[DocId(Lo=0, Hi=0)])
resp = grpc_stub.AddDocumentsToModel(
AddDocumentsToModelRequest(
Collection=DocumentPack(Documents=docs), ParallelDocuments=[parallel_docs]
)
)
assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK
with open(os.path.join(data_dir, "vw_new", "actual.txt"), "r") as f:
res = f.readlines()
assert len(res) == 2
@pytest.mark.usefixtures("clean_data")
def test_add_documents_new_lang(models_dir, data_dir, grpc_stub):
# A document in an unknown language ("gf") paired with a known-language
# translation should still be accepted and staged.
docs = [
Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="gf"),
Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
]
parallel_docs = ParallelDocIds(Ids=[DocId(Lo=0, Hi=0), DocId(Lo=0, Hi=1)])
resp = grpc_stub.AddDocumentsToModel(
AddDocumentsToModelRequest(
Collection=DocumentPack(Documents=docs), ParallelDocuments=[parallel_docs]
)
)
assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK
with open(os.path.join(data_dir, "vw_new", "actual.txt"), "r") as f:
res = f.readlines()
assert len(res) == 2
@pytest.mark.usefixtures("clean_data")
def test_add_documents_new_lang_no_translation(models_dir, data_dir, grpc_stub):
# Unknown language with no known-language translation in its parallel
# group: expect NO_TRANSLATION and nothing written to the staging file.
docs = [
Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="rq"),
Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="rq"),
Document(Id=DocId(Lo=0, Hi=2), Tokens=["c", "D"], Language="fr"),
]
parallel_docs = ParallelDocIds(Ids=[DocId(Lo=0, Hi=0)])
resp = grpc_stub.AddDocumentsToModel(
AddDocumentsToModelRequest(
Collection=DocumentPack(Documents=docs), ParallelDocuments=[parallel_docs]
)
)
assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.NO_TRANSLATION
assert not os.path.exists(os.path.join(data_dir, "vw_new", "actual.txt"))
def test_start_train(data_dir, grpc_stub):
# End-to-end: stage three parallel doc pairs, run a FULL training pass,
# then an UPDATE pass, checking the staging/batch directories each time.
docs = [
Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="gf"),
Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
Document(Id=DocId(Lo=1, Hi=0), Tokens=["e", "f"], Language="gf"),
Document(Id=DocId(Lo=1, Hi=1), Tokens=["c", "b"], Language="en"),
Document(Id=DocId(Lo=2, Hi=0), Tokens=["a", "f"], Language="gf"),
Document(Id=DocId(Lo=2, Hi=1), Tokens=["a", "b"], Language="en"),
]
parallel_docs = [
ParallelDocIds(Ids=[DocId(Lo=0, Hi=0), DocId(Lo=0, Hi=1)]),
ParallelDocIds(Ids=[DocId(Lo=1, Hi=0), DocId(Lo=1, Hi=1)]),
ParallelDocIds(Ids=[DocId(Lo=2, Hi=0), DocId(Lo=2, Hi=1)]),
]
resp = grpc_stub.AddDocumentsToModel(
AddDocumentsToModelRequest(
Collection=DocumentPack(Documents=docs), ParallelDocuments=parallel_docs
)
)
assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK
resp = grpc_stub.StartTrainTopicModel(
StartTrainTopicModelRequest(Type=StartTrainTopicModelRequest.TrainType.FULL)
)
assert resp.Status == StartTrainTopicModelResponse.StartTrainTopicModelStatus.OK
# Training runs asynchronously -- poll its status until it leaves RUNNING.
while (
grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
== TrainTopicModelStatusResponse.TrainTopicModelStatus.RUNNING
):
sleep(1)
assert (
grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
== TrainTopicModelStatusResponse.TrainTopicModelStatus.COMPLETE
)
# After a FULL pass the "_new" staging dirs are drained into vw/batches.
assert len(os.listdir(os.path.join(data_dir, "vw_new"))) == 0
assert len(os.listdir(os.path.join(data_dir, "batches_new"))) == 0
assert len(os.listdir(os.path.join(data_dir, "vw"))) > 0
assert len(os.listdir(os.path.join(data_dir, "batches"))) > 0
# Dictionary was rebuilt and truncated to max_dictionary_size (10).
with open(os.path.join(data_dir, "dictionary.txt")) as f:
assert len(f.readlines()) == 10
resp = grpc_stub.StartTrainTopicModel(
StartTrainTopicModelRequest(Type=StartTrainTopicModelRequest.TrainType.UPDATE)
)
assert resp.Status == StartTrainTopicModelResponse.StartTrainTopicModelStatus.OK
while (
grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
== TrainTopicModelStatusResponse.TrainTopicModelStatus.RUNNING
):
sleep(1)
assert (
grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
== TrainTopicModelStatusResponse.TrainTopicModelStatus.COMPLETE
)
# UPDATE pass leaves the directories in the same drained/populated state.
assert len(os.listdir(os.path.join(data_dir, "vw_new"))) == 0
assert len(os.listdir(os.path.join(data_dir, "batches_new"))) == 0
assert len(os.listdir(os.path.join(data_dir, "vw"))) > 0
assert len(os.listdir(os.path.join(data_dir, "batches"))) > 0 | tests/train/test_train_server.py | import os
import shutil
from time import sleep
import pytest
import yaml
from ap.topic_model.v1.TopicModelBase_pb2 import (
DocId,
Document,
DocumentPack,
ParallelDocIds,
)
from ap.topic_model.v1.TopicModelTrain_pb2 import (
AddDocumentsToModelRequest,
AddDocumentsToModelResponse,
StartTrainTopicModelRequest,
StartTrainTopicModelResponse,
TrainTopicModelStatusRequest,
TrainTopicModelStatusResponse,
)
@pytest.fixture(scope="module")
def grpc_add_to_server():
    """Registration hook used by pytest-grpc to attach the train servicer.

    Returns the generated ``add_*_to_server`` function for the
    TopicModelTrain service.
    """
    from ap.topic_model.v1.TopicModelTrain_pb2_grpc import (
        add_TopicModelTrainServiceServicer_to_server as register_servicer,
    )

    return register_servicer
@pytest.fixture(scope="module")
def models_dir(tmpdir_factory):
    """Module-scoped temporary directory for trained model artifacts."""
    return tmpdir_factory.mktemp("models")
@pytest.fixture(scope="module")
def data_dir(tmpdir_factory):
    """Build a temporary data directory seeded with config files.

    Writes a ``classes.yaml`` mapping every supported language modality
    (``@<lang>``) to weight 1, and copies the test dictionary in place.
    """
    directory = tmpdir_factory.mktemp("data")
    languages = "ru en cs de es fr it ja kk ky nl pl pt tr zh".split()
    # One class id per language modality, all weighted equally.
    class_ids = {"@" + lang: 1 for lang in languages}
    with open(os.path.join(directory, "classes.yaml"), "w") as fh:
        yaml.safe_dump(class_ids, fh)
    shutil.copy(
        "./tests/data/dictionary.txt",
        os.path.join(directory, "dictionary.txt"),
    )
    return directory
@pytest.fixture(scope="module")
def test_conf():
    """Training hyper-parameters for the service under test."""
    return dict(
        num_epochs_full=3,
        num_epochs_update=2,
        num_topics=100,
        num_bg_topics=100,
        tau=0.2,
        gamma=0,
        max_dictionary_size=10,
    )
@pytest.fixture(scope="module")
def grpc_servicer(test_conf, models_dir, data_dir, bpe_models):
    """Instantiate the train-service implementation over the test dirs."""
    from ap.train.server import TopicModelTrainServiceImpl

    servicer = TopicModelTrainServiceImpl(bpe_models, test_conf, models_dir, data_dir)
    return servicer
@pytest.fixture(scope="module")
def grpc_stub_cls(grpc_channel):
    """Stub class pytest-grpc uses to build the client for the test channel."""
    from ap.topic_model.v1.TopicModelTrain_pb2_grpc import TopicModelTrainServiceStub
    return TopicModelTrainServiceStub
@pytest.fixture(scope="function")
def clean_data(data_dir):
    """Empty the ``vw_new`` inbox so each test starts with no queued docs."""
    inbox = os.path.join(data_dir, "vw_new")
    for name in os.listdir(inbox):
        os.remove(os.path.join(inbox, name))
@pytest.mark.usefixtures("clean_data")
def test_add_documents(models_dir, data_dir, grpc_stub):
    """A parallel pair of docs is accepted and written to vw_new/actual.txt."""
    pack = DocumentPack(
        Documents=[
            Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="en"),
            Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
        ]
    )
    parallel = [ParallelDocIds(Ids=[DocId(Lo=0, Hi=0)])]
    resp = grpc_stub.AddDocumentsToModel(
        AddDocumentsToModelRequest(Collection=pack, ParallelDocuments=parallel)
    )
    assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK
    # Both documents of the pair must land in the new-data vw file.
    with open(os.path.join(data_dir, "vw_new", "actual.txt"), "r") as fh:
        assert len(fh.readlines()) == 2
@pytest.mark.usefixtures("clean_data")
def test_add_documents_new_lang(models_dir, data_dir, grpc_stub):
    """An unknown language is accepted when paired with a known-language doc."""
    pack = DocumentPack(
        Documents=[
            Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="gf"),
            Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
        ]
    )
    parallel = [ParallelDocIds(Ids=[DocId(Lo=0, Hi=0), DocId(Lo=0, Hi=1)])]
    resp = grpc_stub.AddDocumentsToModel(
        AddDocumentsToModelRequest(Collection=pack, ParallelDocuments=parallel)
    )
    assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK
    with open(os.path.join(data_dir, "vw_new", "actual.txt"), "r") as fh:
        assert len(fh.readlines()) == 2
@pytest.mark.usefixtures("clean_data")
def test_add_documents_new_lang_no_translation(models_dir, data_dir, grpc_stub):
    """Docs in an unknown language with no parallel translation are rejected."""
    pack = DocumentPack(
        Documents=[
            Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="rq"),
            Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="rq"),
            Document(Id=DocId(Lo=0, Hi=2), Tokens=["c", "D"], Language="fr"),
        ]
    )
    parallel = [ParallelDocIds(Ids=[DocId(Lo=0, Hi=0)])]
    resp = grpc_stub.AddDocumentsToModel(
        AddDocumentsToModelRequest(Collection=pack, ParallelDocuments=parallel)
    )
    assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.NO_TRANSLATION
    # Nothing may be written when the request is rejected.
    assert not os.path.exists(os.path.join(data_dir, "vw_new", "actual.txt"))
def _run_training(grpc_stub, train_type):
    """Start a training run of *train_type* and block until it completes.

    Polls TrainTopicModelStatus once a second while RUNNING, then asserts
    the terminal status is COMPLETE.
    """
    resp = grpc_stub.StartTrainTopicModel(
        StartTrainTopicModelRequest(Type=train_type)
    )
    assert resp.Status == StartTrainTopicModelResponse.StartTrainTopicModelStatus.OK
    while (
        grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
        == TrainTopicModelStatusResponse.TrainTopicModelStatus.RUNNING
    ):
        sleep(1)
    assert (
        grpc_stub.TrainTopicModelStatus(TrainTopicModelStatusRequest()).Status
        == TrainTopicModelStatusResponse.TrainTopicModelStatus.COMPLETE
    )


def _assert_data_dirs_flushed(data_dir):
    """After training: new-data inboxes empty, permanent stores non-empty."""
    assert len(os.listdir(os.path.join(data_dir, "vw_new"))) == 0
    assert len(os.listdir(os.path.join(data_dir, "batches_new"))) == 0
    assert len(os.listdir(os.path.join(data_dir, "vw"))) > 0
    assert len(os.listdir(os.path.join(data_dir, "batches"))) > 0


def test_start_train(data_dir, grpc_stub):
    """FULL training consumes queued docs; UPDATE then runs on top of it."""
    docs = [
        Document(Id=DocId(Lo=0, Hi=0), Tokens=["a", "b"], Language="gf"),
        Document(Id=DocId(Lo=0, Hi=1), Tokens=["c", "D"], Language="en"),
        Document(Id=DocId(Lo=1, Hi=0), Tokens=["e", "f"], Language="gf"),
        Document(Id=DocId(Lo=1, Hi=1), Tokens=["c", "b"], Language="en"),
        Document(Id=DocId(Lo=2, Hi=0), Tokens=["a", "f"], Language="gf"),
        Document(Id=DocId(Lo=2, Hi=1), Tokens=["a", "b"], Language="en"),
    ]
    parallel_docs = [
        ParallelDocIds(Ids=[DocId(Lo=0, Hi=0), DocId(Lo=0, Hi=1)]),
        ParallelDocIds(Ids=[DocId(Lo=1, Hi=0), DocId(Lo=1, Hi=1)]),
        ParallelDocIds(Ids=[DocId(Lo=2, Hi=0), DocId(Lo=2, Hi=1)]),
    ]
    resp = grpc_stub.AddDocumentsToModel(
        AddDocumentsToModelRequest(
            Collection=DocumentPack(Documents=docs), ParallelDocuments=parallel_docs
        )
    )
    assert resp.Status == AddDocumentsToModelResponse.AddDocumentsStatus.OK

    _run_training(grpc_stub, StartTrainTopicModelRequest.TrainType.FULL)
    _assert_data_dirs_flushed(data_dir)
    # FULL training rebuilds the dictionary, capped at max_dictionary_size (10).
    with open(os.path.join(data_dir, "dictionary.txt")) as f:
        assert len(f.readlines()) == 10

    _run_training(grpc_stub, StartTrainTopicModelRequest.TrainType.UPDATE)
    _assert_data_dirs_flushed(data_dir)
import os
import sentencepiece as spm
DATAFILE = '../data/pg16457.txt'
MODELDIR = 'models'
spm.SentencePieceTrainer.train(f'''\
--model_type=bpe\
--input={DATAFILE}\
--model_prefix={MODELDIR}/bpe\
--vocab_size=500''')
sp = spm.SentencePieceProcessor()
sp.load(os.path.join(MODELDIR, 'bpe.model'))
input_string = "This is a test"
# encode: text => id
print(sp.encode_as_pieces(input_string)) # ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']
print(sp.encode_as_ids(input_string)) # [72, 435, 26, 101, 5, 3, 153]
# decode: id => text
print(sp.decode_pieces(['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est'])) # This is a test
print(sp.decode_ids([72, 435, 26, 101, 5, 3, 153])) # This is a test
# returns vocab size
print(f"vocab size: {sp.get_piece_size()}")
# id <=> piece conversion
print(f"id 101 to piece: {sp.id_to_piece(101)}")
print(f"Piece ▁is to id: {sp.piece_to_id('▁is')}")
# You can see from the code that we used the “id_to_piece” function which turns the ID of a token into its corresponding textual representation.
# This is important since SentencePiece enables the subword process to be reversible.
# You can encode your test sentence in ID’s or in subword tokens; what you use is up to you.
# The key is that you can decode either the IDs or the tokens perfectly back into the original sentences,
# including the original spaces. Previously this was not possible with other tokenizers since they just
# provided the tokens and it was not clear exactly what encoding scheme was used,
# e.g. how did they deal with spaces or punctuation? This is a big selling point for SentencePiece.
tokens = ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']
merged = "".join(tokens).replace('▁', " ").strip()
assert merged == input_string, "Input string and detokenized sentence didn't match"
# <unk>, <s>, </s> are defined by default. Their ids are (0, 1, 2)
# <s> and </s> are defined as 'control' symbol.
# control symbol: We only reserve ids for these tokens. Even if these tokens appear in the input text,
# they are not handled as one token. User needs to insert ids explicitly after encoding.
for id in range(3):
print(sp.id_to_piece(id), sp.is_control(id))
# We can define special tokens (symbols) to tweak the DNN behavior through the tokens. Typical examples are BERT's special symbols., e.g., [SEP] and [CLS].
# There are two types of special tokens:
# user defined symbols: Always treated as one token in any context. These symbols can appear in the input sentence.
# control symbol: We only reserve ids for these tokens. Even if these tokens appear in the input text, they are not handled as one token. User needs to insert ids explicitly after encoding.
# Refer to this for more details: https://colab.research.google.com/github/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb#scrollTo=dngckiPMcWbA
# ## Example of user defined symbols
spm.SentencePieceTrainer.train(f'''\
--model_type=bpe\
--input={DATAFILE}\
--model_prefix={MODELDIR}/bpe_user\
--user_defined_symbols=<sep>,<cls>\
--vocab_size=500''')
sp_user = spm.SentencePieceProcessor()
sp_user.load(os.path.join(MODELDIR, 'bpe_user.model'))
# ids are reserved in both mode.
# <unk>=0, <s>=1, </s>=2, <sep>=3, <cls>=4
# User-defined symbols allow these symbols to appear in the text.
print(sp_user.encode_as_pieces('this is a test<sep> hello world<cls>')) # ['▁this', '▁is', '▁a', '▁t', 'est', '<sep>', '▁he', 'll', 'o', '▁wor', 'ld', '<cls>']
print(sp_user.piece_to_id('<sep>')) # 3
print(sp_user.piece_to_id('<cls>')) # 4
print('3=', sp_user.decode_ids([3])) # decoded to <sep>
print('4=', sp_user.decode_ids([4])) # decoded to <cls>
print('bos=', sp_user.bos_id()) # 1
print('eos=', sp_user.eos_id()) # 2
print('unk=', sp_user.unk_id()) # 0
print('pad=', sp_user.pad_id()) # -1, disabled by default
print(sp_user.encode_as_ids('Hello world')) # [189, 320, 430, 233, 71]
# Prepend or append bos/eos ids.
print([sp_user.bos_id()] + sp_user.encode_as_ids('Hello world') + [sp_user.eos_id()]) # [1, 189, 320, 430, 233, 71, 2] | tokenization/bpe_tokenization.py | import os
# Train and exercise a SentencePiece BPE tokenizer on a sample corpus.
import os

import sentencepiece as spm

DATAFILE = '../data/pg16457.txt'
MODELDIR = 'models'

# SentencePiece does not create the output directory itself; without this the
# trainer fails when writing {MODELDIR}/bpe.model.
os.makedirs(MODELDIR, exist_ok=True)

spm.SentencePieceTrainer.train(f'''\
--model_type=bpe\
--input={DATAFILE}\
--model_prefix={MODELDIR}/bpe\
--vocab_size=500''')
sp = spm.SentencePieceProcessor()
sp.load(os.path.join(MODELDIR, 'bpe.model'))
input_string = "This is a test"
# encode: text => id
print(sp.encode_as_pieces(input_string))  # ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']
print(sp.encode_as_ids(input_string))  # [72, 435, 26, 101, 5, 3, 153]
# decode: id => text
print(sp.decode_pieces(['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']))  # This is a test
print(sp.decode_ids([72, 435, 26, 101, 5, 3, 153]))  # This is a test
# returns vocab size
print(f"vocab size: {sp.get_piece_size()}")
# id <=> piece conversion
print(f"id 101 to piece: {sp.id_to_piece(101)}")
print(f"Piece ▁is to id: {sp.piece_to_id('▁is')}")
# The "id_to_piece" function turns the ID of a token into its corresponding textual
# representation. This is important since SentencePiece makes the subword process
# reversible: you can encode a sentence as IDs or as subword tokens, and decode
# either perfectly back into the original sentence, including the original spaces.
# Other tokenizers often lose this information (e.g. how spaces and punctuation
# were handled), which is a big selling point for SentencePiece.
tokens = ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']
# '▁' (U+2581) marks a word boundary; replacing it with a space reverses tokenization.
merged = "".join(tokens).replace('▁', " ").strip()
assert merged == input_string, "Input string and detokenized sentence didn't match"
# <unk>, <s>, </s> are defined by default. Their ids are (0, 1, 2)
# <s> and </s> are defined as 'control' symbols.
# control symbol: We only reserve ids for these tokens. Even if these tokens appear
# in the input text, they are not handled as one token. User needs to insert ids
# explicitly after encoding.
for sym_id in range(3):  # renamed from `id` to avoid shadowing the builtin
    print(sp.id_to_piece(sym_id), sp.is_control(sym_id))
# We can define special tokens (symbols) to tweak the DNN behavior through the tokens.
# Typical examples are BERT's special symbols, e.g., [SEP] and [CLS].
# There are two types of special tokens:
# user defined symbols: Always treated as one token in any context. These symbols can appear in the input sentence.
# control symbol: We only reserve ids for these tokens. Even if these tokens appear in the input text, they are not handled as one token. User needs to insert ids explicitly after encoding.
# Refer to this for more details: https://colab.research.google.com/github/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb#scrollTo=dngckiPMcWbA

# ## Example of user defined symbols
spm.SentencePieceTrainer.train(f'''\
--model_type=bpe\
--input={DATAFILE}\
--model_prefix={MODELDIR}/bpe_user\
--user_defined_symbols=<sep>,<cls>\
--vocab_size=500''')
sp_user = spm.SentencePieceProcessor()
sp_user.load(os.path.join(MODELDIR, 'bpe_user.model'))
# ids are reserved in both modes:
# <unk>=0, <s>=1, </s>=2, <sep>=3, <cls>=4
# User-defined symbols allow these symbols to appear in the text.
print(sp_user.encode_as_pieces('this is a test<sep> hello world<cls>'))  # ['▁this', '▁is', '▁a', '▁t', 'est', '<sep>', '▁he', 'll', 'o', '▁wor', 'ld', '<cls>']
print(sp_user.piece_to_id('<sep>'))  # 3
print(sp_user.piece_to_id('<cls>'))  # 4
print('3=', sp_user.decode_ids([3]))  # decoded to <sep>
print('4=', sp_user.decode_ids([4]))  # decoded to <cls>
print('bos=', sp_user.bos_id())  # 1
print('eos=', sp_user.eos_id())  # 2
print('unk=', sp_user.unk_id())  # 0
print('pad=', sp_user.pad_id())  # -1, disabled by default
print(sp_user.encode_as_ids('Hello world'))  # [189, 320, 430, 233, 71]
# Prepend or append bos/eos ids.
print([sp_user.bos_id()] + sp_user.encode_as_ids('Hello world') + [sp_user.eos_id()])  # [1, 189, 320, 430, 233, 71, 2]
import os
import sys
import random
import numpy as np
from PIL import Image
import h5py
from glob import glob
from tqdm import tqdm
class Dataset():
def __init__(self, h5_path, mode, img_width, img_height, img_dim, is_mem=True):
"""For inputting/outputting font images' dataset.
Use hdf5 files.
Notes:
self.mode == 'r' -> read mode
'w' -> write mode
self.is_mem == True -> put data on memory. Very fast, but use a lot of memory space.
False -> read data from storage. Very slow, not recommended.
"""
self.mode = mode
self.img_width = img_width
self.img_height = img_height
self.img_dim = img_dim
self.is_mem = is_mem
assert mode == 'w' or mode == 'r', 'mode must be \'w\' or \'r\''
if self.mode == 'w':
if os.path.exists(h5_path):
while True:
inp = input('overwrite {}? (y/n)\n'.format(h5_path))
if inp == 'y' or inp == 'n':
break
if inp == 'n':
print('canceled')
sys.exit()
self.h5file = h5py.File(h5_path, mode)
if self.mode == 'r':
assert os.path.exists(h5_path), 'hdf5 file is not found: {}'.format(h5_path)
self.h5file = h5py.File(h5_path, mode)
if self.is_mem:
self._get = self._get_from_mem
else:
self._get = self._get_from_file
def load_imgs_into_h5(self, src_dir_path):
"""Load png images, and save into hdf5 file.
Load png images.
Directory tree have to be like this:
src_dir_path
├ A
│ ├ foo.png
│ ├ bar.png
│ └ baz.png
├ B
│ ├ foo.png
│ └ bar.png
└ C
├ foo.png
├ bar.png
└ baz.png
Don't have to put all character's image.
but image's size have to be same.
Args:
src_dir_path: source directory
"""
dir_paths = sorted(glob('{}/*'.format(src_dir_path)))
for dir_path in tqdm(dir_paths):
if not os.path.isdir(dir_path):
continue
img_paths = sorted(glob('{}/*.png'.format(dir_path)))
imgs = np.empty((len(img_paths), self.img_width, self.img_height, self.img_dim), dtype=np.float32)
fontnames = np.empty((len(img_paths)), dtype=object)
for i, img_path in enumerate(img_paths):
pil_img = Image.open(img_path)
np_img = np.asarray(pil_img)
np_img = (np_img.astype(np.float32) / 127.5) - 1.
if len(np_img.shape) == 2:
np_img = np_img[np.newaxis, :, :, np.newaxis]
if self.img_dim == 3:
np_img = np.repeat(np_img, 3, axis=3)
elif len(np_img.shape) == 3:
np_img = np_img[np.newaxis, :, :, :]
imgs[i] = np_img
fontnames[i] = os.path.basename(img_path).replace('.png', '')
self._save(os.path.basename(dir_path), imgs, fontnames)
def _save(self, char, imgs, fontnames):
"""Save images into hdf5 file.
Args:
char: character name
imgs: image data
fontname: font names
"""
self.h5file.create_group(char)
self.h5file.create_dataset(char + '/imgs', data=imgs)
self.h5file.create_dataset(char + '/fontnames', data=fontnames, dtype=h5py.special_dtype(vlen=str))
self.h5file.flush()
def set_load_data(self, train_rate=1.):
"""Setup data for outputting.
Make data queue for training(testing).
also make label_to_id dictionary.
Args:
train_rate: Rate of training data.
If train_rate == 1., testing data aren't prepared.
"""
print('preparing dataset...')
self.keys_queue_train = list()
self.label_to_id = dict()
fontnames_list = list()
all_fontnames = set()
for i, (key, val) in enumerate(self.h5file.items()):
fontnames = list()
for fontname in val['fontnames'].value:
fontnames.append(fontname)
all_fontnames.add(fontname)
fontnames_list.append(fontnames)
font_n = len(val['imgs'])
for j in range(font_n):
self.keys_queue_train.append((key, j))
self.label_to_id[key] = i
self.font_n = len(all_fontnames)
self.label_n = len(self.label_to_id)
if train_rate != 1.:
for i in range(self.label_n):
for fontname in all_fontnames:
assert fontname in fontnames_list[i], 'If you want to divide train/test, all of fonts must have same characters'
train_n = int(self.font_n * train_rate)
train_ids = random.sample(range(0, self.font_n), train_n)
self.keys_queue_test = list(filter(lambda x: x[1] not in train_ids, self.keys_queue_train))
self.keys_queue_train = list(filter(lambda x: x[1] in train_ids, self.keys_queue_train))
if self.is_mem:
self._put_on_mem()
def shuffle(self, is_test=False):
"""Shuffle data queue.
Args:
is_test: If you want to shuffle test data queue, set True.
"""
if is_test:
random.shuffle(self.keys_queue_test)
else:
random.shuffle(self.keys_queue_train)
def get_data_n(self, is_test=False):
"""Get # of data.
Args:
is_test: If you want to get # of test data queue, set True.
"""
if is_test:
return len(self.keys_queue_test)
return len(self.keys_queue_train)
def get_data_n_by_labels(self, labels, is_test=False):
"""Get # of data of selected labels.
Args:
labels: List of label names
is_test: If you want to get # of test data queue, set True.
"""
if is_test:
keys_queue = self.keys_queue_test
else:
keys_queue = self.keys_queue_train
filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
return len(filtered_keys_queue)
def get_ids_from_labels(self, labels):
"""Get label's id from selected labels.
Args:
labels: List of label names.
"""
ids = list()
for label in labels:
ids.append(self.label_to_id[label])
return ids
def get_batch(self, batch_i, batch_size, is_test=False, is_label=False):
"""Get data of a batch.
Divide data by batch_size, and get batch_i/batch_size data.
Args:
batch_i: index of batches.
batch_size: Batch size.
is_test: If you want to get from test data, set True.
is_label: If you want labels too, set True.
"""
keys_list = list()
for i in range(batch_i * batch_size, (batch_i + 1) * batch_size):
if is_test:
keys_list.append(self.keys_queue_test[i])
else:
keys_list.append(self.keys_queue_train[i])
return self._get(keys_list, is_label)
def get_batch_by_labels(self, batch_i, batch_size, labels, is_test=False, is_label=False):
"""Get data of a batch, from selected labels.
Divide data by batch_size, and get batch_i/batch_size data.
But only get from selected labels.
Args:
batch_i: index of batches.
batch_size: Batch size.
labels: List of label names.
is_test: If you want to get from test data, set True.
is_label: If you want labels too, set True.
"""
if is_test:
keys_queue = self.keys_queue_test
else:
keys_queue = self.keys_queue_train
filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
keys_list = list()
for i in range(batch_i * batch_size, (batch_i + 1) * batch_size):
keys_list.append(filtered_keys_queue[i])
return self._get(keys_list, is_label)
def get_random(self, batch_size, is_test=False, is_label=False):
"""Get data randomly.
Args:
batch_size: Batch size.
is_test: If you want to get from test data, set True.
is_label: If you want labels too, set True.
"""
keys_list = list()
for _ in range(batch_size):
if is_test:
keys_list.append(random.choice(self.keys_queue_test))
else:
keys_list.append(random.choice(self.keys_queue_train))
return self._get(keys_list, is_label)
def get_random_by_labels(self, batch_size, labels, is_test=False, is_label=False):
"""Get data randomly, from selected labels.
Args:
batch_size: Batch size.
labels: List of label names.
is_test: If you want to get from test data, set True.
is_label: If you want labels too, set True.
"""
if is_test:
keys_queue = self.keys_queue_test
else:
keys_queue = self.keys_queue_train
filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
keys_list = list()
for _ in range(batch_size):
keys_list.append(random.choice(filtered_keys_queue))
return self._get(keys_list, is_label)
def get_fontname_by_label_id(self, label, index):
"""Get fontname by label id.
Args:
label: String of label.
index: index of fontnames.
"""
assert self.is_mem, 'Sorry, this function is only available is_mem==True'
return str(self.fontnames[self.label_to_id[label]][index])
def _get_from_file(self, keys_list, is_label=False):
"""Get data from file in storage.
If self.is_mem == False, this function is called.
Args:
keys_list: List of keys that you get.
is_label: If this is true, you also get labels.
"""
imgs = np.empty((len(keys_list), self.img_width, self.img_height, self.img_dim), np.float32)
labels = list()
for i, keys in enumerate(keys_list):
img = self.h5file[keys[0] + '/imgs'].value[keys[1]]
imgs[i] = img[np.newaxis, :]
labels.append(keys[0])
if is_label:
return imgs, labels
return imgs
def _put_on_mem(self):
"""Put data on RAM.
If self.is_mem == True, this function is called.
"""
print('putting data on RAM...')
self.imgs = np.empty((self.label_n, self.font_n, self.img_width, self.img_height, self.img_dim), np.float32)
self.fontnames = np.empty((self.label_n, self.font_n), np.object)
self.label_to_font_n = dict()
for i, key in enumerate(self.h5file.keys()):
val = self.h5file[key + '/imgs'].value
if len(val) < self.font_n:
white_imgs = np.ones((self.font_n - len(val), self.img_width, self.img_height, self.img_dim), np.float32)
val = np.concatenate((val, white_imgs), axis=0)
self.imgs[i] = val
self.fontnames[i] = self.h5file[key + '/fontnames'].value
self.label_to_font_n[key] = len(self.imgs[i])
def _get_from_mem(self, keys_list, is_label=False):
"""Get data from RAM.
If self.is_mem == True, this function is called.
Args:
keys_list: List of keys that you get.
is_label: If this is true, you also get labels.
"""
imgs = np.empty((len(keys_list), self.img_width, self.img_height, self.img_dim), np.float32)
labels = list()
for i, keys in enumerate(keys_list):
assert keys[1] < self.label_to_font_n[keys[0]], 'Image is out of range'
img = self.imgs[self.label_to_id[keys[0]]][keys[1]]
imgs[i] = img[np.newaxis, :]
labels.append(keys[0])
if is_label:
return imgs, labels
return imgs | dataset.py | import os
import sys
import random
import numpy as np
from PIL import Image
import h5py
from glob import glob
from tqdm import tqdm
class Dataset():
    def __init__(self, h5_path, mode, img_width, img_height, img_dim, is_mem=True):
        """For inputting/outputting font images' dataset.
        Use hdf5 files.
        Notes:
            self.mode == 'r' -> read mode
                         'w' -> write mode
            self.is_mem == True -> put data on memory. Very fast, but use a lot of memory space.
                           False -> read data from storage. Very slow, not recommended.
        """
        self.mode = mode
        self.img_width = img_width
        self.img_height = img_height
        self.img_dim = img_dim
        self.is_mem = is_mem
        assert mode == 'w' or mode == 'r', 'mode must be \'w\' or \'r\''
        if self.mode == 'w':
            if os.path.exists(h5_path):
                # Ask before clobbering an existing dataset file.
                while True:
                    inp = input('overwrite {}? (y/n)\n'.format(h5_path))
                    if inp == 'y' or inp == 'n':
                        break
                if inp == 'n':
                    print('canceled')
                    sys.exit()
            self.h5file = h5py.File(h5_path, mode)
        if self.mode == 'r':
            assert os.path.exists(h5_path), 'hdf5 file is not found: {}'.format(h5_path)
            self.h5file = h5py.File(h5_path, mode)
        # Bind the read strategy once so batch getters can call self._get uniformly.
        if self.is_mem:
            self._get = self._get_from_mem
        else:
            self._get = self._get_from_file

    def load_imgs_into_h5(self, src_dir_path):
        """Load png images, and save into hdf5 file.
        Load png images.
        Directory tree have to be like this:
            src_dir_path
            ├ A
            │  ├ foo.png
            │  ├ bar.png
            │  └ baz.png
            ├ B
            │  ├ foo.png
            │  └ bar.png
            └ C
               ├ foo.png
               ├ bar.png
               └ baz.png
        Don't have to put all character's image.
        but image's size have to be same.
        Args:
            src_dir_path: source directory
        """
        dir_paths = sorted(glob('{}/*'.format(src_dir_path)))
        for dir_path in tqdm(dir_paths):
            if not os.path.isdir(dir_path):
                continue
            img_paths = sorted(glob('{}/*.png'.format(dir_path)))
            imgs = np.empty((len(img_paths), self.img_width, self.img_height, self.img_dim), dtype=np.float32)
            fontnames = np.empty((len(img_paths)), dtype=object)
            for i, img_path in enumerate(img_paths):
                pil_img = Image.open(img_path)
                np_img = np.asarray(pil_img)
                # Scale pixel values from [0, 255] into [-1, 1].
                np_img = (np_img.astype(np.float32) / 127.5) - 1.
                if len(np_img.shape) == 2:
                    # Grayscale: add a channel axis, replicating it when RGB is expected.
                    np_img = np_img[:, :, np.newaxis]
                    if self.img_dim == 3:
                        np_img = np.repeat(np_img, 3, axis=2)
                imgs[i] = np_img
                fontnames[i] = os.path.basename(img_path).replace('.png', '')
            self._save(os.path.basename(dir_path), imgs, fontnames)

    def _save(self, char, imgs, fontnames):
        """Save images into hdf5 file.
        Args:
            char: character name
            imgs: image data
            fontnames: font names
        """
        self.h5file.create_group(char)
        self.h5file.create_dataset(char + '/imgs', data=imgs)
        self.h5file.create_dataset(char + '/fontnames', data=fontnames, dtype=h5py.special_dtype(vlen=str))
        self.h5file.flush()

    def set_load_data(self, train_rate=1.):
        """Setup data for outputting.
        Make data queue for training(testing).
        also make label_to_id dictionary.
        Args:
            train_rate: Rate of training data.
                        If train_rate == 1., testing data aren't prepared.
        """
        print('preparing dataset...')
        self.keys_queue_train = list()
        self.label_to_id = dict()
        fontnames_list = list()
        all_fontnames = set()
        for i, (key, val) in enumerate(self.h5file.items()):
            fontnames = list()
            # NOTE: Dataset.value was removed in h5py 3.0; [()] reads the full dataset.
            for fontname in val['fontnames'][()]:
                fontnames.append(fontname)
                all_fontnames.add(fontname)
            fontnames_list.append(fontnames)
            font_n = len(val['imgs'])
            for j in range(font_n):
                self.keys_queue_train.append((key, j))
            self.label_to_id[key] = i
        self.font_n = len(all_fontnames)
        self.label_n = len(self.label_to_id)
        if train_rate != 1.:
            for i in range(self.label_n):
                for fontname in all_fontnames:
                    assert fontname in fontnames_list[i], 'If you want to divide train/test, all of fonts must have same characters'
            train_n = int(self.font_n * train_rate)
            # Split by font index so a font is entirely in train or in test.
            train_ids = random.sample(range(0, self.font_n), train_n)
            self.keys_queue_test = list(filter(lambda x: x[1] not in train_ids, self.keys_queue_train))
            self.keys_queue_train = list(filter(lambda x: x[1] in train_ids, self.keys_queue_train))
        if self.is_mem:
            self._put_on_mem()

    def shuffle(self, is_test=False):
        """Shuffle data queue.
        Args:
            is_test: If you want to shuffle test data queue, set True.
        """
        if is_test:
            random.shuffle(self.keys_queue_test)
        else:
            random.shuffle(self.keys_queue_train)

    def get_data_n(self, is_test=False):
        """Get # of data.
        Args:
            is_test: If you want to get # of test data queue, set True.
        """
        if is_test:
            return len(self.keys_queue_test)
        return len(self.keys_queue_train)

    def get_data_n_by_labels(self, labels, is_test=False):
        """Get # of data of selected labels.
        Args:
            labels: List of label names
            is_test: If you want to get # of test data queue, set True.
        """
        if is_test:
            keys_queue = self.keys_queue_test
        else:
            keys_queue = self.keys_queue_train
        filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
        return len(filtered_keys_queue)

    def get_ids_from_labels(self, labels):
        """Get label's id from selected labels.
        Args:
            labels: List of label names.
        """
        ids = list()
        for label in labels:
            ids.append(self.label_to_id[label])
        return ids

    def get_batch(self, batch_i, batch_size, is_test=False, is_label=False):
        """Get data of a batch.
        Divide data by batch_size, and get batch_i/batch_size data.
        Args:
            batch_i: index of batches.
            batch_size: Batch size.
            is_test: If you want to get from test data, set True.
            is_label: If you want labels too, set True.
        """
        keys_list = list()
        for i in range(batch_i * batch_size, (batch_i + 1) * batch_size):
            if is_test:
                keys_list.append(self.keys_queue_test[i])
            else:
                keys_list.append(self.keys_queue_train[i])
        return self._get(keys_list, is_label)

    def get_batch_by_labels(self, batch_i, batch_size, labels, is_test=False, is_label=False):
        """Get data of a batch, from selected labels.
        Divide data by batch_size, and get batch_i/batch_size data.
        But only get from selected labels.
        Args:
            batch_i: index of batches.
            batch_size: Batch size.
            labels: List of label names.
            is_test: If you want to get from test data, set True.
            is_label: If you want labels too, set True.
        """
        if is_test:
            keys_queue = self.keys_queue_test
        else:
            keys_queue = self.keys_queue_train
        filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
        keys_list = list()
        for i in range(batch_i * batch_size, (batch_i + 1) * batch_size):
            keys_list.append(filtered_keys_queue[i])
        return self._get(keys_list, is_label)

    def get_random(self, batch_size, is_test=False, is_label=False):
        """Get data randomly.
        Args:
            batch_size: Batch size.
            is_test: If you want to get from test data, set True.
            is_label: If you want labels too, set True.
        """
        keys_list = list()
        for _ in range(batch_size):
            if is_test:
                keys_list.append(random.choice(self.keys_queue_test))
            else:
                keys_list.append(random.choice(self.keys_queue_train))
        return self._get(keys_list, is_label)

    def get_random_by_labels(self, batch_size, labels, is_test=False, is_label=False):
        """Get data randomly, from selected labels.
        Args:
            batch_size: Batch size.
            labels: List of label names.
            is_test: If you want to get from test data, set True.
            is_label: If you want labels too, set True.
        """
        if is_test:
            keys_queue = self.keys_queue_test
        else:
            keys_queue = self.keys_queue_train
        filtered_keys_queue = list(filter(lambda x: x[0] in labels, keys_queue))
        keys_list = list()
        for _ in range(batch_size):
            keys_list.append(random.choice(filtered_keys_queue))
        return self._get(keys_list, is_label)

    def get_fontname_by_label_id(self, label, index):
        """Get fontname by label id.
        Args:
            label: String of label.
            index: index of fontnames.
        """
        assert self.is_mem, 'Sorry, this function is only available is_mem==True'
        return str(self.fontnames[self.label_to_id[label]][index])

    def _get_from_file(self, keys_list, is_label=False):
        """Get data from file in storage.
        If self.is_mem == False, this function is called.
        Args:
            keys_list: List of keys that you get.
            is_label: If this is true, you also get labels.
        """
        imgs = np.empty((len(keys_list), self.img_width, self.img_height, self.img_dim), np.float32)
        labels = list()
        for i, keys in enumerate(keys_list):
            # Index the h5py dataset directly: .value was removed in h5py 3.0,
            # and this avoids loading the whole per-character array per lookup.
            imgs[i] = self.h5file[keys[0] + '/imgs'][keys[1]]
            labels.append(keys[0])
        if is_label:
            return imgs, labels
        return imgs

    def _put_on_mem(self):
        """Put data on RAM.
        If self.is_mem == True, this function is called.
        """
        print('putting data on RAM...')
        self.imgs = np.empty((self.label_n, self.font_n, self.img_width, self.img_height, self.img_dim), np.float32)
        # np.object was removed in NumPy 1.24; the builtin `object` is equivalent.
        self.fontnames = np.empty((self.label_n, self.font_n), object)
        self.label_to_font_n = dict()
        for i, key in enumerate(self.h5file.keys()):
            val = self.h5file[key + '/imgs'][()]
            if len(val) < self.font_n:
                # Pad missing fonts for this character with white images.
                white_imgs = np.ones((self.font_n - len(val), self.img_width, self.img_height, self.img_dim), np.float32)
                val = np.concatenate((val, white_imgs), axis=0)
            self.imgs[i] = val
            self.fontnames[i] = self.h5file[key + '/fontnames'][()]
            self.label_to_font_n[key] = len(self.imgs[i])

    def _get_from_mem(self, keys_list, is_label=False):
        """Get data from RAM.
        If self.is_mem == True, this function is called.
        Args:
            keys_list: List of keys that you get.
            is_label: If this is true, you also get labels.
        """
        imgs = np.empty((len(keys_list), self.img_width, self.img_height, self.img_dim), np.float32)
        labels = list()
        for i, keys in enumerate(keys_list):
            assert keys[1] < self.label_to_font_n[keys[0]], 'Image is out of range'
            imgs[i] = self.imgs[self.label_to_id[keys[0]]][keys[1]]
            labels.append(keys[0])
        if is_label:
            return imgs, labels
        return imgs
import json
import re
from io import BytesIO
from tabnanny import verbose
from urllib.parse import urlparse
import pandas as pd
import PyPDF2
from datasource.io import get_data_from_url
from datasource.utils import get_logger
# DEFINING GLOBAL VARIABLES
API_URL = "https://aides-territoires.beta.gouv.fr/api/aids/"
CRITERIA_WORDS = ["conditions", "critères", "éligible", "éligibilité"]
logger = get_logger("SCRAP_PDF_FROM_API")
NUM_SCRAP = 1
def is_url_working(url, logger):
    """Checks that url is working or not.

    Controls:
      - url start with http
      - Status code not 200
      - Url redirect into another one
      - also catch unexpected error

    Parameters
    ----------
    url : str
        Url to check
    logger : Logger
        logger to display information

    Returns
    -------
    bool
        Whether the url is valid or not
    str
        string explaining the problem (the response object on success)
    """
    if not url.startswith("http"):
        return False, "url not starting with http"
    try:
        resp = get_data_from_url(url)
    except Exception as err:
        logger.error(err)
        return False, "Unknown error"
    status = resp.status_code
    if status != 200:
        logger.error("Status code not 200, instead %i" % status)
        return False, "Status %i" % status
    if resp.url != url:
        # The request was redirected somewhere else.
        logger.error("url changed to %s" % resp.url)
        return False, "url changed to %s" % resp.url
    return True, resp
def get_pdf_content_from_url(pdf_url):
    """Download a PDF and return all of its text, lowercased.

    Parameters
    ----------
    pdf_url : str
        url of the pdf

    Returns
    -------
    str
        string of all the PDF content

    Raises
    ------
    ValueError
        pdf_url must ends with '.pdf'
    """
    if not pdf_url.endswith(".pdf"):
        raise ValueError("pdf_url must ends with '.pdf'")
    raw_bytes = get_data_from_url(pdf_url).content
    pages = []
    with BytesIO(raw_bytes) as stream:
        reader = PyPDF2.PdfFileReader(stream)
        # One leading space per page keeps words from adjacent pages apart.
        for page_number in range(reader.getNumPages()):
            pages.append(" " + reader.getPage(page_number).extractText().lower())
    return "".join(pages)
def scrap_pdf_in_url(resp):
    """Find all pdf in a html page and scrap them

    Parameters
    ----------
    resp :
        requests response of an url

    Returns
    -------
    list
        List of pdf urls whose text contains at least one criteria word
    """
    # Any quote-free run of characters ending in ".pdf".
    pdf_urls = re.findall(r"(?:(?!\"|').)*?\.pdf", resp.text)
    if len(pdf_urls) == 0:
        return []
    pdf_with_criterias = []
    for pdf_url in set(pdf_urls):
        if pdf_url.startswith("/"):
            # Relative link: rebuild an absolute url from the page's domain.
            parsed_url = urlparse(resp.url)
            pdf_url = "http://" + parsed_url.netloc + pdf_url
        try:
            txt = get_pdf_content_from_url(pdf_url)
        except Exception:
            # One unreadable/unreachable PDF must not abort the scan.
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.
            continue
        if any(word in txt for word in CRITERIA_WORDS):
            pdf_with_criterias.append(pdf_url)
    return pdf_with_criterias
def get_one_aide_data(aide, logger):
    """Retrieves all information about one "aide"
    from the API defined in global

    Parameters
    ----------
    aide : dict
        One "aide" from the Mission transition API
    logger :
        Logger to display all informations

    Returns
    -------
    dict
        Result of the "aide" analysis
    """
    # Module-level progress counter shared across calls.
    global NUM_SCRAP
    name = aide["name"]
    aide_url = aide["url"]
    orig_url = aide["origin_url"]
    app_url = aide["application_url"]
    logger.info("[%i] - Analyse %s" % (NUM_SCRAP, aide_url))
    NUM_SCRAP = NUM_SCRAP + 1
    current_aide = {"name": name, "url": aide_url}
    pdfs = []
    # check orig_url
    is_ok, resp = is_url_working(orig_url, logger)
    # NOTE(review): "error_orig" is True when the url *worked* — the key
    # name is misleading but kept for downstream compatibility.
    current_aide["error_orig"] = is_ok
    if is_ok:
        pdfs += scrap_pdf_in_url(resp)
    else:
        # On failure ``resp`` is the explanation string from is_url_working.
        current_aide["type_error_orig"] = resp
    # check app_url
    is_ok, resp = is_url_working(app_url, logger)
    current_aide["error_app"] = is_ok
    if is_ok:
        pdfs += scrap_pdf_in_url(resp)
    else:
        current_aide["type_error_app"] = resp
    current_aide["pdfs"] = pdfs
    current_aide["pdf_avec_criteres"] = False
    if len(pdfs) != 0:
        current_aide["pdf_avec_criteres"] = True
    return current_aide
def get_data_aides_results(data, logger):
    """Analyse all "aides" from the data given by the API.

    Parameters
    ----------
    data : dict
        Return from the Mission transition API
    logger :
        Logger to display all informations

    Returns
    -------
    list of dict
        One analysis result per "aide".
    """
    aides_list = []
    for aide in data["results"]:
        try:
            aides_list.append(get_one_aide_data(aide, logger))
        except Exception as err:
            # Log which "aide" failed before propagating the error.
            logger.error(err)
            raise
    return aides_list
def scrap_current_api_page(url, logger=None):
    """Scrap current API page from aides territoires API.
    Returns the next page if there is one else it returns None
    """
    # NOTE(review): ``logger`` defaults to None but is used unconditionally
    # below — callers must always pass a logger or this raises AttributeError.
    logger.info("Scrap URL : %s" % url)
    data = get_data_from_url(url)
    next_url = None
    if data.status_code != 200:
        logger.info(
            "Status code different from 200 (HTTP %i instead)" % data.status_code
        )
        # No results for a failed page.
        return None, []
    # Convert to json object
    data = json.loads(data.text)
    # Whether the url has a next step or not
    if "next" in data:
        next_url = data["next"]
    aides_list = get_data_aides_results(data, logger)
    return next_url, aides_list
def srap_pdf():
    """Main script to scrap and find pdfs given the differents url"""
    # NOTE(review): the name looks like a typo of ``scrap_pdf`` but is kept
    # because the __main__ guard calls it by this name.
    aides_list = []
    url, aides = scrap_current_api_page(API_URL, logger=logger)
    aides_list += aides
    # Follow the API pagination until no "next" page is returned.
    while url is not None:
        url, aides = scrap_current_api_page(url, logger=logger)
        aides_list += aides
    df = pd.DataFrame(aides_list)
    df.to_csv("data/aides_v2.csv", index=False)
if __name__ == "__main__":
    srap_pdf()

import json
import re
from io import BytesIO
from tabnanny import verbose
from urllib.parse import urlparse
import pandas as pd
import PyPDF2
from datasource.io import get_data_from_url
from datasource.utils import get_logger
# DEFINING GLOBAL VARIABLES
API_URL = "https://aides-territoires.beta.gouv.fr/api/aids/"
CRITERIA_WORDS = ["conditions", "critères", "éligible", "éligibilité"]
logger = get_logger("SCRAP_PDF_FROM_API")
NUM_SCRAP = 1
def is_url_working(url, logger):
    """Checks that url is working or not.
    Controls:
    - url start with http
    - Status code not 200
    - Url redirect into another one
    - also catch unexpected error
    Parameters
    ----------
    url : str
        Url to check
    logger : Logger
        logger to display information
    Returns
    -------
    bool
        Whether the url is valid or not
    str
        string explaining the problem (the response object on success)
    """
    if not url.startswith("http"):
        return False, "url not starting with http"
    # logger.info("%s" % url)
    try:
        resp = get_data_from_url(url)
    except Exception as e:
        logger.error(e)
        return False, "Unknown error"
    if resp.status_code != 200:
        logger.error("Status code not 200, instead %i" % resp.status_code)
        return False, "Status %i" % resp.status_code
    if resp.url != url:
        # The request was redirected somewhere else.
        logger.error("url changed to %s" % resp.url)
        return False, "url changed to %s" % resp.url
    return True, resp
def get_pdf_content_from_url(pdf_url):
    """Extract all the text from a pdf url
    Parameters
    ----------
    pdf_url : str
        url of the pdf
    Returns
    -------
    str
        string of all the PDF content
    Raises
    ------
    ValueError
        pdf_url must ends with '.pdf'
    """
    if not pdf_url.endswith(".pdf"):
        raise ValueError("pdf_url must ends with '.pdf'")
    response = get_data_from_url(pdf_url)
    my_raw_data = response.content
    full_text = ""
    with BytesIO(my_raw_data) as data:
        # NOTE(review): PdfFileReader is the legacy PyPDF2 1.x API.
        read_pdf = PyPDF2.PdfFileReader(data)
        for page in range(read_pdf.getNumPages()):
            full_text += " " + read_pdf.getPage(page).extractText().lower()
    return full_text
def scrap_pdf_in_url(resp):
    """Find all pdf in a html page and scrap them
    Parameters
    ----------
    resp :
        requests response of an url
    Returns
    -------
    list
        List of pdf find in an url
    """
    # pdf_urls = re.findall(r"http.*\.pdf", resp.text)
    # Any quote-free run of characters ending in ".pdf".
    pdf_urls = re.findall(r"(?:(?!\"|').)*?\.pdf", resp.text)
    if len(pdf_urls) == 0:
        return []
    pdf_with_criterias = []
    for pdf_url in set(pdf_urls):
        if pdf_url.startswith("/"):
            # Relative link: rebuild an absolute url from the page's domain.
            parsed_url = urlparse(resp.url)
            domain = parsed_url.netloc
            pdf_url = "http://" + domain + pdf_url
        # logger.debug("PDF Analyse : %s" % pdf_url)
        try:
            txt = get_pdf_content_from_url(pdf_url)
            find_word = []
            for w in CRITERIA_WORDS:
                if w in txt:
                    find_word.append(w)
            if len(find_word) != 0:
                # logger.debug("CRITERIAS FOUND with %s" % (" ".join(find_word)))
                pdf_with_criterias.append(pdf_url)
        # NOTE(review): bare ``except`` also swallows KeyboardInterrupt;
        # consider narrowing to ``except Exception``.
        except:
            continue
    return pdf_with_criterias
def get_one_aide_data(aide, logger):
    """Retrieves all information about one "aide"
    from the API defined in global
    Parameters
    ----------
    aide : dict
        One "aide" from the Mission transition API
    logger :
        Logger to display all informations
    Returns
    -------
    dict
        Result of the "aide" analysis
    """
    # Module-level progress counter shared across calls.
    global NUM_SCRAP
    name = aide["name"]
    aide_url = aide["url"]
    orig_url = aide["origin_url"]
    app_url = aide["application_url"]
    logger.info("[%i] - Analyse %s" % (NUM_SCRAP, aide_url))
    NUM_SCRAP = NUM_SCRAP + 1
    current_aide = {"name": name, "url": aide_url}
    pdfs = []
    # check orig_url
    is_ok, resp = is_url_working(orig_url, logger)
    # NOTE(review): "error_orig" is True when the url *worked*.
    current_aide["error_orig"] = is_ok
    if is_ok:
        pdfs += scrap_pdf_in_url(resp)
    else:
        # On failure ``resp`` is the explanation string from is_url_working.
        current_aide["type_error_orig"] = resp
    # check app_url
    is_ok, resp = is_url_working(app_url, logger)
    current_aide["error_app"] = is_ok
    if is_ok:
        pdfs += scrap_pdf_in_url(resp)
    else:
        current_aide["type_error_app"] = resp
    current_aide["pdfs"] = pdfs
    current_aide["pdf_avec_criteres"] = False
    if len(pdfs) != 0:
        current_aide["pdf_avec_criteres"] = True
    return current_aide
def get_data_aides_results(data, logger):
    """Analyse all "aides" from the data given
    by the API
    Parameters
    ----------
    data : dict
        Return from the Mission transition API
    logger :
        Logger to display all informations
    Returns
    -------
    list of dict
        One analysis result per "aide".
    """
    aides_list = []
    for aide in data["results"]:
        try:
            current_aide = get_one_aide_data(aide, logger)
            aides_list.append(current_aide)
        except Exception as e:
            # Log which "aide" failed before propagating the error.
            logger.error(e)
            raise e
    return aides_list
def scrap_current_api_page(url, logger=None):
    """Scrap current API page from aides territoires API.
    Returns the next page if there is one else it returns None
    """
    # NOTE(review): ``logger`` defaults to None but is used unconditionally —
    # callers must always pass a logger or this raises AttributeError.
    logger.info("Scrap URL : %s" % url)
    data = get_data_from_url(url)
    next_url = None
    if data.status_code != 200:
        logger.info(
            "Status code different from 200 (HTTP %i instead)" % data.status_code
        )
        # No results for a failed page.
        return None, []
    # Convert to json object
    data = json.loads(data.text)
    # Whether the url has a next step or not
    if "next" in data:
        next_url = data["next"]
    aides_list = get_data_aides_results(data, logger)
    return next_url, aides_list
def srap_pdf():
    """Main script to scrap and find pdfs given the differents url"""
    # NOTE(review): name looks like a typo of ``scrap_pdf`` but is kept
    # because the __main__ guard calls it by this name.
    aides_list = []
    url, aides = scrap_current_api_page(API_URL, logger=logger)
    aides_list += aides
    # Follow the API pagination until no "next" page is returned.
    while url is not None:
        url, aides = scrap_current_api_page(url, logger=logger)
        aides_list += aides
    df = pd.DataFrame(aides_list)
    df.to_csv("data/aides_v2.csv", index=False)
if __name__ == "__main__":
    srap_pdf()
import math
import os
import pytest
import torch
import pearl.bayesnet as bayesnet
import pearl.common as common
import pearl.nodes.categorical as categorical
import pearl.nodes.continuous as continuous
import pearl.nodes.deterministic as deterministic
ABS_TOL = 1e-4
def test_yaml_encoding_for_categorical_with_dirichlet_prior():
    """Encoding keeps type, domain, plates, parents, observed flag and alpha."""
    node = categorical.CategoricalNodeWithDirichletPrior(
        name="name",
        domain=["a", "b"],
        plates=["plate1", "plate2"],
        parents=[],
        observed=True,
        prior_params={"alpha": torch.tensor([1.0, 2.0])},
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "domain",
        "plates",
        "parents",
        "prior_params",
        "observed",
    }
    assert yaml_encoding["type"] == "CategoricalNodeWithDirichletPrior"
    assert yaml_encoding["domain"] == ["a", "b"]
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert yaml_encoding["observed"]
    assert yaml_encoding["prior_params"].keys() == {"alpha"}
    assert len(yaml_encoding["prior_params"]["alpha"]) == 2
    # Tensors are serialized to plain floats; compare with a tolerance.
    assert math.isclose(yaml_encoding["prior_params"]["alpha"][0], 1.0, abs_tol=ABS_TOL)
    assert math.isclose(yaml_encoding["prior_params"]["alpha"][1], 2.0, abs_tol=ABS_TOL)
def test_yaml_encoding_for_categorical_node_with_continuous_parents():
    """Encoding of a GLM categorical node records parent names and all priors."""
    parent1 = categorical.CategoricalNodeWithDirichletPrior(
        name="parent1",
        domain=["a", "b"],
        parents=[],
        plates=[],
    )
    parent2 = continuous.ContinuousNodeWithNormalDistribution(
        name="parent2",
        parents=[],
        plates=[],
    )
    node = categorical.GeneralizedLinearNode(
        name="name",
        domain=["a", "b"],
        plates=["plate1", "plate2"],
        parents=[parent1, parent2],
        observed=True,
        prior_params={
            "bias_mean": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "bias_scale": torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
            "weights_mean": torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]]]),
            "weights_scale": torch.tensor([[[5.0, 6.0]], [[7.0, 8.0]]]),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding["type"] == "GeneralizedLinearNode"
    assert yaml_encoding["domain"] == ["a", "b"]
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    # Parents are serialized by name, in order.
    assert yaml_encoding["parents"] == ["parent1", "parent2"]
    assert yaml_encoding["observed"]
    assert set(yaml_encoding["prior_params"].keys()) == {
        "weights_mean",
        "weights_scale",
        "bias_mean",
        "bias_scale",
    }
    assert len(yaml_encoding["prior_params"]["weights_mean"]) == 2
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0][1], 2.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_continuous_node_with_continuous_parents():
    """Encoding of a conditional linear Gaussian node keeps all five priors."""
    parent1 = categorical.CategoricalNodeWithDirichletPrior(
        name="parent1",
        domain=["a", "b"],
        parents=[],
        plates=[],
    )
    parent2 = continuous.ContinuousNodeWithNormalDistribution(
        name="parent2",
        parents=[],
        plates=[],
    )
    node = continuous.ConditionalLinearGaussianNode(
        name="name",
        plates=["plate1", "plate2"],
        parents=[parent1, parent2],
        observed=True,
        prior_params={
            "bias_mean": torch.tensor([0.0, 0.0]),
            "bias_scale": torch.tensor([1.0, 1.0]),
            "weights_mean": torch.tensor([[1.0], [2.0]]),
            "weights_scale": torch.tensor([[5.0], [6.0]]),
            "scale_scale": torch.tensor([1.0, 1.0]),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding["type"] == "ConditionalLinearGaussianNode"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == ["parent1", "parent2"]
    assert yaml_encoding["observed"]
    assert set(yaml_encoding["prior_params"].keys()) == {
        "weights_mean",
        "weights_scale",
        "bias_mean",
        "bias_scale",
        "scale_scale",
    }
    assert len(yaml_encoding["prior_params"]["weights_mean"]) == 2
    # Tensors are serialized to plain floats; compare with a tolerance.
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][1][0], 2.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["bias_mean"][0], 0.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["bias_scale"][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["scale_scale"][0], 1.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_continuous_node_with_normal_distribution():
    """Encoding of a plain normal node keeps mean/scale hyper-parameters."""
    node = continuous.ContinuousNodeWithNormalDistribution(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=True,
        prior_params={
            "mean_mean": torch.tensor(0.0),
            "mean_scale": torch.tensor(1.0),
            "scale_scale": torch.tensor(1.0),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "prior_params",
        "observed",
    }
    assert yaml_encoding["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert yaml_encoding["observed"]
    assert math.isclose(
        yaml_encoding["prior_params"]["mean_mean"], 0.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["mean_scale"], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["scale_scale"], 1.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_exponential_node():
    """Deterministic Exponential nodes encode without prior_params."""
    node = deterministic.Exponential(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=False,
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "observed",
    }
    assert yaml_encoding["type"] == "Exponential"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert not yaml_encoding["observed"]
def test_yaml_encoding_for_sum_node():
    """Deterministic Sum nodes encode without prior_params."""
    node = deterministic.Sum(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=False,
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "observed",
    }
    assert yaml_encoding["type"] == "Sum"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert not yaml_encoding["observed"]
def test_yaml_encoding_for_bayesnet(tmp_path):
    """A whole network encodes its device, name, plates and node topology."""
    # NOTE(review): the ``tmp_path`` fixture is unused — candidate for removal.
    bn = bayesnet.BayesianNetwork("bn", torch.device("cpu", 0))
    bn.add_variable(
        categorical.CategoricalNodeWithDirichletPrior,
        "a",
        [],
        [],
        domain=["yes", "no"],
    )
    bn.add_variable(continuous.ContinuousNodeWithNormalDistribution, "b", ["a"], [])
    bn.add_variable(deterministic.Exponential, "c", ["b"], [])
    bn.add_variable(continuous.ContinuousNodeWithNormalDistribution, "d", [], [])
    bn.add_variable(deterministic.Sum, "e", ["c", "d"], [])
    yaml_encoding = bn.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "encodingVersion",
        "device",
        "name",
        "plates",
        "nodes",
    }
    assert yaml_encoding["encodingVersion"] == bayesnet.ENCODING_VERSION
    assert yaml_encoding["device"] == {"type": "cpu", "index": 0}
    assert yaml_encoding["name"] == "bn"
    assert yaml_encoding["plates"] == {}
    # Since there are separate unit-tests for encoding of nodes we
    # will perform basic checks.
    assert yaml_encoding["nodes"].keys() == {"a", "b", "c", "d", "e"}
    assert yaml_encoding["nodes"]["a"]["parents"] == []
    assert yaml_encoding["nodes"]["a"]["type"] == "CategoricalNodeWithDirichletPrior"
    assert yaml_encoding["nodes"]["b"]["parents"] == ["a"]
    assert yaml_encoding["nodes"]["b"]["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["nodes"]["c"]["parents"] == ["b"]
    assert yaml_encoding["nodes"]["c"]["type"] == "Exponential"
    assert yaml_encoding["nodes"]["d"]["parents"] == []
    assert yaml_encoding["nodes"]["d"]["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["nodes"]["e"]["parents"] == ["c", "d"]
    assert yaml_encoding["nodes"]["e"]["type"] == "Sum"
# Directory containing the YAML fixtures used by test_from_yaml below.
FIXTURE_DIR = os.path.dirname(os.path.realpath(__file__))
BN_YAML = "bn.yaml"
BN_MINIMAL_YAML = "bn_minimal.yaml"
@pytest.mark.parametrize("yaml_file", [(BN_YAML), (BN_MINIMAL_YAML)])
def test_from_yaml(yaml_file):
yaml_file_path = os.path.join(FIXTURE_DIR, BN_YAML)
bn = bayesnet.from_yaml(yaml_file_path)
assert isinstance(bn, bayesnet.BayesianNetwork)
assert bn.name == "bn"
assert common.same_device(bn.device, torch.device("cpu", 0))
assert set(bn.dag.nodes) == {"a", "b", "c", "d", "e"}
assert bn.plate_dict == dict()
assert set(bn.dag.predecessors("a")) == set()
assert isinstance(
bn.get_node_object("a"), categorical.CategoricalNodeWithDirichletPrior
)
assert set(bn.dag.predecessors("b")) == {"a"}
assert isinstance(
bn.get_node_object("b"), continuous.ContinuousNodeWithNormalDistribution
)
assert set(bn.dag.predecessors("c")) == {"b"}
assert isinstance(bn.get_node_object("c"), deterministic.Exponential)
assert set(bn.dag.predecessors("d")) == set()
assert isinstance(
bn.get_node_object("d"), continuous.ContinuousNodeWithNormalDistribution
)
assert set(bn.dag.predecessors("e")) == {"c", "d"}
assert isinstance(bn.get_node_object("e"), deterministic.Sum) | tests/serialization_test.py | import math
import os
import pytest
import torch
import pearl.bayesnet as bayesnet
import pearl.common as common
import pearl.nodes.categorical as categorical
import pearl.nodes.continuous as continuous
import pearl.nodes.deterministic as deterministic
ABS_TOL = 1e-4
def test_yaml_encoding_for_categorical_with_dirichlet_prior():
    """Encoding keeps type, domain, plates, parents, observed flag and alpha."""
    node = categorical.CategoricalNodeWithDirichletPrior(
        name="name",
        domain=["a", "b"],
        plates=["plate1", "plate2"],
        parents=[],
        observed=True,
        prior_params={"alpha": torch.tensor([1.0, 2.0])},
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "domain",
        "plates",
        "parents",
        "prior_params",
        "observed",
    }
    assert yaml_encoding["type"] == "CategoricalNodeWithDirichletPrior"
    assert yaml_encoding["domain"] == ["a", "b"]
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert yaml_encoding["observed"]
    assert yaml_encoding["prior_params"].keys() == {"alpha"}
    assert len(yaml_encoding["prior_params"]["alpha"]) == 2
    # Tensors are serialized to plain floats; compare with a tolerance.
    assert math.isclose(yaml_encoding["prior_params"]["alpha"][0], 1.0, abs_tol=ABS_TOL)
    assert math.isclose(yaml_encoding["prior_params"]["alpha"][1], 2.0, abs_tol=ABS_TOL)
def test_yaml_encoding_for_categorical_node_with_continuous_parents():
    """Encoding of a GLM categorical node records parent names and all priors."""
    parent1 = categorical.CategoricalNodeWithDirichletPrior(
        name="parent1",
        domain=["a", "b"],
        parents=[],
        plates=[],
    )
    parent2 = continuous.ContinuousNodeWithNormalDistribution(
        name="parent2",
        parents=[],
        plates=[],
    )
    node = categorical.GeneralizedLinearNode(
        name="name",
        domain=["a", "b"],
        plates=["plate1", "plate2"],
        parents=[parent1, parent2],
        observed=True,
        prior_params={
            "bias_mean": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "bias_scale": torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
            "weights_mean": torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]]]),
            "weights_scale": torch.tensor([[[5.0, 6.0]], [[7.0, 8.0]]]),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding["type"] == "GeneralizedLinearNode"
    assert yaml_encoding["domain"] == ["a", "b"]
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    # Parents are serialized by name, in order.
    assert yaml_encoding["parents"] == ["parent1", "parent2"]
    assert yaml_encoding["observed"]
    assert set(yaml_encoding["prior_params"].keys()) == {
        "weights_mean",
        "weights_scale",
        "bias_mean",
        "bias_scale",
    }
    assert len(yaml_encoding["prior_params"]["weights_mean"]) == 2
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0][1], 2.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_continuous_node_with_continuous_parents():
    """Encoding of a conditional linear Gaussian node keeps all five priors."""
    parent1 = categorical.CategoricalNodeWithDirichletPrior(
        name="parent1",
        domain=["a", "b"],
        parents=[],
        plates=[],
    )
    parent2 = continuous.ContinuousNodeWithNormalDistribution(
        name="parent2",
        parents=[],
        plates=[],
    )
    node = continuous.ConditionalLinearGaussianNode(
        name="name",
        plates=["plate1", "plate2"],
        parents=[parent1, parent2],
        observed=True,
        prior_params={
            "bias_mean": torch.tensor([0.0, 0.0]),
            "bias_scale": torch.tensor([1.0, 1.0]),
            "weights_mean": torch.tensor([[1.0], [2.0]]),
            "weights_scale": torch.tensor([[5.0], [6.0]]),
            "scale_scale": torch.tensor([1.0, 1.0]),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding["type"] == "ConditionalLinearGaussianNode"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == ["parent1", "parent2"]
    assert yaml_encoding["observed"]
    assert set(yaml_encoding["prior_params"].keys()) == {
        "weights_mean",
        "weights_scale",
        "bias_mean",
        "bias_scale",
        "scale_scale",
    }
    assert len(yaml_encoding["prior_params"]["weights_mean"]) == 2
    # Tensors are serialized to plain floats; compare with a tolerance.
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][0][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["weights_mean"][1][0], 2.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["bias_mean"][0], 0.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["bias_scale"][0], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["scale_scale"][0], 1.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_continuous_node_with_normal_distribution():
    """Encoding of a plain normal node keeps mean/scale hyper-parameters."""
    node = continuous.ContinuousNodeWithNormalDistribution(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=True,
        prior_params={
            "mean_mean": torch.tensor(0.0),
            "mean_scale": torch.tensor(1.0),
            "scale_scale": torch.tensor(1.0),
        },
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "prior_params",
        "observed",
    }
    assert yaml_encoding["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert yaml_encoding["observed"]
    assert math.isclose(
        yaml_encoding["prior_params"]["mean_mean"], 0.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["mean_scale"], 1.0, abs_tol=ABS_TOL
    )
    assert math.isclose(
        yaml_encoding["prior_params"]["scale_scale"], 1.0, abs_tol=ABS_TOL
    )
def test_yaml_encoding_for_exponential_node():
    """Deterministic Exponential nodes encode without prior_params."""
    node = deterministic.Exponential(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=False,
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "observed",
    }
    assert yaml_encoding["type"] == "Exponential"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert not yaml_encoding["observed"]
def test_yaml_encoding_for_sum_node():
    """Deterministic Sum nodes encode without prior_params."""
    node = deterministic.Sum(
        name="name",
        plates=["plate1", "plate2"],
        parents=[],
        observed=False,
    )
    yaml_encoding = node.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "type",
        "plates",
        "parents",
        "observed",
    }
    assert yaml_encoding["type"] == "Sum"
    assert yaml_encoding["plates"] == ["plate1", "plate2"]
    assert yaml_encoding["parents"] == []
    assert not yaml_encoding["observed"]
def test_yaml_encoding_for_bayesnet(tmp_path):
    """A whole network encodes its device, name, plates and node topology."""
    # NOTE(review): the ``tmp_path`` fixture is unused — candidate for removal.
    bn = bayesnet.BayesianNetwork("bn", torch.device("cpu", 0))
    bn.add_variable(
        categorical.CategoricalNodeWithDirichletPrior,
        "a",
        [],
        [],
        domain=["yes", "no"],
    )
    bn.add_variable(continuous.ContinuousNodeWithNormalDistribution, "b", ["a"], [])
    bn.add_variable(deterministic.Exponential, "c", ["b"], [])
    bn.add_variable(continuous.ContinuousNodeWithNormalDistribution, "d", [], [])
    bn.add_variable(deterministic.Sum, "e", ["c", "d"], [])
    yaml_encoding = bn.to_yaml_encoding()
    assert yaml_encoding.keys() == {
        "encodingVersion",
        "device",
        "name",
        "plates",
        "nodes",
    }
    assert yaml_encoding["encodingVersion"] == bayesnet.ENCODING_VERSION
    assert yaml_encoding["device"] == {"type": "cpu", "index": 0}
    assert yaml_encoding["name"] == "bn"
    assert yaml_encoding["plates"] == {}
    # Since there are separate unit-tests for encoding of nodes we
    # will perform basic checks.
    assert yaml_encoding["nodes"].keys() == {"a", "b", "c", "d", "e"}
    assert yaml_encoding["nodes"]["a"]["parents"] == []
    assert yaml_encoding["nodes"]["a"]["type"] == "CategoricalNodeWithDirichletPrior"
    assert yaml_encoding["nodes"]["b"]["parents"] == ["a"]
    assert yaml_encoding["nodes"]["b"]["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["nodes"]["c"]["parents"] == ["b"]
    assert yaml_encoding["nodes"]["c"]["type"] == "Exponential"
    assert yaml_encoding["nodes"]["d"]["parents"] == []
    assert yaml_encoding["nodes"]["d"]["type"] == "ContinuousNodeWithNormalDistribution"
    assert yaml_encoding["nodes"]["e"]["parents"] == ["c", "d"]
    assert yaml_encoding["nodes"]["e"]["type"] == "Sum"
# Directory containing the YAML fixtures used by test_from_yaml below.
FIXTURE_DIR = os.path.dirname(os.path.realpath(__file__))
BN_YAML = "bn.yaml"
BN_MINIMAL_YAML = "bn_minimal.yaml"
@pytest.mark.parametrize("yaml_file", [(BN_YAML), (BN_MINIMAL_YAML)])
def test_from_yaml(yaml_file):
yaml_file_path = os.path.join(FIXTURE_DIR, BN_YAML)
bn = bayesnet.from_yaml(yaml_file_path)
assert isinstance(bn, bayesnet.BayesianNetwork)
assert bn.name == "bn"
assert common.same_device(bn.device, torch.device("cpu", 0))
assert set(bn.dag.nodes) == {"a", "b", "c", "d", "e"}
assert bn.plate_dict == dict()
assert set(bn.dag.predecessors("a")) == set()
assert isinstance(
bn.get_node_object("a"), categorical.CategoricalNodeWithDirichletPrior
)
assert set(bn.dag.predecessors("b")) == {"a"}
assert isinstance(
bn.get_node_object("b"), continuous.ContinuousNodeWithNormalDistribution
)
assert set(bn.dag.predecessors("c")) == {"b"}
assert isinstance(bn.get_node_object("c"), deterministic.Exponential)
assert set(bn.dag.predecessors("d")) == set()
assert isinstance(
bn.get_node_object("d"), continuous.ContinuousNodeWithNormalDistribution
)
assert set(bn.dag.predecessors("e")) == {"c", "d"}
assert isinstance(bn.get_node_object("e"), deterministic.Sum) | 0.708414 | 0.572783 |
import re
import subprocess
from ovs.log.logHandler import LogHandler
logger = LogHandler('extensions', name='exportfs')
class Nfsexports(object):
    """
    Basic management for /etc/exports
    """
    def __init__(self):
        # Target file plus the sudo-wrapped exportfs / rpc.mountd commands.
        self._exportsFile = '/etc/exports'
        self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs']
        self._restart = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra']
        self._rpcmountd_stop = ['/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd']
        self._rpcmountd_start = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/rpc.mountd', '--manage-gids']

    def _slurp(self):
        """
        Read /etc/exports into a list of {'dir', 'network', 'params'} dicts.
        """
        # ``with`` guarantees the handle is closed even on error; raw
        # strings avoid the invalid-escape warnings the old patterns
        # produced on Python 3.
        with open(self._exportsFile, 'r') as f:
            lines = [line for line in f if not re.match(r'^\s*$', line)]
        entries = [line.strip() for line in lines if not line.startswith('#')]
        split_entries = [re.split(r'\s+|\(|\)', entry) for entry in entries]
        keys = ['dir', 'network', 'params']
        return [dict(zip(keys, parts)) for parts in split_entries]

    def add(self, directory, network, params):
        """
        Add entry to /etc/exports
        @param directory: directory to export
        @param network: network range allowed
        @param params: params for export (eg, 'ro,async,no_root_squash,no_subtree_check')
        """
        for entry in self._slurp():
            if entry['dir'] == directory:
                logger.info('Directory already exported, to export with different params please first remove')
                return
        with open(self._exportsFile, 'a') as f:
            f.write('%s %s(%s)\n' % (directory, network, params))

    def remove(self, directory):
        """
        Remove entry from /etc/exports
        """
        entries = self._slurp()
        for position, entry in enumerate(entries):
            if entry['dir'] == directory:
                # Delete by index and rewrite the file.  The original code
                # removed elements from the list *while iterating it*, which
                # silently skips the element that follows each removal.
                del entries[position]
                with open(self._exportsFile, 'w') as f:
                    for remaining in entries:
                        f.write("%s %s(%s) \n" % (remaining['dir'], remaining['network'], remaining['params']))
                return

    def list_exported(self):
        """
        List the current exported filesystems
        """
        exports = {}
        output = subprocess.check_output(self._cmd)
        for export in re.finditer(r'(\S+?)[\s\n]+(\S+)\n?', output):
            exports[export.group(1)] = export.group(2)
        return exports

    def unexport(self, directory):
        """
        Unexport a filesystem
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory not in exports:
            logger.info('Directory %s currently not exported' % directory)
            return
        # exportfs prints '<world>' for the catch-all export; map it back to '*'.
        network = exports[directory] if exports[directory] != '<world>' else '*'
        logger.info('Unexporting {}:{}'.format(network, directory))
        cmd.extend(['-u', '{}:{}'.format(network, directory)])
        subprocess.call(cmd)

    def export(self, directory, network='*'):
        """
        Export a filesystem and reload the export table
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory in exports:
            logger.info('Directory already exported with options %s' % exports[directory])
            return
        logger.info('Exporting {}:{}'.format(network, directory))
        cmd.extend(['-v', '{}:{}'.format(network, directory)])
        subprocess.call(cmd)
        subprocess.call(self._restart)

    def trigger_rpc_mountd(self):
        # Restart rpc.mountd so it picks up export/gid changes.
        subprocess.call(self._rpcmountd_stop)
        subprocess.call(self._rpcmountd_start)
import re
import subprocess
from ovs.log.logHandler import LogHandler
logger = LogHandler('extensions', name='exportfs')
class Nfsexports(object):
    """
    Basic management for /etc/exports

    Edits the exports file directly and drives /usr/sbin/exportfs (via
    sudo) to apply changes at runtime.
    """

    def __init__(self):
        # Fixed paths/commands; every external command runs through sudo as root.
        self._exportsFile = '/etc/exports'
        self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs']
        self._restart = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra']
        self._rpcmountd_stop = ['/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd']
        self._rpcmountd_start = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/rpc.mountd', '--manage-gids']

    def _slurp(self):
        """
        Read /etc/exports and parse each non-blank, non-comment line into a
        dict with keys 'dir', 'network' and 'params'.
        """
        with open(self._exportsFile, 'r') as f:
            lines = [line for line in f if not re.match('^\s*$', line)]
        lines = [i.strip() for i in lines if not i.startswith('#')]
        # "dir network(params)" -> ['dir', 'network', 'params']
        split_lines = [re.split('\s+|\(|\)', i) for i in lines]
        keys = ['dir', 'network', 'params']
        return [dict(zip(keys, line)) for line in split_lines]

    def add(self, directory, network, params):
        """
        Add entry to /etc/exports
        @param directory: directory to export
        @param network: network range allowed
        @param params: params for export (eg, 'ro,async,no_root_squash,no_subtree_check')
        """
        for entry in self._slurp():
            if entry['dir'] == directory:
                logger.info('Directory already exported, to export with different params please first remove')
                return
        # 'with' guarantees the handle is closed even if the write raises.
        with open(self._exportsFile, 'a') as f:
            f.write('%s %s(%s)\n' % (directory, network, params))

    def remove(self, directory):
        """
        Remove entry from /etc/exports

        Rewrites the file without the matching entry; leaves the file
        untouched when the directory is not present.
        """
        entries = self._slurp()
        # Filter instead of list.remove() inside a 'for' over the same
        # list, which skips elements.
        remaining = [e for e in entries if e['dir'] != directory]
        if len(remaining) == len(entries):
            return
        with open(self._exportsFile, 'w') as f:
            for e in remaining:
                f.write("%s %s(%s) \n" % (e['dir'], e['network'], e['params']))

    def list_exported(self):
        """
        List the current exported filesystems.

        Returns a dict mapping exported directory -> client/network spec,
        parsed from the exportfs command output.
        """
        exports = {}
        output = subprocess.check_output(self._cmd)
        for export in re.finditer('(\S+?)[\s\n]+(\S+)\n?', output):
            exports[export.group(1)] = export.group(2)
        return exports

    def unexport(self, directory):
        """
        Unexport a filesystem.

        No-op (with a log message) when not currently exported;
        '<world>' exports are addressed as '*'.
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory not in exports.keys():
            logger.info('Directory %s currently not exported' % directory)
            return
        host = exports[directory] if exports[directory] != '<world>' else '*'
        logger.info('Unexporting {}:{}'.format(host, directory))
        cmd.extend(['-u', '{}:{}'.format(host, directory)])
        subprocess.call(cmd)

    def export(self, directory, network='*'):
        """
        Export a filesystem, then re-export everything (-ra) to apply.

        Skips (with a log message) directories already exported.
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory in exports.keys():
            logger.info('Directory already exported with options %s' % exports[directory])
            return
        logger.info('Exporting {}:{}'.format(network, directory))
        cmd.extend(['-v', '{}:{}'.format(network, directory)])
        subprocess.call(cmd)
        subprocess.call(self._restart)

    def trigger_rpc_mountd(self):
        """Restart rpc.mountd: kill the running daemon, then relaunch it."""
        subprocess.call(self._rpcmountd_stop)
        subprocess.call(self._rpcmountd_start)
import malaya
def test_pretrained_bayes_sentiment():
    """Pretrained multinomial-bayes model returns a non-empty prediction."""
    positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME> Nik Abdullah, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
    news_sentiment = malaya.pretrained_bayes_sentiment()
    assert len(news_sentiment.predict(positive_text)) > 0
def test_pretrained_xgb_sentiment():
    """Pretrained XGBoost model returns a non-empty prediction."""
    positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME>, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
    news_sentiment = malaya.pretrained_xgb_sentiment()
    assert len(news_sentiment.predict(positive_text)) > 0
def test_pretrained_bayes_sentiment_batch():
    """Batch prediction on the pretrained bayes model returns results."""
    positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME> Nik Abdullah, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
    news_sentiment = malaya.pretrained_bayes_sentiment()
    assert len(news_sentiment.predict_batch([positive_text,positive_text])) > 0
def test_bahdanau_sentiment():
    """Bahdanau-attention deep model yields more than one attention weight."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('bahdanau')
    assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_attention_sentiment():
    """Hierarchical-attention deep model yields more than one attention weight."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('hierarchical')
    assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_luong_sentiment():
    """Luong-attention deep model yields more than one attention weight."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('luong')
    assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_normal_sentiment():
    """Bidirectional deep model reports a positive 'negative' score for negative text."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('bidirectional')
    assert news_sentiment.predict(negative_text)['negative'] > 0
def test_fasttext_sentiment():
    """fast-text deep model reports a positive 'negative' score for negative text."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('fast-text')
    assert news_sentiment.predict(negative_text)['negative'] > 0
def test_normal_sentiment_batch():
    """Batch prediction on the bidirectional deep model returns results."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('bidirectional')
    assert len(news_sentiment.predict_batch([negative_text,negative_text])) > 0
def test_fasttext_sentiment_batch():
    """Batch prediction on the fast-text deep model returns results."""
    malaya.get_available_sentiment_models()
    negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
    news_sentiment = malaya.deep_sentiment('fast-text')
    assert len(news_sentiment.predict_batch([negative_text,negative_text])) > 0
def test_bayes_sentiment():
    """Train a bayes model on the bundled CSV and predict one sample."""
    import pandas as pd
    df = pd.read_csv('tests/02032018.csv',sep=';')
    # Drop header rows / index column; remaining columns are text and label.
    df = df.iloc[3:,1:]
    df.columns = ['text','label']
    dataset = [[df.iloc[i,0],df.iloc[i,1]] for i in range(df.shape[0])]
    bayes=malaya.bayes_sentiment(dataset)
    assert len(bayes.predict(dataset[0][0])) > 0
def test_bayes_sentiment_bow_nosplit():
    """Train a bayes model with bag-of-words vectors and no train/test split."""
    import pandas as pd
    df = pd.read_csv('tests/02032018.csv',sep=';')
    # Drop header rows / index column; remaining columns are text and label.
    df = df.iloc[3:,1:]
    df.columns = ['text','label']
    dataset = [[df.iloc[i,0],df.iloc[i,1]] for i in range(df.shape[0])]
    bayes=malaya.bayes_sentiment(dataset, vector = 'bow', split_size = None)
    assert len(bayes.predict(dataset[0][0])) > 0
def test_bayes_sentiment_location():
    """Train a bayes model from a directory of labelled text files and predict."""
    model = malaya.bayes_sentiment('tests/local')
    prediction = model.predict('saya suka kerajaan dan anwar ibrahim')
    assert len(prediction) > 0
def test_pretrained_bayes_sentiment():
positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME> Nik Abdullah, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
news_sentiment = malaya.pretrained_bayes_sentiment()
assert len(news_sentiment.predict(positive_text)) > 0
def test_pretrained_xgb_sentiment():
positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME>, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
news_sentiment = malaya.pretrained_xgb_sentiment()
assert len(news_sentiment.predict(positive_text)) > 0
def test_pretrained_bayes_sentiment_batch():
positive_text = 'Kerajaan negeri Kelantan mempersoalkan motif kenyataan Menteri Kewangan Lim Guan Eng yang hanya menyebut Kelantan penerima terbesar bantuan kewangan dari Kerajaan Persekutuan. Sedangkan menurut Timbalan Menteri Besarnya, Datuk <NAME> Nik Abdullah, negeri lain yang lebih maju dari Kelantan turut mendapat pembiayaan dan pinjaman.'
news_sentiment = malaya.pretrained_bayes_sentiment()
assert len(news_sentiment.predict_batch([positive_text,positive_text])) > 0
def test_bahdanau_sentiment():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('bahdanau')
assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_attention_sentiment():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('hierarchical')
assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_luong_sentiment():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('luong')
assert len(news_sentiment.predict(negative_text)['attention']) > 1
def test_normal_sentiment():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('bidirectional')
assert news_sentiment.predict(negative_text)['negative'] > 0
def test_fasttext_sentiment():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('fast-text')
assert news_sentiment.predict(negative_text)['negative'] > 0
def test_normal_sentiment_batch():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('bidirectional')
assert len(news_sentiment.predict_batch([negative_text,negative_text])) > 0
def test_fasttext_sentiment_batch():
malaya.get_available_sentiment_models()
negative_text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
news_sentiment = malaya.deep_sentiment('fast-text')
assert len(news_sentiment.predict_batch([negative_text,negative_text])) > 0
def test_bayes_sentiment():
import pandas as pd
df = pd.read_csv('tests/02032018.csv',sep=';')
df = df.iloc[3:,1:]
df.columns = ['text','label']
dataset = [[df.iloc[i,0],df.iloc[i,1]] for i in range(df.shape[0])]
bayes=malaya.bayes_sentiment(dataset)
assert len(bayes.predict(dataset[0][0])) > 0
def test_bayes_sentiment_bow_nosplit():
import pandas as pd
df = pd.read_csv('tests/02032018.csv',sep=';')
df = df.iloc[3:,1:]
df.columns = ['text','label']
dataset = [[df.iloc[i,0],df.iloc[i,1]] for i in range(df.shape[0])]
bayes=malaya.bayes_sentiment(dataset, vector = 'bow', split_size = None)
assert len(bayes.predict(dataset[0][0])) > 0
def test_bayes_sentiment_location():
bayes = malaya.bayes_sentiment('tests/local')
assert len(bayes.predict('saya suka kerajaan dan anwar ibrahim')) > 0 | 0.439747 | 0.433652 |
from .model_properties import String, Integer
from managers import *
class DjangorientBaseNode(type):
    """
    Metaclass for Nodes & Edges.

    Attaches a DjangorientNodeManager instance as the 'objects' attribute
    of every concrete subclass (Django-manager style access).
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(DjangorientBaseNode, cls).__new__
        # DjangorientNode/Edge don't require any additional attributes, so return them with no additions
        if name in ['DjangorientNode', 'DjangorientEdge']:
            return super_new(cls, name, bases, attrs)
        new_cls = super_new(cls, name, bases, attrs)
        # Each user-defined class gets its own manager bound to it.
        setattr(new_cls, 'objects', DjangorientNodeManager(new_cls))
        return new_cls
class DjangorientNode(object):
    """Base class for user-defined graph vertices (OrientDB superclass 'V')."""
    # Python 2 metaclass declaration; ignored on Python 3.
    __metaclass__ = DjangorientBaseNode
    def __init__(self):
        # Class name doubles as the OrientDB class name.
        self._class_name = self.__class__.__name__
    @classmethod
    def _get_superclass(cls):
        # Vertices extend OrientDB's built-in 'V' class.
        return 'V'
class DjangorientBaseEdge(type):
    """
    Metaclass for Nodes & Edges.

    Attaches a DjangorientEdgeManager instance as the 'objects' attribute
    of every concrete subclass.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(DjangorientBaseEdge, cls).__new__
        # DjangorientNode/Edge don't require any additional attributes, so return them with no additions
        if name in ['DjangorientNode', 'DjangorientEdge']:
            return super_new(cls, name, bases, attrs)
        new_cls = super_new(cls, name, bases, attrs)
        # Each user-defined class gets its own manager bound to it.
        setattr(new_cls, 'objects', DjangorientEdgeManager(new_cls))
        return new_cls
class DjangorientEdge(object):
    """Base class for user-defined graph edges (OrientDB superclass 'E')."""
    # Python 2 metaclass declaration; ignored on Python 3.
    __metaclass__ = DjangorientBaseEdge
    def __init__(self):
        # Class name doubles as the OrientDB class name.
        self._class_name = self.__class__.__name__
    @classmethod
    def _get_superclass(cls):
        # Edges extend OrientDB's built-in 'E' class.
        return 'E'
class DjangorientBuilder(object):
    """
    A utility for building schemas in the database, based on the user
    defined models in models.py
    """
    def __init__(self):
        self.user_classes = dict()
        self.build_classes_dict()
        self.write_classes()

    def build_classes_dict(self):
        """Collect every node/edge subclass and map its typed properties."""
        model_classes = DjangorientNode.__subclasses__() + DjangorientEdge.__subclasses__()
        for model in model_classes:
            name = model.__name__
            schema = dict()
            schema['superClass'] = model._get_superclass()
            for attr_name in dir(model):
                prop = getattr(model, attr_name)
                # Only properties whose type is registered in all_types
                # are persisted to the schema.
                if filter(lambda t: t is type(prop), all_types):
                    schema[attr_name] = prop.get_orientdb_type()
            self.user_classes[name] = schema

    def write_classes(self):
        """
        Write the user-defined classes to the DB.
        """
        for class_name, class_properties in self.user_classes.iteritems():
            client.create_class(class_name, class_properties)
from managers import *
class DjangorientBaseNode(type):
"""
Metaclass for Nodes & Edges
"""
def __new__(cls, name, bases, attrs):
#super(DjangorientBaseModel, cls).__new__(name, bases, attrs)
super_new = super(DjangorientBaseNode, cls).__new__
# DjangorientNode/Edge don't require any additional attributes, so return them with no additions
if name in ['DjangorientNode', 'DjangorientEdge']:
return super_new(cls, name, bases, attrs)
new_cls = super_new(cls, name, bases, attrs)
setattr(new_cls, 'objects', DjangorientNodeManager(new_cls))
return new_cls
class DjangorientNode(object):
__metaclass__ = DjangorientBaseNode
def __init__(self):
#super(DjangorientNode, self).__init__()
self._class_name = self.__class__.__name__
@classmethod
def _get_superclass(cls):
return 'V'
class DjangorientBaseEdge(type):
"""
Metaclass for Nodes & Edges
"""
def __new__(cls, name, bases, attrs):
#super(DjangorientBaseModel, cls).__new__(name, bases, attrs)
super_new = super(DjangorientBaseEdge, cls).__new__
# DjangorientNode/Edge don't require any additional attributes, so return them with no additions
if name in ['DjangorientNode', 'DjangorientEdge']:
return super_new(cls, name, bases, attrs)
new_cls = super_new(cls, name, bases, attrs)
setattr(new_cls, 'objects', DjangorientEdgeManager(new_cls))
return new_cls
class DjangorientEdge(object):
__metaclass__ = DjangorientBaseEdge
def __init__(self):
#super(DjangorientNode, self).__init__()
self._class_name = self.__class__.__name__
@classmethod
def _get_superclass(cls):
return 'E'
class DjangorientBuilder(object):
"""
A utility for building schemas in the database, based on the user
defined models in models.py
"""
def __init__(self):
self.user_classes = dict()
self.build_classes_dict()
self.write_classes()
def build_classes_dict(self):
subclasses = DjangorientNode.__subclasses__()
subclasses += DjangorientEdge.__subclasses__()
for cls in subclasses:
class_name = cls.__name__
self.user_classes[class_name] = dict()
self.user_classes[class_name]['superClass'] = cls._get_superclass()
for attr in dir(cls):
obj_property = getattr(cls, attr)
# Test if the property is of a recognized type
if filter(lambda x: x is type(obj_property), all_types):
self.user_classes[class_name][attr] = obj_property.get_orientdb_type()
def write_classes(self):
"""
Write the user-defined classes to the DB.
"""
for class_name, class_properties in self.user_classes.iteritems():
client.create_class(class_name, class_properties) | 0.467332 | 0.091261 |
import numpy as np
class Flow(object):
    def __new__(cls, *args, **kwargs):
        """
        Creates a new Flow object.

        :param argument 1: Timestepper object (optional, default None)
        :param argument 2: Vectorfield object (optional, default None)
        :param kwargs: Additional parameters accepted by the solver:

            small (0.5, >0), large (2.0, >0), pessimist (0.9, >0),
            accept (1.2, >0), tol (1e-6, >0), dt_max (1.0, >0),
            newexact (1, >0), numstep (8, >0), tstart (0.0, float),
            tend (1.0, float), stepsize (2.0, >0), localss (10, >0),
            globalss (10, >0), disp (False, bool),
            variablestep (False, bool)

        :return: Flow object.
        """
        obj = super(Flow, cls).__new__(cls)
        obj.small = kwargs.get('small', 0.5)
        obj.large = kwargs.get('large', 2.0)
        obj.pessimist = kwargs.get('pessimist', 0.9)
        obj.accept = kwargs.get('accept', 1.2)
        obj.tol = kwargs.get('tol', 1e-6)
        obj.dt_max = kwargs.get('dt_max', 1.0)
        obj.newexact = kwargs.get('newexact', 1)
        obj.numstep = kwargs.get('numstep', 8)
        obj.tstart = kwargs.get('tstart', 0.0)
        obj.tend = kwargs.get('tend', 1.0)
        obj.stepsize = kwargs.get('stepsize', 2.0)
        obj.localss = kwargs.get('localss', 10)
        obj.globalss = kwargs.get('globalss', 10)
        obj.disp = kwargs.get('disp', False)
        obj.variablestep = kwargs.get('variablestep', False)
        # Positional arguments: (timestepper[, vectorfield]).  The previous
        # implementation re-assigned obj.timestepper in the vectorfield
        # branch, so a single-argument call clobbered the timestepper and
        # left obj.vectorfield unset entirely.
        obj.timestepper = args[0] if len(args) > 0 else None
        obj.vectorfield = args[1] if len(args) > 1 else None
        return obj

    def newstepsize(self, dt_old, errest):
        """
        Propose the next step size from the local error estimate.

        :param dt_old: step size just attempted
        :param errest: estimated local error of that step
        :return: (dt_new, accepted) — new step size, clamped to
            [small*dt_old, large*dt_old] and dt_max, and whether the step
            meets the (relaxed by 'accept') tolerance.
        """
        method = self.timestepper.method
        # 1.0/… forces float division so the exponent is correct even
        # under Python 2 integer semantics.
        dt = self.pessimist * (self.tol / errest) ** (1.0 / (method.RKord + 1)) * dt_old
        dt = min((dt, self.large * dt_old))
        dt = max((dt, self.small * dt_old))
        dt_new = min(dt, self.dt_max)
        accepted = (self.accept * self.tol - errest) > 0
        return dt_new, accepted

    def __call__(self, y, t0, tf, dt):
        """
        Integrate from t0 to tf starting at state y.

        :param y: initial state
        :param t0: initial time
        :param tf: final time
        :param dt: initial time step
        :return: (ti, yi) — accepted times and the state at each time
        """
        self.timestepper.variablestep = self.variablestep
        yi = [y]
        # NOTE(review): the time grid starts at 0.0 rather than t0 —
        # confirm this is intended for nonzero t0.
        ti = np.zeros((1,))
        if t0 + dt > tf:
            dt = tf - t0
        accepted = False
        rejected = 0
        errest = 0
        converged = False
        while not converged:
            # Clamp the last step so integration lands exactly on tf.
            if ti[-1] + dt >= tf:
                dt = tf - ti[-1]
                converged = True
            while not accepted:
                ylow, yhigh, errest = self.timestepper(self.vectorfield, yi[-1], ti[-1], dt)
                if self.variablestep is True:
                    # Fall back to the norm of the embedded-pair difference
                    # when the stepper provides no error estimate.
                    if errest == -1:
                        errest = np.linalg.norm(ylow.data - yhigh.data)
                    [dt_new, accepted] = self.newstepsize(dt, errest)
                else:
                    accepted = True
                    yhigh = ylow
                    dt_new = dt
                if not accepted:
                    dt = dt_new
                    rejected += 1
            ti = np.hstack((ti, ti[-1] + dt))
            yi.append(yhigh)
            accepted = False
            rejected = 0
            dt = dt_new
        return ti, yi
class Flow(object):
def __new__(cls, *args, **kwargs):
"""
Creates a new Flow object.
:param argument 1: Timestepper object
:param argument 2: Vectorfield object
:param kwargs: Additional parameters accepted by the solver.
:return: Flow object.
+------------------------+-----------------+-----------------+
| Valid kwargs | Default Value | Valid Values |
+========================+=================+=================+
| small | 0.5 | > 0 |
+------------------------+-----------------+-----------------+
| large | 2.0 | > 0 |
+------------------------+-----------------+-----------------+
| pessimist | 0.5 | > 0 |
+------------------------+-----------------+-----------------+
| accept | 1.2 | > 0 |
+------------------------+-----------------+-----------------+
| tol | 1e-6 | > 0 |
+------------------------+-----------------+-----------------+
| dt_max | 1.0 | > 0 |
+------------------------+-----------------+-----------------+
| newexact | 1 | > 0 |
+------------------------+-----------------+-----------------+
| numstep | 8 | > 0 |
+------------------------+-----------------+-----------------+
| tstart | 0.0 | float |
+------------------------+-----------------+-----------------+
| tend | 1.0 | float |
+------------------------+-----------------+-----------------+
| stepsize | 2.0 | > 0 |
+------------------------+-----------------+-----------------+
| localss | 10 | > 0 |
+------------------------+-----------------+-----------------+
| globalss | 10 | > 0 |
+------------------------+-----------------+-----------------+
| disp | False | bool |
+------------------------+-----------------+-----------------+
| variablestep | False | bool |
+------------------------+-----------------+-----------------+
"""
obj = super(Flow, cls).__new__(cls)
obj.small = kwargs.get('small', 0.5)
obj.large = kwargs.get('large', 2.0)
obj.pessimist = kwargs.get('pessimist', 0.9)
obj.accept = kwargs.get('accept', 1.2)
obj.tol = kwargs.get('tol', 1e-6)
obj.dt_max = kwargs.get('dt_max', 1.0)
obj.newexact = kwargs.get('newexact', 1)
obj.numstep = kwargs.get('numstep', 8)
obj.tstart = kwargs.get('tstart', 0.0)
obj.tend = kwargs.get('tend', 1.0)
obj.stepsize = kwargs.get('stepsize', 2.0)
obj.localss = kwargs.get('localss', 10)
obj.globalss = kwargs.get('globalss', 10)
obj.disp = kwargs.get('disp', False)
obj.variablestep = kwargs.get('variablestep', False)
if len(args) > 0:
obj.timestepper = args[0]
else:
obj.timestepper = None
if len(args) > 1:
obj.vectorfield = args[1]
else:
obj.timestepper = None
return obj
def newstepsize(self, dt_old, errest):
method = self.timestepper.method
dt = self.pessimist * (self.tol / errest)**(1/(method.RKord+1))*dt_old
dt = min((dt, self.large*dt_old))
dt = max((dt, self.small*dt_old))
dt_new = min(dt, self.dt_max)
if (self.accept*self.tol - errest) > 0:
accepted = True
else:
accepted = False
return dt_new, accepted
def __call__(self, y, t0, tf, dt):
"""
:param y: states
:param t0: initial time
:param tf: final time
:param dt: time step
:return:
"""
self.timestepper.variablestep = self.variablestep
yi = [y]
ti = np.zeros((1,))
if t0 + dt > tf:
dt = tf - t0
accepted = False
rejected = 0
errest = 0
n = 0
converged = False
while not converged:
if ti[-1] + dt >= tf:
dt = tf - ti[-1]
converged = True
while not accepted:
ylow, yhigh, errest = self.timestepper(self.vectorfield, yi[-1], ti[-1], dt)
if self.variablestep is True:
if errest == -1:
errest = np.linalg.norm(ylow.data - yhigh.data)
[dt_new, accepted] = self.newstepsize(dt, errest)
else:
accepted = True
yhigh = ylow
dt_new = dt
if not accepted:
dt = dt_new
rejected += 1
ti = np.hstack((ti, ti[-1] + dt))
yi.append(yhigh)
accepted = False
rejected = 0
dt = dt_new
return ti, yi | 0.709824 | 0.727776 |
from datetime import datetime
import functools
import os
import time
import sys
from schedule import run_all
# to import local fuctions
sys.path.insert(0, '../tools')
import schedule
from extract.earnings import extract_earnings
from extract.fundamentals import update_fundamental_dates, extract_fundamentals
from extract.price_extract import main_prices
from run_all import main
from communicate.discord import send_message_to_discord
from extract.fundamentals import main_fundamentals
from extract.market_cap import main_market_cap
from indicators.indicators import main_indicators
from signals.create_signals import main_signals
from indicators.benchmarks import benchmark_prices
# This decorator can be applied to
def with_logging(func):
    """Decorator that prints a log line before and after running a job."""
    @functools.wraps(func)
    def logged(*args, **kwargs):
        print('LOG: Running job "%s"' % func.__name__)
        outcome = func(*args, **kwargs)
        print('LOG: Job "%s" completed' % func.__name__)
        return outcome
    return logged
# Check if file date modified equals to today's date
# Check if file date modified equals to today's date
def check_download(path):
    """Return True when the file at *path* was last modified today."""
    mtime = os.path.getmtime(path)
    return datetime.fromtimestamp(mtime).date() == datetime.today().date()
# Check if it's a weekday
# Check if it's a weekday
def check_weekday():
    """
    Return True while scheduled price updates should run.

    True on any weekday; on weekend days only before 9AM.  Always returns
    a bool (the previous version fell through and returned None on
    weekend afternoons, and computed an unused 'hour' variable).
    """
    now = datetime.today()
    if now.weekday() < 5:
        return True
    # Weekend: only allow early-morning runs.
    return now.hour < 9
@with_logging
def run_prices():
    """Run the full price pipeline and notify Discord, on trading days only."""
    if check_weekday() == True:
        main(full_refresh=True)
        # 'buy_signal?7' is the message payload the Discord hook expects.
        send_message_to_discord('buy_signal?7')
@with_logging
def earnings():
    """Extract earnings once per day (skips if the CSV was already refreshed today)."""
    # NOTE(review): hard-coded absolute path ties this job to one machine —
    # consider making it configurable.
    if check_download("/Users/renovieira/Desktop/swing_trading/docs/earnings.csv") == False:
        extract_earnings()
@with_logging
def fundamentals():
    """Extract company fundamentals, then refresh the fundamental dates table."""
    print('Extracting Fundamentals')
    extract_fundamentals()
    print('Update Fundamental Dates')
    update_fundamental_dates()
# Update Prices every three hours on weekdays
for run_time in ("06:30", "09:30", "12:00", "13:30"):
    schedule.every().day.at(run_time).do(run_prices)
# Run Earnings and Fundamentals
schedule.every().thursday.at('21:00').do(earnings)
schedule.every().friday.at('23:00').do(fundamentals)
# Main loop: poll pending jobs once a second, forever.
while True:
    schedule.run_pending()
    time.sleep(1)
import functools
import os
import time
import sys
from schedule import run_all
# to import local fuctions
sys.path.insert(0, '../tools')
import schedule
from extract.earnings import extract_earnings
from extract.fundamentals import update_fundamental_dates, extract_fundamentals
from extract.price_extract import main_prices
from run_all import main
from communicate.discord import send_message_to_discord
from extract.fundamentals import main_fundamentals
from extract.market_cap import main_market_cap
from indicators.indicators import main_indicators
from signals.create_signals import main_signals
from indicators.benchmarks import benchmark_prices
# This decorator can be applied to
def with_logging(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
print('LOG: Running job "%s"' % func.__name__)
result = func(*args, **kwargs)
print('LOG: Job "%s" completed' % func.__name__)
return result
return wrapper
# Check if file date modified equals to today's date
def check_download(path):
time = os.path.getmtime(path)
file_date = datetime.fromtimestamp(time).date()
if file_date == datetime.today().date():
return True
else:
return False
# Check if it's a weekday
def check_weekday():
# Define variables
hour = datetime.today().hour
weekday = datetime.today().weekday()
# If between 6AM and 2PM and it's a weekday return True
if weekday < 5:
return True
# For the weekend
elif (weekday > 4) & (datetime.today().hour < 9):
return True
@with_logging
def run_prices():
if check_weekday() == True:
main(full_refresh=True)
send_message_to_discord('buy_signal?7')
@with_logging
def earnings():
if check_download("/Users/renovieira/Desktop/swing_trading/docs/earnings.csv") == False:
extract_earnings()
@with_logging
def fundamentals():
print('Extracting Fundamentals')
extract_fundamentals()
print('Update Fundamental Dates')
update_fundamental_dates()
# Update Prices every three hours on weekdays
schedule.every().day.at("06:30").do(run_prices)
schedule.every().day.at("09:30").do(run_prices)
schedule.every().day.at("12:00").do(run_prices)
schedule.every().day.at("13:30").do(run_prices)
# Run Earnings and Fundamentals
schedule.every().thursday.at('21:00').do(earnings)
schedule.every().friday.at('23:00').do(fundamentals)
while 1:
schedule.run_pending()
time.sleep(1) | 0.362292 | 0.153486 |
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Leader-follower formation scenario.

    One adversary 'leader' agent is rewarded for reaching a goal landmark;
    the 'follower' agents are rewarded for staying close to the leader.
    All agents are penalised for collisions and for leaving the arena.
    """

    def make_world(self):
        """Build the world: 3 non-colliding agents and 4 landmarks."""
        world = World()
        # world characteristics
        world.dim_c = 2
        num_agents = 3
        world.num_agents = num_agents
        num_landmarks = num_agents + 1
        # adding agents
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = False
            agent.silent = True
            agent.size = 0.05
        # adding landmarks (landmark 0 becomes the leader's goal)
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.07
        # Initial Conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Randomise landmark and agent states; fix the goal landmark and colours."""
        # Landmarks characteristics
        for landmark in world.landmarks:
            landmark.color = np.array([0.15, 0.15, 0.15])
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        goal = world.landmarks[0]
        goal.color = np.array([0.15, 0.65, 0.15])
        # Use an ndarray (not a plain list) so the goal position behaves
        # like every other landmark position in vector arithmetic.
        goal.state.p_pos = np.array([-0.8, -0.8])
        # Leader characteristics
        world.agents[0].color = np.array([0.85, 0.35, 0.35])
        world.agents[0].adversary = True
        world.agents[0].goal_a = goal
        # Followers
        for i in range(1, world.num_agents):
            world.agents[i].color = np.array([0.35, 0.35, 0.85])
            world.agents[i].adversary = False
        # Random intial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(0.1, 0.9, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)

    def benchmark_data(self, agent, world):
        """Benchmark metrics: leader -> squared distance to goal;
        followers -> tuple of squared distances to landmarks and leader."""
        if agent.adversary:
            return np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos))
        else:
            dists = []
            for l in world.landmarks:
                dists.append(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
            dists.append(np.sum(np.square(agent.state.p_pos - world.agents[0].state.p_pos)))
            return tuple(dists)

    def reward(self, agent, world):
        """Reward = boundary + collision penalties minus distance to goal
        (leader) or to the leader (followers)."""
        reward = self.outside(agent, world) + self.collosion(agent, world)
        if agent.adversary:
            reward -= np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))
        else:
            reward -= np.sqrt(np.sum(np.square(agent.state.p_pos - world.agents[0].state.p_pos)))
        return reward

    # NOTE: 'collosion' (sic) is kept — renaming would break external callers.
    def collosion(self, agent, world):
        """Penalty of -15 per agent or non-goal landmark within two radii."""
        col_rew = 0
        for ag in world.agents:
            if not ag.name == agent.name:
                if np.sqrt(np.sum(np.square(agent.state.p_pos - ag.state.p_pos))) < 2 * agent.size:
                    col_rew -= 15
        # Landmark 0 is the goal and is deliberately excluded from collisions.
        for i in range(1, len(world.landmarks)):
            if np.sqrt(np.sum(np.square(agent.state.p_pos - world.landmarks[i].state.p_pos))) < 2 * agent.size:
                col_rew -= 15
        return col_rew

    def outside(self, agent, world):
        """Penalty of -20 when the agent's L1 position norm exceeds 2."""
        out_rew = 0
        if np.sum(np.absolute(agent.state.p_pos)) > 2:
            out_rew -= 20
        return out_rew

    def observation(self, agent, world):
        """Observation: relative landmark positions, plus (followers) relative
        positions of other agents and of the leader, or (leader) the goal offset."""
        # position of the landmarks w.r.t the agent
        landmark_pos = []
        for landmark in world.landmarks:
            landmark_pos.append(landmark.state.p_pos - agent.state.p_pos)
        # position of the other agents w.r.t this agent
        other_pos = []
        for other in world.agents:
            if other is agent: continue
            other_pos.append(other.state.p_pos - agent.state.p_pos)
        if not agent.adversary:
            return np.concatenate([agent.state.p_pos - world.agents[0].state.p_pos] + landmark_pos + other_pos)
        else:
            return np.concatenate([agent.goal_a.state.p_pos - agent.state.p_pos] + landmark_pos)
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# world characteristics
world.dim_c = 2
num_agents = 3
world.num_agents = num_agents
num_landmarks = num_agents + 1
# adding agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = False
agent.silent = True
agent.size = 0.05
# adding landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.07
# Initial Conditions
self.reset_world(world)
return world
def reset_world(self, world):
# Landmarks characteristics
for landmark in world.landmarks:
landmark.color = np.array([0.15, 0.15, 0.15])
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
goal = world.landmarks[0]
goal.color = np.array([0.15, 0.65, 0.15])
goal.state.p_pos = [-0.8, -0.8]
# Leader characteristics
world.agents[0].color = np.array([0.85, 0.35, 0.35])
world.agents[0].adversary = True
world.agents[0].goal_a = goal
# Followers
for i in range(1, world.num_agents):
world.agents[i].color = np.array([0.35, 0.35, 0.85])
world.agents[i].adversary = False
# Random intial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(0.1, 0.9, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
def benchmark_data(self, agent, world):
# returning data for benchmark purposes
if agent.adversary:
return np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos))
else:
dists = []
for l in world.landmarks:
dists.append(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
dists.append(np.sum(np.square(agent.state.p_pos - world.agents[0].state.p_pos)))
return tuple(dists)
def reward(self, agent, world):
reward = self.outside(agent, world) + self.collosion(agent, world)
if agent.adversary:
reward -= np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))
else:
reward -= np.sqrt(np.sum(np.square(agent.state.p_pos - world.agents[0].state.p_pos)))
return reward
def collosion(self, agent, world):
col_rew = 0
for ag in world.agents:
if not ag.name == agent.name:
if np.sqrt(np.sum(np.square(agent.state.p_pos - ag.state.p_pos))) < 2* agent.size:
col_rew -= 15
for i in range(1, len(world.landmarks)):
if np.sqrt(np.sum(np.square(agent.state.p_pos - world.landmarks[i].state.p_pos))) < 2* agent.size:
col_rew -= 15
return col_rew
def outside(self, agent, world):
out_rew = 0
if np.sum(np.absolute(agent.state.p_pos)) > 2:
out_rew -= 20
return out_rew
def observation(self, agent, world):
# position of the landmarks w.r.t the agent
landmark_pos = []
for landmark in world.landmarks:
landmark_pos.append(landmark.state.p_pos - agent.state.p_pos)
# position of the other agents w.r.t this agent
other_pos = []
for other in world.agents:
if other is agent: continue
other_pos.append(other.state.p_pos - agent.state.p_pos)
if not agent.adversary:
return np.concatenate([agent.state.p_pos - world.agents[0].state.p_pos] + landmark_pos + other_pos)
else:
return np.concatenate([agent.goal_a.state.p_pos - agent.state.p_pos] + landmark_pos) | 0.458349 | 0.464537 |
import sys
import os
import torch
from allennlp.data.iterators import BucketIterator
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import TextFieldEmbedder
import torch.optim as optim
from acsa.acsc_pytorch.my_allennlp_trainer import Trainer
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer
from acsa.acsc_pytorch import acsc_models
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.token_embedders import embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from acsa.acsc_pytorch import allennlp_callback
from acsa.model_train_template.model_train_template import ModelTrainTemplate
from acsa.acsc_pytorch import acsc_dataset_reader
class TextAspectInSentimentOutTrainTemplate(ModelTrainTemplate):
    """Training pipeline for ACSC models that take (text, aspect) and output sentiment.

    On top of ModelTrainTemplate this class orchestrates: dataset
    loading/caching, vocabulary construction, iterator setup, model assembly
    from pretrained word embeddings, training with early stopping, and
    evaluation.  Subclasses select the concrete model class via
    ``_find_model_function_pure``.

    NOTE(review): ``self.base_data_dir``, ``self.dataset``, ``self.logger``,
    ``self.model_dir``, ``self.model_meta_data`` and ``self.best_model_filepath``
    are presumably initialized by ModelTrainTemplate — confirm there.
    """
    def __init__(self, configuration):
        super().__init__(configuration)
        self.data_reader = None
        self.train_data = None
        self.dev_data = None
        self.test_data = None
        self.distinct_categories = None
        self.distinct_polarities = None
        self._load_data()
        self.vocab = None
        self._build_vocab()
        self.iterator = None
        self.val_iterator = None
        self._build_iterator()
    def _load_data(self):
        """Load (or build and cache) the label-indexed train/dev/test instances.

        The processed dataset is pickled under ``base_data_dir + 'data'``;
        subsequent runs reuse the cached copy instead of regenerating it.
        """
        data_filepath = self.base_data_dir + 'data'
        if os.path.exists(data_filepath):
            self.train_data, self.dev_data, self.test_data, self.distinct_categories, self.distinct_polarities, \
                = super()._load_object(data_filepath)
        else:
            train_dev_test_data, distinct_categories, distinct_polarities = self.dataset. \
                generate_acd_and_sc_data()
            # The 'conflict' polarity is dropped from the label set entirely.
            distinct_polarities_new = []
            for polarity in distinct_polarities:
                if polarity != 'conflict':
                    distinct_polarities_new.append(polarity)
            self.distinct_categories = distinct_categories
            self.distinct_polarities = distinct_polarities_new
            token_indexer = SingleIdTokenIndexer(namespace="tokens",
                                                 token_min_padding_length=self.configuration['token_min_padding_length'])
            aspect_indexer = SingleIdTokenIndexer(namespace='aspect')
            reader = acsc_dataset_reader.TextAspectInSentimentOut(
                self.distinct_categories, self.distinct_polarities,
                tokenizer=self._get_word_segmenter(),
                token_indexers={"tokens": token_indexer},
                aspect_indexers={'aspect': aspect_indexer},
                configuration=self.configuration
            )
            self.data_reader = reader
            # Convert string labels to integer indices:
            # sample layout is [text, [(aspect, polarity), ...]].
            train_dev_test_data_label_indexed = {}
            for data_type, data in train_dev_test_data.items():
                if data is None:
                    continue
                data_new = []
                for sample in data:
                    sample_new = [sample[0]]
                    labels_new = []
                    for label in sample[1]:
                        aspect = label[0]
                        polarity = label[1]
                        aspect_index = distinct_categories.index(aspect)
                        if polarity == 'conflict':
                            # -100 marks dropped 'conflict' labels — presumably
                            # the ignore index used by the loss; TODO confirm
                            # against acsc_models.
                            polarity_index = -100
                        else:
                            polarity_index = distinct_polarities_new.index(polarity)
                        labels_new.append((aspect_index, polarity_index))
                    # Samples without any labels are dropped entirely.
                    if len(labels_new) != 0:
                        sample_new.append(labels_new)
                        data_new.append(sample_new)
                train_dev_test_data_label_indexed[data_type] = data_new
            self.train_data = reader.read(train_dev_test_data_label_indexed['train'])
            self.dev_data = reader.read(train_dev_test_data_label_indexed['dev'])
            self.test_data = reader.read(train_dev_test_data_label_indexed['test'])
            data = [self.train_data, self.dev_data, self.test_data, self.distinct_categories,
                    self.distinct_polarities]
            super()._save_object(data_filepath, data)
    def _build_vocab(self):
        """Build (or load the cached) vocabulary; in inference mode reuse the stored one.

        NOTE(review): the vocabulary is built over train+dev+test instances,
        so dev/test tokens are in-vocabulary by construction.
        """
        if self.configuration['train']:
            vocab_file_path = self.base_data_dir + 'vocab'
            if os.path.exists(vocab_file_path):
                self.vocab = super()._load_object(vocab_file_path)
            else:
                data = self.train_data + self.dev_data + self.test_data
                self.vocab = Vocabulary.from_instances(data, max_vocab_size=sys.maxsize)
                super()._save_object(vocab_file_path, self.vocab)
            self.model_meta_data['vocab'] = self.vocab
        else:
            self.vocab = self.model_meta_data['vocab']
    def _build_iterator(self):
        """Create a length-bucketed iterator for training and a plain one for eval."""
        self.iterator = BucketIterator(batch_size=self.configuration['batch_size'],
                                       sorting_keys=[("tokens", "num_tokens")],
                                       )
        self.iterator.index_with(self.vocab)
        self.val_iterator = BasicIterator(batch_size=self.configuration['batch_size'])
        self.val_iterator.index_with(self.vocab)
    def _print_args(self, model):
        """Log trainable/non-trainable parameter counts and the full configuration."""
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in model.parameters():
            n_params = torch.prod(torch.tensor(p.shape)).item()
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        self.logger.info('n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        self.logger.info('> training arguments:')
        for arg in self.configuration.keys():
            self.logger.info('>>> {0}: {1}'.format(arg, self.configuration[arg]))
    def _find_model_function_pure(self):
        # Abstract hook: subclasses return the model class to instantiate.
        raise NotImplementedError('_find_model_function_pure')
    def _get_aspect_embeddings_dim(self):
        # Default aspect-embedding size; subclasses may override (e.g. Heat -> 32).
        return 300
    def _init_aspect_embeddings_from_word_embeddings(self):
        # By default aspect embeddings are randomly initialized, not copied
        # from the pretrained word-embedding file.
        return False
    def _find_model_function(self):
        """Assemble the model: frozen pretrained word embedder, trainable aspect embedder."""
        embedding_dim = self.configuration['embed_size']
        embedding_matrix_filepath = self.base_data_dir + 'embedding_matrix'
        if os.path.exists(embedding_matrix_filepath):
            embedding_matrix = super()._load_object(embedding_matrix_filepath)
        else:
            embedding_filepath = self.configuration['embedding_filepath']
            embedding_matrix = embedding._read_embeddings_from_text_file(embedding_filepath, embedding_dim,
                                                                         self.vocab, namespace='tokens')
            super()._save_object(embedding_matrix_filepath, embedding_matrix)
        token_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size(namespace='tokens'),
                                    embedding_dim=embedding_dim, padding_index=0, vocab_namespace='tokens',
                                    trainable=False, weight=embedding_matrix)
        # the embedder maps the input tokens to the appropriate embedding matrix
        word_embedder: TextFieldEmbedder = BasicTextFieldEmbedder({"tokens": token_embedding})
        # weight=None (the default case) gives a randomly initialized aspect table.
        aspect_embedding_matrix = None
        if self._init_aspect_embeddings_from_word_embeddings():
            embedding_filepath = self.configuration['embedding_filepath']
            aspect_embedding_matrix = embedding._read_embeddings_from_text_file(embedding_filepath, embedding_dim,
                                                                                self.vocab, namespace='aspect')
        aspect_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size(namespace='aspect'),
                                     embedding_dim=self._get_aspect_embeddings_dim(), padding_index=0,
                                     trainable=True, weight=aspect_embedding_matrix)
        aspect_embedder: TextFieldEmbedder = BasicTextFieldEmbedder({"aspect": aspect_embedding},
                                                                    # we'll be ignoring masks so we'll need to set this to True
                                                                    allow_unmatched_keys=True)
        model_function = self._find_model_function_pure()
        model = model_function(
            word_embedder,
            aspect_embedder,
            self.distinct_categories,
            self.distinct_polarities,
            self.vocab,
            self.configuration
        )
        self._print_args(model)
        model = model.to(self.configuration['device'])
        return model
    def _get_estimator(self, model):
        """Wrap the model in an estimator bound to the validation iterator."""
        USE_GPU = torch.cuda.is_available()
        if USE_GPU:
            gpu_id = self.configuration['gpu_id']
        else:
            gpu_id = -1
        estimator = acsc_models.TextAspectInSentimentOutEstimator(model, self.val_iterator,
                                                                  self.distinct_categories,
                                                                  self.distinct_polarities,
                                                                  cuda_device=gpu_id)
        return estimator
    def _get_estimate_callback(self, model):
        """Build the per-epoch callback that estimates on train/dev/test."""
        result = []
        data_type_and_data = {
            'train': self.train_data,
            'dev': self.dev_data,
            'test': self.test_data
        }
        estimator = self._get_estimator(model)
        estimate_callback = allennlp_callback.EstimateCallback(data_type_and_data, estimator, self.logger)
        result.append(estimate_callback)
        return result
    def _inner_train(self):
        """Construct the model and run the AllenNLP trainer with early stopping."""
        USE_GPU = torch.cuda.is_available()
        if USE_GPU:
            gpu_id = self.configuration['gpu_id']
        else:
            gpu_id = -1
        self.model = self._find_model_function()
        # optimizer = adagrad.Adagrad(self.model.parameters(), lr=0.01, weight_decay=0.001)
        optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=0.00001)
        callbacks = self._get_estimate_callback(self.model)
        early_stopping_by_batch: bool = False
        estimator = self._get_estimator(self.model)
        trainer = Trainer(
            model=self.model,
            optimizer=optimizer,
            iterator=self.iterator,
            train_dataset=self.train_data,
            validation_dataset=self.dev_data,
            cuda_device=gpu_id,
            num_epochs=self.configuration['epochs'],
            validation_metric='+accuracy',
            validation_iterator=self.val_iterator,
            serialization_dir=self.model_dir,
            patience=self.configuration['patience'],
            callbacks =callbacks,
            early_stopping_by_batch=early_stopping_by_batch,
            estimator=estimator
        )
        metrics = trainer.train()
        self.logger.info('metrics: %s' % str(metrics))
    def _save_model(self):
        # Pickles the entire module object, not just a state_dict.
        torch.save(self.model, self.best_model_filepath)
    def _load_model(self):
        self.model = torch.load(self.best_model_filepath)
        # Re-attach the current run's configuration to the unpickled model.
        self.model.configuration = self.configuration
    def evaluate(self):
        """Run the estimator over train/dev/test and log each result."""
        estimator = self._get_estimator(self.model)
        data_type_and_data = {
            'train': self.train_data,
            'dev': self.dev_data,
            'test': self.test_data
        }
        for data_type, data in data_type_and_data.items():
            result = estimator.estimate(data)
            self.logger.info('data_type: %s result: %s' % (data_type, result))
class Heat(TextAspectInSentimentOutTrainTemplate):
    """Train template for the HEAT model.

    2017-CIKM-Aspect-level Sentiment Classification with HEAT (HiErarchical
    ATtention) Network.
    """
    # The redundant ``__init__`` that only delegated to ``super().__init__``
    # was removed (useless-super-delegation); the inherited constructor is
    # used unchanged, so the public interface is identical.

    def _get_aspect_embeddings_dim(self):
        # HEAT uses a compact 32-d trainable aspect embedding instead of the
        # 300-d default from the base template.
        return 32

    def _find_model_function_pure(self):
        # Model class instantiated by the base template's _find_model_function.
        return acsc_models.Heat
class AtaeLstm(TextAspectInSentimentOutTrainTemplate):
    """
    2016-emnlp-Attention-based LSTM for Aspect-level Sentiment Classification
    """
    # Reuses the base training pipeline unchanged; only the model class differs.
    def __init__(self, configuration):
        super().__init__(configuration)
    def _find_model_function_pure(self):
        # Model class instantiated by the base template's _find_model_function.
        return acsc_models.AtaeLstm | acsa/acsc_pytorch/acsc_templates.py | import sys
import os
import torch
from allennlp.data.iterators import BucketIterator
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import TextFieldEmbedder
import torch.optim as optim
from acsa.acsc_pytorch.my_allennlp_trainer import Trainer
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer
from acsa.acsc_pytorch import acsc_models
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.token_embedders import embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from acsa.acsc_pytorch import allennlp_callback
from acsa.model_train_template.model_train_template import ModelTrainTemplate
from acsa.acsc_pytorch import acsc_dataset_reader
class TextAspectInSentimentOutTrainTemplate(ModelTrainTemplate):
def __init__(self, configuration):
super().__init__(configuration)
self.data_reader = None
self.train_data = None
self.dev_data = None
self.test_data = None
self.distinct_categories = None
self.distinct_polarities = None
self._load_data()
self.vocab = None
self._build_vocab()
self.iterator = None
self.val_iterator = None
self._build_iterator()
def _load_data(self):
data_filepath = self.base_data_dir + 'data'
if os.path.exists(data_filepath):
self.train_data, self.dev_data, self.test_data, self.distinct_categories, self.distinct_polarities, \
= super()._load_object(data_filepath)
else:
train_dev_test_data, distinct_categories, distinct_polarities = self.dataset. \
generate_acd_and_sc_data()
distinct_polarities_new = []
for polarity in distinct_polarities:
if polarity != 'conflict':
distinct_polarities_new.append(polarity)
self.distinct_categories = distinct_categories
self.distinct_polarities = distinct_polarities_new
token_indexer = SingleIdTokenIndexer(namespace="tokens",
token_min_padding_length=self.configuration['token_min_padding_length'])
aspect_indexer = SingleIdTokenIndexer(namespace='aspect')
reader = acsc_dataset_reader.TextAspectInSentimentOut(
self.distinct_categories, self.distinct_polarities,
tokenizer=self._get_word_segmenter(),
token_indexers={"tokens": token_indexer},
aspect_indexers={'aspect': aspect_indexer},
configuration=self.configuration
)
self.data_reader = reader
train_dev_test_data_label_indexed = {}
for data_type, data in train_dev_test_data.items():
if data is None:
continue
data_new = []
for sample in data:
sample_new = [sample[0]]
labels_new = []
for label in sample[1]:
aspect = label[0]
polarity = label[1]
aspect_index = distinct_categories.index(aspect)
if polarity == 'conflict':
polarity_index = -100
else:
polarity_index = distinct_polarities_new.index(polarity)
labels_new.append((aspect_index, polarity_index))
if len(labels_new) != 0:
sample_new.append(labels_new)
data_new.append(sample_new)
train_dev_test_data_label_indexed[data_type] = data_new
self.train_data = reader.read(train_dev_test_data_label_indexed['train'])
self.dev_data = reader.read(train_dev_test_data_label_indexed['dev'])
self.test_data = reader.read(train_dev_test_data_label_indexed['test'])
data = [self.train_data, self.dev_data, self.test_data, self.distinct_categories,
self.distinct_polarities]
super()._save_object(data_filepath, data)
def _build_vocab(self):
if self.configuration['train']:
vocab_file_path = self.base_data_dir + 'vocab'
if os.path.exists(vocab_file_path):
self.vocab = super()._load_object(vocab_file_path)
else:
data = self.train_data + self.dev_data + self.test_data
self.vocab = Vocabulary.from_instances(data, max_vocab_size=sys.maxsize)
super()._save_object(vocab_file_path, self.vocab)
self.model_meta_data['vocab'] = self.vocab
else:
self.vocab = self.model_meta_data['vocab']
def _build_iterator(self):
self.iterator = BucketIterator(batch_size=self.configuration['batch_size'],
sorting_keys=[("tokens", "num_tokens")],
)
self.iterator.index_with(self.vocab)
self.val_iterator = BasicIterator(batch_size=self.configuration['batch_size'])
self.val_iterator.index_with(self.vocab)
def _print_args(self, model):
n_trainable_params, n_nontrainable_params = 0, 0
for p in model.parameters():
n_params = torch.prod(torch.tensor(p.shape)).item()
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
self.logger.info('n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
self.logger.info('> training arguments:')
for arg in self.configuration.keys():
self.logger.info('>>> {0}: {1}'.format(arg, self.configuration[arg]))
def _find_model_function_pure(self):
raise NotImplementedError('_find_model_function_pure')
def _get_aspect_embeddings_dim(self):
return 300
def _init_aspect_embeddings_from_word_embeddings(self):
return False
def _find_model_function(self):
embedding_dim = self.configuration['embed_size']
embedding_matrix_filepath = self.base_data_dir + 'embedding_matrix'
if os.path.exists(embedding_matrix_filepath):
embedding_matrix = super()._load_object(embedding_matrix_filepath)
else:
embedding_filepath = self.configuration['embedding_filepath']
embedding_matrix = embedding._read_embeddings_from_text_file(embedding_filepath, embedding_dim,
self.vocab, namespace='tokens')
super()._save_object(embedding_matrix_filepath, embedding_matrix)
token_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size(namespace='tokens'),
embedding_dim=embedding_dim, padding_index=0, vocab_namespace='tokens',
trainable=False, weight=embedding_matrix)
# the embedder maps the input tokens to the appropriate embedding matrix
word_embedder: TextFieldEmbedder = BasicTextFieldEmbedder({"tokens": token_embedding})
aspect_embedding_matrix = None
if self._init_aspect_embeddings_from_word_embeddings():
embedding_filepath = self.configuration['embedding_filepath']
aspect_embedding_matrix = embedding._read_embeddings_from_text_file(embedding_filepath, embedding_dim,
self.vocab, namespace='aspect')
aspect_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size(namespace='aspect'),
embedding_dim=self._get_aspect_embeddings_dim(), padding_index=0,
trainable=True, weight=aspect_embedding_matrix)
aspect_embedder: TextFieldEmbedder = BasicTextFieldEmbedder({"aspect": aspect_embedding},
# we'll be ignoring masks so we'll need to set this to True
allow_unmatched_keys=True)
model_function = self._find_model_function_pure()
model = model_function(
word_embedder,
aspect_embedder,
self.distinct_categories,
self.distinct_polarities,
self.vocab,
self.configuration
)
self._print_args(model)
model = model.to(self.configuration['device'])
return model
def _get_estimator(self, model):
USE_GPU = torch.cuda.is_available()
if USE_GPU:
gpu_id = self.configuration['gpu_id']
else:
gpu_id = -1
estimator = acsc_models.TextAspectInSentimentOutEstimator(model, self.val_iterator,
self.distinct_categories,
self.distinct_polarities,
cuda_device=gpu_id)
return estimator
def _get_estimate_callback(self, model):
result = []
data_type_and_data = {
'train': self.train_data,
'dev': self.dev_data,
'test': self.test_data
}
estimator = self._get_estimator(model)
estimate_callback = allennlp_callback.EstimateCallback(data_type_and_data, estimator, self.logger)
result.append(estimate_callback)
return result
def _inner_train(self):
USE_GPU = torch.cuda.is_available()
if USE_GPU:
gpu_id = self.configuration['gpu_id']
else:
gpu_id = -1
self.model = self._find_model_function()
# optimizer = adagrad.Adagrad(self.model.parameters(), lr=0.01, weight_decay=0.001)
optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=0.00001)
callbacks = self._get_estimate_callback(self.model)
early_stopping_by_batch: bool = False
estimator = self._get_estimator(self.model)
trainer = Trainer(
model=self.model,
optimizer=optimizer,
iterator=self.iterator,
train_dataset=self.train_data,
validation_dataset=self.dev_data,
cuda_device=gpu_id,
num_epochs=self.configuration['epochs'],
validation_metric='+accuracy',
validation_iterator=self.val_iterator,
serialization_dir=self.model_dir,
patience=self.configuration['patience'],
callbacks =callbacks,
early_stopping_by_batch=early_stopping_by_batch,
estimator=estimator
)
metrics = trainer.train()
self.logger.info('metrics: %s' % str(metrics))
def _save_model(self):
torch.save(self.model, self.best_model_filepath)
def _load_model(self):
self.model = torch.load(self.best_model_filepath)
self.model.configuration = self.configuration
def evaluate(self):
estimator = self._get_estimator(self.model)
data_type_and_data = {
'train': self.train_data,
'dev': self.dev_data,
'test': self.test_data
}
for data_type, data in data_type_and_data.items():
result = estimator.estimate(data)
self.logger.info('data_type: %s result: %s' % (data_type, result))
class Heat(TextAspectInSentimentOutTrainTemplate):
"""
2017-CIKM-Aspect-level Sentiment Classification with HEAT (HiErarchical ATtention) Network
"""
def __init__(self, configuration):
super().__init__(configuration)
def _get_aspect_embeddings_dim(self):
return 32
def _find_model_function_pure(self):
return acsc_models.Heat
class AtaeLstm(TextAspectInSentimentOutTrainTemplate):
"""
2016-emnlp-Attention-based LSTM for Aspect-level Sentiment Classification
"""
def __init__(self, configuration):
super().__init__(configuration)
def _find_model_function_pure(self):
return acsc_models.AtaeLstm | 0.466359 | 0.169406 |
from Muse_v2 import *
import struct
import json
import google.protobuf.internal.containers
import threading
import time
import Queue
class MuseProtoBufReaderV2(object):
    """Parse a stream of length-prefixed Muse v2 protobuf messages into events.

    Each decoded datum is converted to an OSC-style event list of the form
    ``[timestamp, path, osc_type_string, values, config_id]`` and pushed onto
    ``events_queue`` for a consumer to drain.  Python 2 code (``print``
    statements, ``Queue`` module).
    """
    def __init__(self, verbose):
        self.events = []
        self.__objects = []
        self.__config_id = 0
        self.thread_lock = threading.Lock()
        self.__verbose = verbose
        self.__timestamp = 0
        self.added_to_events = 0
        self.events_queue = Queue.Queue()
    def parse(self, in_stream):
        """Read messages until EOF or corruption; always queues a final 'done' event.

        Wire format per message: 4-byte little-endian payload length,
        2-byte little-endian message type (must be 2), then the
        MuseDataCollection protobuf payload.
        """
        while True:
            # (1) Read the message header
            header_bin = in_stream.read(4)
            # check for EOF
            if len(header_bin) == 0:
                self.add_done()
                break
            header = struct.unpack("<i", header_bin)
            msg_length = header[0]
            msg_type = in_stream.read(2)
            msg_type = struct.unpack("<h", msg_type)
            msg_type = msg_type[0]
            if msg_type != 2:
                print 'Corrupted file, type mismatch. Parsed: ' + str(msg_type) + ' expected 2'
                self.add_done()
                break
            # (2) Read and parse the message
            msg_bin = in_stream.read(msg_length)
            if len(msg_bin) != msg_length:
                # Short read: the payload was truncated relative to its header.
                print 'Corrupted file, length mismatch. Reporting length: ' + str(len(msg_bin)) + ' expected: ' + str(msg_length)
                self.add_done()
                break
            muse_data_collection = MuseDataCollection()
            muse_data_collection.ParseFromString(msg_bin)
            # (3) Process this chunk of data
            self.__objects.extend(muse_data_collection.collection)
            for obj in self.__objects:
                self.handle_data(obj)
            self.__objects = []
    def add_done(self):
        # Sentinel event, stamped just after the last timestamp seen, telling
        # consumers the stream is finished.
        self.add_to_events_queue([self.__timestamp + 0.001, 'done'])
    # dispatch based on data type
    def handle_data(self, md):
        """Route one MuseData message to the handler matching its datatype."""
        # Version 2 response
        # Configuration data
        self.__config_id = md.config_id
        if md.datatype == MuseData.CONFIG:
            data_obj = md.Extensions[MuseConfig.museData]
            self.handle_config(md.timestamp, data_obj)
        # Version
        if md.datatype == MuseData.VERSION:
            data_obj = md.Extensions[MuseVersion.museData]
            self.handle_version(md.timestamp, data_obj)
        # EEG samples
        if md.datatype == MuseData.EEG:
            data_obj = md.Extensions[EEG.museData]
            # Check if this is a DRL/REF message
            if data_obj.HasField("drl"):
                self.handle_drlref(md.timestamp, data_obj)
            else:
                self.handle_eeg(md.timestamp, data_obj)
        # Quantization data
        if md.datatype == MuseData.QUANT:
            data_obj = md.Extensions[MuseQuantization.museData]
            self.handle_quantization(md.timestamp, data_obj)
        # Accelerometer
        if md.datatype == MuseData.ACCEL:
            data_obj = md.Extensions[Accelerometer.museData]
            self.handle_acc(md.timestamp, data_obj)
        # Battery
        if md.datatype == MuseData.BATTERY:
            data_obj = md.Extensions[Battery.museData]
            self.handle_batt(md.timestamp, data_obj)
        # Annotations
        if md.datatype == MuseData.ANNOTATION:
            data_obj = md.Extensions[Annotation.museData]
            self.handle_annotation(md.timestamp, data_obj)
        # DSP
        if md.datatype == MuseData.DSP:
            data_obj = md.Extensions[DSP.museData]
            self.handle_dsp(md.timestamp, data_obj)
        # ComputingDevice
        if md.datatype == MuseData.COMPUTING_DEVICE:
            data_obj = md.Extensions[ComputingDevice.museData]
            self.handle_computing_device(md.timestamp, data_obj)
        # EEG Dropped
        if md.datatype == MuseData.EEG_DROPPED:
            data_obj = md.Extensions[EEG_DroppedSamples.museData]
            self.handle_dropped_eeg(md.timestamp, data_obj)
        # Acc Dropped
        if md.datatype == MuseData.ACC_DROPPED:
            data_obj = md.Extensions[ACC_DroppedSamples.museData]
            self.handle_dropped_acc(md.timestamp, data_obj)
    def handle_json_dictionary_from_proto(self, data_obj):
        """Serialize a protobuf message's plain lowercase fields to a JSON string."""
        m={}
        for a in dir(data_obj):
            upperFlag = False
            if a.startswith('_'):
                continue
            # Skip attributes containing any uppercase letter: protobuf API
            # methods/constants are CamelCase, field names are lowercase.
            for x in a:
                if x.isupper():
                    upperFlag = True
                    break
            if upperFlag:
                continue
            value = getattr(data_obj,a)
            if isinstance(value, google.protobuf.internal.containers.RepeatedScalarFieldContainer):
                # Repeated fields are not JSON-serializable; copy to a plain list.
                temp = []
                temp.extend(value)
                value = temp
            m[a] = value
        return json.dumps(m)
    def handle_config(self, timestamp, data_obj):
        json_dict = self.handle_json_dictionary_from_proto(data_obj)
        self.add_to_events_queue([timestamp, "/muse/config", "s", [str(json_dict)], self.__config_id])
    def handle_version(self, timestamp, data_obj):
        json_dict = self.handle_json_dictionary_from_proto(data_obj)
        self.add_to_events_queue([timestamp, "/muse/version", "s", [str(json_dict)], self.__config_id])
    def handle_eeg(self, timestamp, data_obj):
        # Check if this is a 6 channel EEG message
        # OSC type tag: one 'f' (float) per channel value.
        data_count = len(data_obj.values)
        osc_type = 'f'*data_count
        self.add_to_events_queue([timestamp, "/muse/eeg", osc_type, data_obj.values, self.__config_id])
    def handle_drlref(self, timestamp, data_obj):
        self.add_to_events_queue([timestamp, "/muse/drlref", "ff",
                                  [data_obj.drl, data_obj.ref], self.__config_id])
    def handle_quantization(self, timestamp, data_obj):
        # One 'i' (int) per quantization value.
        data_count = len(data_obj.values)
        osc_type = 'i'*data_count
        self.add_to_events_queue([timestamp, "/muse/eeg/quantization", osc_type,
                                  data_obj.values, self.__config_id])
    def handle_acc(self, timestamp, data_obj):
        self.add_to_events_queue([timestamp, "/muse/acc", "fff",
                                  [data_obj.acc1, data_obj.acc2, data_obj.acc3],
                                  self.__config_id])
    def handle_batt(self, timestamp, data_obj):
        self.add_to_events_queue([timestamp, "/muse/batt", "iiii",
                                  [data_obj.percent_remaining,
                                   data_obj.battery_fuel_gauge_millivolts,
                                   data_obj.battery_adc_millivolts,
                                   data_obj.temperature_celsius], self.__config_id])
    def handle_annotation(self, timestamp, data_obj):
        """Re-emit an annotation either as a parsed OSC message or as a generic event.

        OSC-format annotations carry a string of the form
        ``"<path> <typetags> <value> <value> ..."`` which is split and each
        value converted per its OSC type tag.
        """
        if data_obj.event_data_format == Annotation.OSC:
            temp = data_obj.event_data.split(" ")
            path = temp[0]
            osc_types = temp[1]
            string_data = temp[2:2+len(osc_types)]
            data = []
            i = 0
            for osc_type in osc_types:
                if 'f' in osc_type:
                    data.append(float(string_data[i]))
                elif 'i' in osc_type:
                    data.append(int(string_data[i]))
                elif 'd' in osc_type:
                    # OSC doubles are mapped to Python floats as well.
                    data.append(float(string_data[i]))
                elif 's' in osc_type:
                    data.append(str(string_data[i]))
                i += 1
            self.add_to_events_queue([timestamp, path, osc_types, data, self.__config_id])
        else:
            # Non-OSC annotations are forwarded verbatim with a readable
            # description of their original format.
            event_format = ""
            if data_obj.event_data_format == Annotation.PLAIN_STRING:
                event_format = "Plain String"
            elif data_obj.event_data_format == Annotation.JSON:
                event_format = "JSON"
            self.add_to_events_queue([timestamp, "/muse/annotation", "sssss", [data_obj.event_data, event_format, data_obj.event_type, data_obj.event_id, data_obj.parent_id], self.__config_id])
    def handle_dsp(self, timestamp, data_obj):
        # DSP results are published under a per-type subpath.
        data_count = len(data_obj.float_array)
        osc_type = 'f'*data_count
        self.add_to_events_queue([timestamp, "/muse/dsp/" + data_obj.type, osc_type, data_obj.float_array, self.__config_id])
    def handle_computing_device(self, timestamp, data_obj):
        json_dict = self.handle_json_dictionary_from_proto(data_obj)
        self.add_to_events_queue([timestamp, "/muse/device", "s", [str(json_dict)], self.__config_id])
    def handle_dropped_eeg(self, timestamp, data_obj):
        self.add_to_events_queue([timestamp, "/muse/eeg/dropped", "i", [data_obj.num], self.__config_id])
    def handle_dropped_acc(self, timestamp, data_obj):
        self.add_to_events_queue([timestamp, "/muse/acc/dropped", "i", [data_obj.num], self.__config_id])
    def add_to_events_queue(self, event):
        """Queue one event, tracking the latest timestamp and applying backpressure."""
        self.__timestamp = event[0]
        self.events_queue.put(event)
        self.added_to_events += 1
        # Crude backpressure: yield the CPU until the consumer drains the
        # queue below 30000 entries.
        while self.events_queue.qsize() >= 30000:
            time.sleep(0) | src/proto_reader_v2.py | from Muse_v2 import *
import struct
import json
import google.protobuf.internal.containers
import threading
import time
import Queue
class MuseProtoBufReaderV2(object):
def __init__(self, verbose):
self.events = []
self.__objects = []
self.__config_id = 0
self.thread_lock = threading.Lock()
self.__verbose = verbose
self.__timestamp = 0
self.added_to_events = 0
self.events_queue = Queue.Queue()
def parse(self, in_stream):
while True:
# (1) Read the message header
header_bin = in_stream.read(4)
# check for EOF
if len(header_bin) == 0:
self.add_done()
break
header = struct.unpack("<i", header_bin)
msg_length = header[0]
msg_type = in_stream.read(2)
msg_type = struct.unpack("<h", msg_type)
msg_type = msg_type[0]
if msg_type != 2:
print 'Corrupted file, type mismatch. Parsed: ' + str(msg_type) + ' expected 2'
self.add_done()
break
# (2) Read and parse the message
msg_bin = in_stream.read(msg_length)
if len(msg_bin) != msg_length:
print 'Corrupted file, length mismatch. Reporting length: ' + str(len(msg_bin)) + ' expected: ' + str(msg_length)
self.add_done()
break
muse_data_collection = MuseDataCollection()
muse_data_collection.ParseFromString(msg_bin)
# (3) Process this chunk of data
self.__objects.extend(muse_data_collection.collection)
for obj in self.__objects:
self.handle_data(obj)
self.__objects = []
def add_done(self):
self.add_to_events_queue([self.__timestamp + 0.001, 'done'])
# dispatch based on data type
def handle_data(self, md):
# Version 2 response
# Configuration data
self.__config_id = md.config_id
if md.datatype == MuseData.CONFIG:
data_obj = md.Extensions[MuseConfig.museData]
self.handle_config(md.timestamp, data_obj)
# Version
if md.datatype == MuseData.VERSION:
data_obj = md.Extensions[MuseVersion.museData]
self.handle_version(md.timestamp, data_obj)
# EEG samples
if md.datatype == MuseData.EEG:
data_obj = md.Extensions[EEG.museData]
# Check if this is a DRL/REF message
if data_obj.HasField("drl"):
self.handle_drlref(md.timestamp, data_obj)
else:
self.handle_eeg(md.timestamp, data_obj)
# Quantization data
if md.datatype == MuseData.QUANT:
data_obj = md.Extensions[MuseQuantization.museData]
self.handle_quantization(md.timestamp, data_obj)
# Accelerometer
if md.datatype == MuseData.ACCEL:
data_obj = md.Extensions[Accelerometer.museData]
self.handle_acc(md.timestamp, data_obj)
# Battery
if md.datatype == MuseData.BATTERY:
data_obj = md.Extensions[Battery.museData]
self.handle_batt(md.timestamp, data_obj)
# Annotations
if md.datatype == MuseData.ANNOTATION:
data_obj = md.Extensions[Annotation.museData]
self.handle_annotation(md.timestamp, data_obj)
# DSP
if md.datatype == MuseData.DSP:
data_obj = md.Extensions[DSP.museData]
self.handle_dsp(md.timestamp, data_obj)
# ComputingDevice
if md.datatype == MuseData.COMPUTING_DEVICE:
data_obj = md.Extensions[ComputingDevice.museData]
self.handle_computing_device(md.timestamp, data_obj)
# EEG Dropped
if md.datatype == MuseData.EEG_DROPPED:
data_obj = md.Extensions[EEG_DroppedSamples.museData]
self.handle_dropped_eeg(md.timestamp, data_obj)
# Acc Dropped
if md.datatype == MuseData.ACC_DROPPED:
data_obj = md.Extensions[ACC_DroppedSamples.museData]
self.handle_dropped_acc(md.timestamp, data_obj)
def handle_json_dictionary_from_proto(self, data_obj):
m={}
for a in dir(data_obj):
upperFlag = False
if a.startswith('_'):
continue
for x in a:
if x.isupper():
upperFlag = True
break
if upperFlag:
continue
value = getattr(data_obj,a)
if isinstance(value, google.protobuf.internal.containers.RepeatedScalarFieldContainer):
temp = []
temp.extend(value)
value = temp
m[a] = value
return json.dumps(m)
def handle_config(self, timestamp, data_obj):
json_dict = self.handle_json_dictionary_from_proto(data_obj)
self.add_to_events_queue([timestamp, "/muse/config", "s", [str(json_dict)], self.__config_id])
def handle_version(self, timestamp, data_obj):
json_dict = self.handle_json_dictionary_from_proto(data_obj)
self.add_to_events_queue([timestamp, "/muse/version", "s", [str(json_dict)], self.__config_id])
def handle_eeg(self, timestamp, data_obj):
# Check if this is a 6 channel EEG message
data_count = len(data_obj.values)
osc_type = 'f'*data_count
self.add_to_events_queue([timestamp, "/muse/eeg", osc_type, data_obj.values, self.__config_id])
def handle_drlref(self, timestamp, data_obj):
self.add_to_events_queue([timestamp, "/muse/drlref", "ff",
[data_obj.drl, data_obj.ref], self.__config_id])
def handle_quantization(self, timestamp, data_obj):
data_count = len(data_obj.values)
osc_type = 'i'*data_count
self.add_to_events_queue([timestamp, "/muse/eeg/quantization", osc_type,
data_obj.values, self.__config_id])
def handle_acc(self, timestamp, data_obj):
self.add_to_events_queue([timestamp, "/muse/acc", "fff",
[data_obj.acc1, data_obj.acc2, data_obj.acc3],
self.__config_id])
def handle_batt(self, timestamp, data_obj):
self.add_to_events_queue([timestamp, "/muse/batt", "iiii",
[data_obj.percent_remaining,
data_obj.battery_fuel_gauge_millivolts,
data_obj.battery_adc_millivolts,
data_obj.temperature_celsius], self.__config_id])
def handle_annotation(self, timestamp, data_obj):
if data_obj.event_data_format == Annotation.OSC:
temp = data_obj.event_data.split(" ")
path = temp[0]
osc_types = temp[1]
string_data = temp[2:2+len(osc_types)]
data = []
i = 0
for osc_type in osc_types:
if 'f' in osc_type:
data.append(float(string_data[i]))
elif 'i' in osc_type:
data.append(int(string_data[i]))
elif 'd' in osc_type:
data.append(float(string_data[i]))
elif 's' in osc_type:
data.append(str(string_data[i]))
i += 1
self.add_to_events_queue([timestamp, path, osc_types, data, self.__config_id])
else:
event_format = ""
if data_obj.event_data_format == Annotation.PLAIN_STRING:
event_format = "Plain String"
elif data_obj.event_data_format == Annotation.JSON:
event_format = "JSON"
self.add_to_events_queue([timestamp, "/muse/annotation", "sssss", [data_obj.event_data, event_format, data_obj.event_type, data_obj.event_id, data_obj.parent_id], self.__config_id])
def handle_dsp(self, timestamp, data_obj):
data_count = len(data_obj.float_array)
osc_type = 'f'*data_count
self.add_to_events_queue([timestamp, "/muse/dsp/" + data_obj.type, osc_type, data_obj.float_array, self.__config_id])
def handle_computing_device(self, timestamp, data_obj):
json_dict = self.handle_json_dictionary_from_proto(data_obj)
self.add_to_events_queue([timestamp, "/muse/device", "s", [str(json_dict)], self.__config_id])
def handle_dropped_eeg(self, timestamp, data_obj):
self.add_to_events_queue([timestamp, "/muse/eeg/dropped", "i", [data_obj.num], self.__config_id])
def handle_dropped_acc(self, timestamp, data_obj):
self.add_to_events_queue([timestamp, "/muse/acc/dropped", "i", [data_obj.num], self.__config_id])
    def add_to_events_queue(self, event):
        """Append an event (first element is its timestamp) to the output queue.

        Also remembers the newest timestamp so add_done() can schedule its
        terminal marker just after it.
        """
        self.__timestamp = event[0]
        self.events_queue.put(event)
        self.added_to_events += 1
        # Crude back-pressure: spin (yielding the GIL via sleep(0)) while the
        # queue holds >= 30000 items. NOTE(review): qsize() is approximate and
        # this busy-spins a core; a bounded Queue(maxsize=30000) with a
        # blocking put() would be cleaner — confirm consumer expectations.
        while self.events_queue.qsize() >= 30000:
            time.sleep(0)
from typing import List, Any, Union, Dict, Tuple
from numpy import ndarray
from sentence_transformers import SentenceTransformer, util
import torch
import pickle
from torch import Tensor
from models.models_tools import filter_data
from sklearn.cluster import AgglomerativeClustering
import sklearn.cluster
class ContextualEmbeddingModel:
    def __init__(self, file_path: str = "", model: SentenceTransformer = None):
        # Torch device used for encoding; set by compute_embedding().
        self.device = None
        # Path of the dataset file the embedding is built from.
        self.file_path: str = file_path
        # Tag filters used by the last compute_embedding() call.
        self.configuration: List[str] = []
        # SentenceTransformer used to encode documents and queries.
        self.model: SentenceTransformer = model
        # Document ids, aligned index-for-index with the embedding rows.
        self.transformed_data_ids: List[str] = []
        # Raw document texts fed to the encoder.
        self.transformed_data: List[str] = []
        # Encoded corpus (tensor or ndarray depending on encode() flags).
        self.embedding: Union[List[Tensor], ndarray, Tensor] = []
    def set_file_path(self, file_path: str):
        """Set the dataset file used for the embedding.

        NOTE(review): the original docstring claimed the other parameters are
        reset, but nothing is reset here — confirm intent.
        :param file_path: path of the datasets file
        """
        self.file_path = file_path
    def set_model(self, model: SentenceTransformer):
        """Set the model used for the embedding.

        NOTE(review): the original docstring claimed the other parameters are
        reset, but nothing is reset here — confirm intent.
        :param model: the SentenceTransformer instance to use
        """
        self.model = model
    def compute_embedding(
        self,
        tags_filters=None,
        convert_to_tensor: bool = True,
        convert_to_numpy: bool = False,
        show_progress_bar: bool = True,
        device: str = "cpu",
        random_data: int = None,
    ) -> Any:
        """Filter the dataset file and encode its texts with the model.

        :param device: torch device used for the computation (forwarded to encode())
        :param show_progress_bar: show a progress bar while encoding (forwarded)
        :param convert_to_numpy: if True, return one large ndarray (forwarded)
        :param convert_to_tensor: if True, return one large tensor; overrides
            convert_to_numpy (forwarded)
        :param tags_filters: list of tags to include; presumably defaults inside
            filter_data to ["dataset_name", "keywords", "description"] — confirm
        :param random_data: number of random entries picked from the data
        """
        self.device = device
        self.configuration = tags_filters
        filtered_data = filter_data(self.file_path, tags_filters, random_data)
        # Values are the texts to encode; keys kept id-aligned for later lookup.
        self.transformed_data = list(filtered_data.values())
        self.transformed_data_ids = list(filtered_data.keys())
        self.embedding = self.model.encode(
            self.transformed_data,
            show_progress_bar=show_progress_bar,
            convert_to_tensor=convert_to_tensor,
            convert_to_numpy=convert_to_numpy,
            device=device,
        )
def save_embedding(self, name: str):
"""
Save the embedding as a .pkl
:param name:
:return:
"""
with open(f"{name}.pkl", "wb") as fo:
pickle.dump(
{
"transformed_data": self.transformed_data,
"transformed_data_ids": self.transformed_data_ids,
"embeddings": self.embedding,
},
fo,
)
def load_embedding(self, name: str):
"""
Load a .pkl embedding
:param name:
:return:
"""
with open(f"{name}.pkl", "rb") as fi:
pickle_file = pickle.load(fi)
self.transformed_data = pickle_file["transformed_data"]
self.transformed_data_ids = pickle_file["transformed_data_ids"]
self.embedding = pickle_file["embeddings"]
def kmean_clustering(
self,
clustering_model: sklearn.cluster = AgglomerativeClustering(n_clusters=10),
) -> Dict:
"""
Compute cluster given a skelarn clustering model
:param clustering_model: A cluster model from sklearn.cluster
:return: A dict of clustered datas from the actual embedding
"""
normalized_embedding = self.embedding / torch.linalg.norm(
self.embedding, keepdims=True
)
normalized_embedding = normalized_embedding.cpu().numpy()
clustering_model.fit(normalized_embedding)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[self.transformed_data_ids[sentence_id]] = cluster_id
return clustered_sentences
    def cosine_similarity(self, query: str, top_k: int = 5) -> Tuple[Any, Any]:
        """
        Compute the cosine similarity for a given query and return the @top_k higher similarities
        :param query: Query
        :param top_k: Number of document to return
        :return: Lists of scores and ids of the @top_k higher similarities
        """
        # Never ask for more results than there are documents.
        top_k = min(top_k, len(self.transformed_data))
        query_embedding = self.model.encode(
            query, convert_to_tensor=True, device=self.device
        )
        # Similarity of the query against every corpus row.
        cos_scores = util.cos_sim(query_embedding, self.embedding)[0]
        # NOTE(review): k=top_k+1 returns one extra hit and raises if
        # top_k == corpus size — confirm whether the +1 is meant to skip a
        # self-match.
        top_results = torch.topk(cos_scores, k=top_k + 1)
        scores = []
        idxs = []
        for score, idx in zip(top_results[0], top_results[1]):
            scores.append(score.item())
            idxs.append(idx.item())
        return scores, idxs
# models/contextuals/sbert.py
from typing import List, Any, Union, Dict, Tuple
from numpy import ndarray
from sentence_transformers import SentenceTransformer, util
import torch
import pickle
from torch import Tensor
from models.models_tools import filter_data
from sklearn.cluster import AgglomerativeClustering
import sklearn.cluster
class ContextualEmbeddingModel:
    """Sentence-transformer embedding of a dataset file, with persistence,
    clustering and cosine-similarity search helpers."""

    def __init__(self, file_path: str = "", model: SentenceTransformer = None):
        # Torch device used for encoding; set by compute_embedding().
        self.device = None
        self.file_path: str = file_path
        self.configuration: List[str] = []
        self.model: SentenceTransformer = model
        # Ids aligned index-for-index with the embedding rows.
        self.transformed_data_ids: List[str] = []
        self.transformed_data: List[str] = []
        self.embedding: Union[List[Tensor], ndarray, Tensor] = []

    def set_file_path(self, file_path: str):
        """Set the dataset file used for the embedding.

        NOTE(review): the original docstring claimed the other parameters are
        reset, but nothing is reset here — confirm intent.
        """
        self.file_path = file_path

    def set_model(self, model: SentenceTransformer):
        """Set the model used for the embedding (same reset caveat as
        set_file_path)."""
        self.model = model

    def compute_embedding(
        self,
        tags_filters=None,
        convert_to_tensor: bool = True,
        convert_to_numpy: bool = False,
        show_progress_bar: bool = True,
        device: str = "cpu",
        random_data: int = None,
    ) -> Any:
        """Filter the dataset file and encode its texts with the model.

        :param device: torch device used for the computation (forwarded)
        :param show_progress_bar: show a progress bar while encoding (forwarded)
        :param convert_to_numpy: if True, return one large ndarray (forwarded)
        :param convert_to_tensor: if True, return one large tensor; overrides
            convert_to_numpy (forwarded)
        :param tags_filters: list of tags to include in the representation
        :param random_data: number of random entries picked from the data
        """
        self.device = device
        self.configuration = tags_filters
        filtered_data = filter_data(self.file_path, tags_filters, random_data)
        self.transformed_data = list(filtered_data.values())
        self.transformed_data_ids = list(filtered_data.keys())
        self.embedding = self.model.encode(
            self.transformed_data,
            show_progress_bar=show_progress_bar,
            convert_to_tensor=convert_to_tensor,
            convert_to_numpy=convert_to_numpy,
            device=device,
        )

    def save_embedding(self, name: str):
        """Pickle the corpus texts, ids and embeddings to ``<name>.pkl``."""
        payload = {
            "transformed_data": self.transformed_data,
            "transformed_data_ids": self.transformed_data_ids,
            "embeddings": self.embedding,
        }
        with open(f"{name}.pkl", "wb") as handle:
            pickle.dump(payload, handle)

    def load_embedding(self, name: str):
        """Restore corpus texts, ids and embeddings from ``<name>.pkl``."""
        with open(f"{name}.pkl", "rb") as handle:
            payload = pickle.load(handle)
        self.transformed_data = payload["transformed_data"]
        self.transformed_data_ids = payload["transformed_data_ids"]
        self.embedding = payload["embeddings"]

    def kmean_clustering(self, clustering_model=None) -> Dict:
        """Cluster the current embedding with a sklearn clustering model.

        :param clustering_model: a model from ``sklearn.cluster``; defaults to
            a *fresh* ``AgglomerativeClustering(n_clusters=10)`` per call.
            (Bugfix: the old default was one shared instance created at import
            time — ``fit`` mutates the model, leaking state between calls.)
        :return: dict mapping document id -> cluster label
        """
        if clustering_model is None:
            clustering_model = AgglomerativeClustering(n_clusters=10)
        # NOTE(review): global matrix norm, not per-row norms — confirm
        # row-wise normalization was not intended.
        normalized_embedding = self.embedding / torch.linalg.norm(
            self.embedding, keepdims=True
        )
        normalized_embedding = normalized_embedding.cpu().numpy()
        clustering_model.fit(normalized_embedding)
        cluster_assignment = clustering_model.labels_
        clustered_sentences = {}
        for sentence_id, cluster_id in enumerate(cluster_assignment):
            clustered_sentences[self.transformed_data_ids[sentence_id]] = cluster_id
        return clustered_sentences

    def cosine_similarity(self, query: str, top_k: int = 5) -> Tuple[Any, Any]:
        """Return scores and indices of the documents most similar to *query*.

        :param query: query text
        :param top_k: number of documents to return
        """
        top_k = min(top_k, len(self.transformed_data))
        query_embedding = self.model.encode(
            query, convert_to_tensor=True, device=self.device
        )
        cos_scores = util.cos_sim(query_embedding, self.embedding)[0]
        # NOTE(review): k=top_k+1 returns one extra hit and raises when
        # top_k == corpus size — confirm the +1 is meant to skip a self-match.
        top_results = torch.topk(cos_scores, k=top_k + 1)
        scores = []
        idxs = []
        for score, idx in zip(top_results[0], top_results[1]):
            scores.append(score.item())
            idxs.append(idx.item())
        return scores, idxs
from __future__ import print_function, unicode_literals
from telegrambot.api.base import APIObject
class ReplyKeyboardMarkup(APIObject):
    """Custom reply keyboard shown to the user (Telegram Bot API).

    Fields:
        keyboard          -- array of button rows, each a list of strings
                             (required; TypeError default marks it mandatory)
        resize_keyboard   -- (optional) ask clients to shrink the keyboard
                             vertically for optimal fit; defaults to false,
                             i.e. same height as the standard keyboard
        one_time_keyboard -- (optional) hide the keyboard after first use;
                             defaults to false
        selective         -- (optional) show the keyboard only to targeted
                             users: @mentioned users, or — when the message is
                             a reply — the sender of the original message
    """

    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('keyboard', [[str]], TypeError),
        ('resize_keyboard', bool, False),
        ('one_time_keyboard', bool, False),
        ('selective', bool, False),
    )
class ReplyKeyboardHide(APIObject):
    """Ask clients to hide the current custom keyboard (Telegram Bot API).

    Custom keyboards normally stay visible until a bot sends a new one;
    one-time keyboards hide themselves after the user presses a button.

    Fields:
        hide_keyboard -- always True: requests clients to hide the keyboard
        selective     -- (optional) hide only for targeted users:
                         @mentioned users, or — when the message is a reply —
                         the sender of the original message
    """

    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('hide_keyboard', bool, True),
        ('selective', bool, False),
    )
class ForceReply(APIObject):
    """
    Upon receiving a message with this object, Telegram clients will display a reply interface to the user
    (act as if the user has selected the bot's message and tapped 'Reply').
    force_reply         True        Shows reply interface to the user, as if they manually selected the bot's message
                                    and tapped 'Reply'
    selective           Boolean     (Optional) Use this parameter if you want to force reply from specific users only.
                                    Targets:
                                    1) users that are @mentioned in the text of the Message object;
                                    2) if the bot's message is a reply (has reply_to_message_id),
                                    sender of the original message.
    """
    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('force_reply', bool, True),
        ('selective', bool, False),
    )
# telegrambot/api/keyboards.py
from __future__ import print_function, unicode_literals
from telegrambot.api.base import APIObject
class ReplyKeyboardMarkup(APIObject):
    """Custom reply keyboard shown to the user (Telegram Bot API).

    Fields:
        keyboard          -- array of button rows, each a list of strings
                             (required; TypeError default marks it mandatory)
        resize_keyboard   -- (optional) ask clients to shrink the keyboard
                             vertically for optimal fit; defaults to false
        one_time_keyboard -- (optional) hide the keyboard after first use
        selective         -- (optional) show the keyboard only to targeted
                             users: @mentioned users, or — when the message is
                             a reply — the sender of the original message
    """

    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('keyboard', [[str]], TypeError),
        ('resize_keyboard', bool, False),
        ('one_time_keyboard', bool, False),
        ('selective', bool, False),
    )
class ReplyKeyboardHide(APIObject):
    """Ask clients to hide the current custom keyboard (Telegram Bot API).

    Fields:
        hide_keyboard -- always True: requests clients to hide the keyboard
        selective     -- (optional) hide only for targeted users:
                         @mentioned users, or — when the message is a reply —
                         the sender of the original message
    """

    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('hide_keyboard', bool, True),
        ('selective', bool, False),
    )
class ForceReply(APIObject):
    """Show the user a reply interface, as if they tapped 'Reply' on the
    bot's message (Telegram Bot API).

    Fields:
        force_reply -- always True: requests the reply interface
        selective   -- (optional) force the reply only from targeted users:
                       @mentioned users, or — when the message is a reply —
                       the sender of the original message
    """

    # (attribute name, expected type, default) consumed by APIObject.
    _api_attrs = (
        ('force_reply', bool, True),
        ('selective', bool, False),
    )
from copy import copy
from unittest.mock import MagicMock
import pytest
import typing as t
from collections.abc import Callable
from tests.abc import BaseTestCase
from uzi import InjectorLookupError
from uzi.graph.nodes import MissingNode as Dependency, SimpleNode
from uzi.injectors import Injector
# Re-exported under the local alias used by the base test machinery.
Dependency = Dependency
# Shorthands for the pytest markers used in this module.
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
# _T doubles as the abstract dependency key in the fixtures below.
_T = t.TypeVar("_T")
# _T_Dep is the node type under test; _T_NewDep is its factory signature.
_T_Dep = t.TypeVar("_T_Dep", bound=Dependency, covariant=True)
_T_NewDep = Callable[..., _T_Dep]
class LookupErrorDependencyTests(BaseTestCase[Dependency]):
    @pytest.fixture
    def abstract(self):
        # The abstract dependency key used throughout: a bare TypeVar.
        return _T
    @pytest.fixture
    def concrete(self, value_setter):
        # A MagicMock spec'd to, and wrapping, the value_setter fixture.
        return MagicMock(value_setter, wraps=value_setter)
    def test_basic(self, new: _T_NewDep, cls: type[_T_Dep]):
        """A fresh missing node is a falsy Dependency of exactly `cls`."""
        subject = new()
        assert isinstance(subject, Dependency)
        assert subject.__class__ is cls
        # __slots__ must be declared on cls itself, not inherited.
        assert cls.__slots__ is cls.__dict__["__slots__"]
        # Missing nodes are falsy.
        assert not subject
        return subject
    def test_copy(self, new: _T_NewDep):
        """copy() preserves both the class and equality."""
        subject = new()
        cp = copy(subject)
        assert cp.__class__ is subject.__class__
        assert cp == subject
        return subject, cp
    def test_compare(self, new: _T_NewDep, abstract, mock_graph):
        """Equality/hash protocol: missing nodes compare equal to each other
        and to their abstract key, but never to a SimpleNode or an arbitrary
        object — even when the SimpleNode wraps the same abstract."""
        subject, subject_2 = new(), new()
        simp = SimpleNode(abstract, mock_graph)
        assert subject.__class__ is subject_2.__class__
        assert subject == subject_2 == abstract
        assert subject != object()
        assert subject != simp
        assert not subject == simp
        # Same key, yet the nodes above still differ by node kind.
        assert subject.abstract == simp.abstract
        assert not subject != subject_2 != abstract
        assert not subject == object()
        # Equal nodes must hash equal.
        assert hash(subject) == hash(subject_2)
        return subject, subject_2
    def test_immutable(self, new: _T_NewDep, immutable_attrs):
        # Every attribute in immutable_attrs must reject assignment.
        self.assert_immutable(new(), immutable_attrs)
    @xfail(raises=InjectorLookupError, strict=True)
    def test_bind(self, new: _T_NewDep, mock_injector: Injector):
        # Binding a missing node must raise InjectorLookupError
        # (strict xfail asserts the raise).
        subject = new()
        subject.bind(mock_injector)
# tests/graph/nodes/lookup_error_tests.py
from copy import copy
from unittest.mock import MagicMock
import pytest
import typing as t
from collections.abc import Callable
from tests.abc import BaseTestCase
from uzi import InjectorLookupError
from uzi.graph.nodes import MissingNode as Dependency, SimpleNode
from uzi.injectors import Injector
# Re-exported under the local alias used by the base test machinery.
Dependency = Dependency
# Shorthands for the pytest markers used in this module.
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
# _T doubles as the abstract dependency key in the fixtures below.
_T = t.TypeVar("_T")
# _T_Dep is the node type under test; _T_NewDep is its factory signature.
_T_Dep = t.TypeVar("_T_Dep", bound=Dependency, covariant=True)
_T_NewDep = Callable[..., _T_Dep]
class LookupErrorDependencyTests(BaseTestCase[Dependency]):
    """Behavioral tests for MissingNode: identity, equality/hash protocol,
    immutability and the InjectorLookupError raised on bind."""

    @pytest.fixture
    def abstract(self):
        # The abstract dependency key used throughout: a bare TypeVar.
        return _T

    @pytest.fixture
    def concrete(self, value_setter):
        # A MagicMock spec'd to, and wrapping, the value_setter fixture.
        return MagicMock(value_setter, wraps=value_setter)

    def test_basic(self, new: _T_NewDep, cls: type[_T_Dep]):
        """A fresh missing node is a falsy Dependency of exactly `cls`."""
        subject = new()
        assert isinstance(subject, Dependency)
        assert subject.__class__ is cls
        # __slots__ must be declared on cls itself, not inherited.
        assert cls.__slots__ is cls.__dict__["__slots__"]
        assert not subject
        return subject

    def test_copy(self, new: _T_NewDep):
        """copy() preserves both the class and equality."""
        subject = new()
        cp = copy(subject)
        assert cp.__class__ is subject.__class__
        assert cp == subject
        return subject, cp

    def test_compare(self, new: _T_NewDep, abstract, mock_graph):
        """Missing nodes compare equal to each other and to their abstract
        key, but never to a SimpleNode or an arbitrary object."""
        subject, subject_2 = new(), new()
        simp = SimpleNode(abstract, mock_graph)
        assert subject.__class__ is subject_2.__class__
        assert subject == subject_2 == abstract
        assert subject != object()
        assert subject != simp
        assert not subject == simp
        # Same key, yet the nodes differ by node kind.
        assert subject.abstract == simp.abstract
        assert not subject != subject_2 != abstract
        assert not subject == object()
        assert hash(subject) == hash(subject_2)
        return subject, subject_2

    def test_immutable(self, new: _T_NewDep, immutable_attrs):
        # Every attribute in immutable_attrs must reject assignment.
        self.assert_immutable(new(), immutable_attrs)

    @xfail(raises=InjectorLookupError, strict=True)
    def test_bind(self, new: _T_NewDep, mock_injector: Injector):
        # Binding a missing node must raise InjectorLookupError.
        subject = new()
        subject.bind(mock_injector)
import enum
from typing import Iterable, Optional, Tuple
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from database import BLOCKCHAIN, Database
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
import csv
class ViewMode(enum.Enum):
    """Selects which visualization View.view() renders."""
    ASCII_HISTOGRAM = "ascii_histogram"
    MAGIC_FILE_HISTOGRAM = "magic_file_histogram"
    IMGHDR_FILE_HISTOGRAM = "imghdr_file_histogram"
    RECORD_STATS = "record_stats"
def write_csv(file_name: str, row_1: Iterable, label_1: str, row_2: Iterable, label_2: str) -> None:
    """Write two parallel columns to ``<file_name>.csv``.

    The first row holds the labels; subsequent rows zip the two iterables
    (truncated to the shorter one).

    :param file_name: output path without the ``.csv`` extension
    """
    # newline="" per the csv module docs (avoids blank rows on Windows);
    # the with-block guarantees the handle is closed even on writer errors.
    with open(file_name + ".csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow([label_1, label_2])
        writer.writerows(zip(row_1, row_2))
class View:
def get_matplotlib_color_from_blockchain(self) -> str:
if self._blockchain is not None:
if "monero" in self._blockchain.value:
return "black"
elif "eth" in self._blockchain.value:
return "blue"
elif "bitcoin" in self._blockchain.value:
return "orange"
return "mediumblue"
    def ascii_histogram_complete(self):
        """Render the three-panel string-length histogram and save it to PNG/CSV.

        Panels: (1) count per detected string length, all lengths, with sparse
        tick labels; (2) the same truncated to the first 34 lengths; (3) count
        of strings with at least a given minimum length (10..43). Falls back
        to the two-panel ascii_histogram() for small result sets.
        """
        result = self._database.ascii_histogram(self._blockchain)
        lengths = np.array(list(map(lambda item: item[0], result)))
        counts = np.array(list(map(lambda item: item[1], result)))
        write_csv("ascii_histogram_" + self._blockchain.value, lengths, "lengths", counts, "counts")
        # With few distinct lengths the compact two-panel plot suffices.
        if len(counts) < 35:
            return self.ascii_histogram()
        # Re-bin onto a gapless axis from length 10 up to the maximum seen.
        lengths_histogram_no_gaps = []
        counts_histogram_no_gaps = []
        for i in range(10, np.max(lengths) + 1):
            lengths_histogram_no_gaps.append(i)
            if i in lengths:
                counts_histogram_no_gaps.append(int(counts[np.where(lengths == i)]))
            else:
                counts_histogram_no_gaps.append(0)
        # accumulated_string_count[j] = number of strings of length >= 10 + j.
        accumulated_string_count = []
        for i in range(34):
            accumulated_string_count.append(np.sum(counts_histogram_no_gaps[i:]))
        accumulated_string_count = np.array(accumulated_string_count)
        minimum_string_lengths = np.arange(10, 10 + 34)
        x3_pos = np.arange(34)
        color = self.get_matplotlib_color_from_blockchain()
        print(accumulated_string_count, minimum_string_lengths)
        x_pos = np.arange(len(lengths))
        # Sparse x tick labels: first, last, and ~15 evenly spaced in between.
        compact_length_labels = [" " for _ in range(len(lengths))]
        for i in range(len(lengths)):
            if i == 0:
                compact_length_labels[0] = str(lengths[0])
            # add 10 more labels interspersed in the histogram, if the last label is too close omit it
            if (i % (int(len(lengths) / 15))) == 0 and i < (
                len(lengths) - len(lengths) / 30
            ):
                compact_length_labels[i] = lengths[i]
        compact_length_labels[-1] = str(lengths[-1])
        compact_length_labels = np.array(compact_length_labels)
        print(
            x_pos,
            lengths,
            compact_length_labels,
            len(x_pos),
            len(lengths),
            len(compact_length_labels),
        )
        fig_axis_tuple: Tuple[Figure, Tuple[Axes, Axes, Axes]] = plt.subplots(
            3, figsize=(12, 10)
        )
        fig, (ax1, ax2, ax3) = fig_axis_tuple
        # Panel 1: full histogram, log-scaled counts.
        ax1.bar(x_pos, counts, color=color)
        ax1.set_xticks(x_pos, compact_length_labels)
        ax1.set_xlabel("string length")
        ax1.set_yscale("log")
        ax1.set_ylabel("counts")
        # ax1.set_title(self._blockchain.value + " Count of each detected string length")
        plt.setp(ax1.get_xticklabels(), fontsize=7, rotation="vertical")
        # truncate the histogram at 34 entries
        if len(lengths) > 34:
            lengths = lengths[:34]
        if len(counts) > 34:
            counts = counts[:34]
        x_pos = np.arange(len(lengths))
        # Panel 2: truncated histogram (first 34 lengths).
        ax2.bar(x_pos, counts, color=color)
        ax2.set_xticks(x_pos, lengths)
        ax2.set_xlabel("string length")
        ax2.set_yscale("log")
        ax2.set_ylabel("counts")
        # ax2.set_title(self._blockchain.value + " Truncated count of each detected string length")
        plt.setp(ax2.get_xticklabels(), fontsize=7, rotation="vertical")
        print(
            x3_pos,
            accumulated_string_count,
            minimum_string_lengths,
            len(x3_pos),
            len(accumulated_string_count),
            len(minimum_string_lengths),
        )
        # Panel 3: cumulative counts per minimum string length.
        ax3.bar(x3_pos, accumulated_string_count, color=color)
        ax3.set_xticks(x3_pos, minimum_string_lengths)
        ax3.set_xlabel("minimum string length")
        ax3.set_yscale("log")
        ax3.set_ylabel("counts")
        # ax2.set_title(self._blockchain.value + " Count of detected strings with a minimum length")
        plt.setp(ax3.get_xticklabels(), fontsize=7, rotation="vertical")
        plt.subplots_adjust(hspace=0.35)
        plt.savefig("ascii_histogram_" + self._blockchain.value + ".png", dpi=600)
        plt.show()
    def ascii_histogram(self):
        """Render the compact two-panel string-length histogram (PNG only).

        Panel 1: count per string length, truncated to the first 34 lengths;
        panel 2: count of strings with at least a given minimum length.
        """
        result = self._database.ascii_histogram(self._blockchain)
        lengths = np.array(list(map(lambda item: item[0], result)))
        counts = np.array(list(map(lambda item: item[1], result)))
        # Re-bin onto a gapless axis from length 10 up to the maximum seen.
        lengths_histogram_no_gaps = []
        counts_histogram_no_gaps = []
        for i in range(10, np.max(lengths) + 1):
            lengths_histogram_no_gaps.append(i)
            if i in lengths:
                counts_histogram_no_gaps.append(int(counts[np.where(lengths == i)]))
            else:
                counts_histogram_no_gaps.append(0)
        # accumulated_string_count[j] = number of strings of length >= 10 + j.
        accumulated_string_count = []
        for i in range(34):
            accumulated_string_count.append(np.sum(counts_histogram_no_gaps[i:]))
        accumulated_string_count = np.array(accumulated_string_count)
        minimum_string_lengths = np.arange(10, 10 + 34)
        x2_pos = np.arange(34)
        color = self.get_matplotlib_color_from_blockchain()
        print(accumulated_string_count, minimum_string_lengths)
        # truncate the histogram at 34 entries
        if len(lengths) > 34:
            lengths = lengths[:34]
        if len(counts) > 34:
            counts = counts[:34]
        x_pos = np.arange(len(lengths))
        fig_axis_tuple: Tuple[Figure, Tuple[Axes, Axes]] = plt.subplots(2)
        fig, (ax1, ax2) = fig_axis_tuple
        # Panel 1: per-length counts, log scale.
        ax1.bar(x_pos, counts, color=color)
        ax1.set_xticks(x_pos, lengths)
        ax1.set_xlabel("string length")
        ax1.set_yscale("log")
        ax1.set_ylabel("counts")
        # ax1.set_title(self._blockchain.value + " Count of each detected string length")
        plt.setp(ax1.get_xticklabels(), fontsize=7, rotation="vertical")
        # Panel 2: cumulative counts per minimum string length.
        ax2.bar(x2_pos, accumulated_string_count, color=color)
        ax2.set_xticks(x2_pos, minimum_string_lengths)
        ax2.set_xlabel("minimum string length")
        ax2.set_yscale("log")
        ax2.set_ylabel("counts")
        # ax2.set_title(self._blockchain.value + " Count of detected strings with a minimum length")
        plt.setp(ax2.get_xticklabels(), fontsize=7, rotation="vertical")
        plt.subplots_adjust(hspace=0.43)
        plt.savefig("ascii_histogram_" + self._blockchain.value + ".png", dpi=600)
        plt.show()
def magic_file_histogram(self):
result = self._database.magic_file_histogram(self._blockchain)
file_types = np.array(list(map(lambda item: item[0], result)))
counts = np.array(list(map(lambda item: item[1], result)))
# remove some more magic file type entries
filtered_file_types = []
filtered_counts = []
utf_count = 0
jpeg_count = 0
png_count = 0
gringotts_count = 0
dif_count = 0
tar_archive_count = 0
openssl_encrypted_count = 0
pgp_encrypted_count = 0
pgp_key_count = 0
gpg_encrypted_count = 0
gpg_key_count = 0
os_2_count = 0
lzma_count = 0
gif_count = 0
pdf_count = 0
mp3_count = 0
msx_count = 0
musepack_count = 0
for file_type, count in zip(file_types, counts):
if "GIF" in file_type:
gif_count += count
continue
if "PDF" in file_type:
pdf_count += count
continue
if "LZMA" in file_type:
lzma_count += count
continue
if "UTF" in file_type:
utf_count += count
continue
if "JPEG" in file_type:
jpeg_count += count
continue
if "PNG" in file_type:
png_count += count
continue
if "Gringotts" in file_type:
gringotts_count += count
continue
if "DIF" in file_type:
dif_count += count
continue
if "tar archive" in file_type:
tar_archive_count += count
continue
if "openssl" in file_type:
openssl_encrypted_count += count
continue
if "OS/2" in file_type:
os_2_count += count
continue
if "PGP" in file_type and "encrypted" in file_type:
pgp_encrypted_count += count
continue
if "PGP" in file_type and "key" in file_type:
pgp_key_count += count
continue
if "GPG" in file_type and "encrypted" in file_type:
gpg_encrypted_count += count
continue
if "GPG" in file_type and "key" in file_type:
gpg_key_count += count
continue
if "Audio file with ID3 version" in file_type or "MP3" in file_type:
mp3_count += count
continue
if "MSX" in file_type:
msx_count += count
continue
if "Musepack" in file_type:
musepack_count += count
continue
if (
"Windows metafile" in file_type
or "TOC sound file" in file_type
or "Bacula volume" in file_type
or "Concise Binary Object Representation" in file_type
or "DEC SRC" in file_type
or "EdLib" in file_type
or "SPARC" in file_type
or "ispell hash" in file_type
or "SoundBlaster" in file_type
or "Squeak image" in file_type
or "Windows Precom" in file_type
or "Macintosh MFS data" in file_type
or "HP PCL" in file_type
or "core file (Xenix)" in file_type
or "compiled Lisp" in file_type
or "Zebra Metafile" in file_type
or "StarOffice Gallery" in file_type
or "Minix filesystem" in file_type
or "Macintosh HFS" in file_type
or "MacBinary" in file_type
or "Embedded OpenType" in file_type
or "DIY-Thermocam" in file_type
or "Apple HFS" in file_type
or "object file" in file_type
or "b.out" in file_type
or "RISC OS" in file_type
or "MMDF" in file_type
or "Lotus" in file_type
or "FuseCompress" in file_type
or "FIGlet" in file_type
or "AppleDouble" in file_type
or "AppleSingle" in file_type
or "MED_Song" in file_type
or "Android binary" in file_type
or "GDSII" in file_type
or "SunOS" in file_type
or "AppledDouble" in file_type
or "Core file" in file_type
or "MAthematica" in file_type
or "Berkeley DB" in file_type
or "Microstation" in file_type
or "overlay object file" in file_type
or "LADS" in file_type
or "Netscape" in file_type
or "ESRI Shapefile" in file_type
or "Cytovision" in file_type
or "i960" in file_type
or "ddis" in file_type
or "SPEC" in file_type
or "MMFD" in file_type
or "AHX" in file_type
or "libfprint" in file_type
or "SeqBox" in file_type
or "Psion" in file_type
or "PCP compiled" in file_type
or "separate object" in file_type
or "Compiled XKB" in file_type
or "dar archive" in file_type
or "cisco" in file_type
or "Symbian" in file_type
or "Spectrum .TAP" in file_type
or "StuffIt" in file_type
or "Spectrum" in file_type
or "Spectrum" in file_type
or "RAD" in file_type
or "Psion Series" in file_type
or "Progressive Graphics" in file_type
or "Palm" in file_type
or "LFS" in file_type
or "GEM" in file_type
or "ESRI Shapefile" in file_type
or "keymap" in file_type
or "Aster*x" in file_type
):
continue
if count == 1:
continue
filtered_file_types.append(file_type)
filtered_counts.append(count)
if utf_count > 0:
filtered_file_types.append("UTF-8")
filtered_counts.append(utf_count)
if jpeg_count > 0:
filtered_file_types.append("JPEG image data")
filtered_counts.append(jpeg_count)
if png_count > 0:
filtered_file_types.append("PNG image data")
filtered_counts.append(png_count)
if gringotts_count > 0:
filtered_file_types.append("Gringotts data file")
filtered_counts.append(gringotts_count)
if dif_count > 0:
filtered_file_types.append("DIF (DVCPRO) movie file")
filtered_counts.append(dif_count)
if tar_archive_count > 0:
filtered_file_types.append("tar archive")
filtered_counts.append(tar_archive_count)
if openssl_encrypted_count > 0:
filtered_file_types.append("openssl enc'd data")
filtered_counts.append(openssl_encrypted_count)
if os_2_count > 0:
filtered_file_types.append("OS/2 graphic")
filtered_counts.append(os_2_count)
if pgp_encrypted_count > 0:
filtered_file_types.append("PGP encrypted data")
filtered_counts.append(pgp_encrypted_count)
if pgp_key_count > 0:
filtered_file_types.append("PGP key")
filtered_counts.append(pgp_key_count)
if gpg_encrypted_count > 0:
filtered_file_types.append("GPG encrypted data")
filtered_counts.append(pgp_encrypted_count)
if gpg_key_count > 0:
filtered_file_types.append("GPG key")
filtered_counts.append(gpg_key_count)
if lzma_count > 0:
filtered_file_types.append("LZMA compressed d.")
filtered_counts.append(lzma_count)
if gif_count > 0:
filtered_file_types.append("GIF image")
filtered_counts.append(gif_count)
if pdf_count > 0:
filtered_file_types.append("PDF document")
filtered_counts.append(pdf_count)
if mp3_count > 0:
filtered_file_types.append("MP3 audio")
filtered_counts.append(mp3_count)
if msx_count > 0:
filtered_file_types.append("MSX music file")
filtered_counts.append(msx_count)
if musepack_count > 0:
filtered_file_types.append("Musepack audio")
filtered_counts.append(musepack_count)
file_types = np.array(filtered_file_types)
counts = np.array(filtered_counts)
write_csv("magic_file_histogram_" + self._blockchain.value, file_types, "file_types", counts, "counts")
print(file_types, counts, len(file_types), len(counts))
truncated_file_types = []
for file_type in file_types:
if len(file_type) > 20:
truncated_file_types.append(file_type[:18] + ".")
else:
truncated_file_types.append(file_type)
x_pos = np.arange(len(file_types))
color = self.get_matplotlib_color_from_blockchain()
fig_axis_tuple: Tuple[Figure, Axes] = plt.subplots(1)
fig, ax1 = fig_axis_tuple
ax1.bar(x_pos, counts, color=color)
ax1.set_xticks(x_pos, truncated_file_types)
ax1.set_yscale("log")
ax1.set_ylabel("counts")
# ax1.set_title(self._blockchain.value + " Count of magic detected file types")
plt.setp(ax1.get_xticklabels(), fontsize=12, rotation="vertical")
plt.subplots_adjust(bottom=0.41)
plt.savefig("magic_file_histogram_" + self._blockchain.value + ".png", dpi=600)
plt.show()
def imghdr_file_histogram(self):
    """Render a log-scale bar chart of imghdr-detected image types.

    Queries the database for (file_type, count) pairs for the current
    blockchain, dumps them to a CSV, then saves the histogram as a PNG
    and opens an interactive window.
    """
    rows = self._database.imghdr_file_histogram(self._blockchain)
    file_types = np.array([row[0] for row in rows])
    counts = np.array([row[1] for row in rows])
    print(file_types, counts, len(file_types), len(counts))
    write_csv("imghdr_file_histogram_" + self._blockchain.value, file_types, "file_types", counts, "counts")
    # Clip long labels so the x axis stays readable.
    truncated_file_types = [
        name[:19] + "." if len(name) > 20 else name for name in file_types
    ]
    positions = np.arange(len(file_types))
    bar_color = self.get_matplotlib_color_from_blockchain()
    fig, axis = plt.subplots(1)
    axis.bar(positions, counts, color=bar_color)
    axis.set_xticks(positions, truncated_file_types)
    axis.set_yscale("log")
    axis.set_ylabel("counts")
    plt.setp(axis.get_xticklabels(), fontsize=12, rotation="vertical")
    plt.savefig("imghdr_file_histogram_" + self._blockchain.value + ".png", dpi=600)
    plt.show()
def __init__(self, blockchain: Optional[BLOCKCHAIN], database: Database):
self._blockchain = blockchain
self._database = database
def view(self, mode: ViewMode) -> None:
    """Dispatch to the visualization selected by *mode*.

    Unknown modes are silently ignored (no ``else`` branch), matching
    the original behavior.
    """
    if mode == ViewMode.ASCII_HISTOGRAM:
        self.ascii_histogram_complete()
    elif mode == ViewMode.IMGHDR_FILE_HISTOGRAM:
        self.imghdr_file_histogram()
    elif mode == ViewMode.MAGIC_FILE_HISTOGRAM:
        self.magic_file_histogram()
    elif mode == ViewMode.RECORD_STATS:
        stats = self._database.get_record_statistics(self._blockchain)
        print(stats)


import enum
from typing import Iterable, Optional, Tuple
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from database import BLOCKCHAIN, Database
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
import csv
class ViewMode(enum.Enum):
    """Selectable visualizations; values match the CLI mode strings."""

    ASCII_HISTOGRAM = "ascii_histogram"
    MAGIC_FILE_HISTOGRAM = "magic_file_histogram"
    IMGHDR_FILE_HISTOGRAM = "imghdr_file_histogram"
    RECORD_STATS = "record_stats"
def write_csv(file_name: str, row_1: Iterable, label_1: str, row_2: Iterable, label_2: str) -> None:
    """Write two parallel columns to ``<file_name>.csv``.

    The first row holds the two column labels; subsequent rows pair the
    elements of *row_1* and *row_2* (zip truncates to the shorter input).
    """
    # newline="" is required by the csv module so it controls line
    # endings itself (avoids blank rows on Windows); the context manager
    # closes the handle even if a write raises — the original leaked it.
    with open(file_name + ".csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow([label_1, label_2])
        writer.writerows(zip(row_1, row_2))
class View:
def get_matplotlib_color_from_blockchain(self) -> str:
if self._blockchain is not None:
if "monero" in self._blockchain.value:
return "black"
elif "eth" in self._blockchain.value:
return "blue"
elif "bitcoin" in self._blockchain.value:
return "orange"
return "mediumblue"
def ascii_histogram_complete(self):
    """Plot three log-scale views of the ASCII-string-length histogram.

    Panels: (1) count per detected string length over the full range,
    (2) the same truncated to the first 34 lengths, and (3) cumulative
    counts of strings with at least a given minimum length.  Falls back
    to the two-panel ascii_histogram() when there are fewer than 35
    distinct lengths.  Side effects: writes a CSV, saves a PNG, and
    opens an interactive window.
    """
    result = self._database.ascii_histogram(self._blockchain)
    # result presumably yields (length, count) pairs — TODO confirm schema.
    lengths = np.array(list(map(lambda item: item[0], result)))
    counts = np.array(list(map(lambda item: item[1], result)))
    write_csv("ascii_histogram_" + self._blockchain.value, lengths, "lengths", counts, "counts")
    if len(counts) < 35:
        # Too few distinct lengths for the three-panel layout (and for
        # the label-thinning arithmetic below, which divides by len/15).
        return self.ascii_histogram()
    # Densify the histogram: one entry per length from 10 upward, with 0
    # where a length is absent.  Assumes 10 is the minimum recorded
    # string length — TODO confirm against the extractor.
    lengths_histogram_no_gaps = []
    counts_histogram_no_gaps = []
    for i in range(10, np.max(lengths) + 1):
        lengths_histogram_no_gaps.append(i)
        if i in lengths:
            # NOTE(review): int(...) assumes exactly one count per length.
            counts_histogram_no_gaps.append(int(counts[np.where(lengths == i)]))
        else:
            counts_histogram_no_gaps.append(0)
    # Cumulative tail sums: entry i = number of strings of length >= 10 + i.
    accumulated_string_count = []
    for i in range(34):
        accumulated_string_count.append(np.sum(counts_histogram_no_gaps[i:]))
    accumulated_string_count = np.array(accumulated_string_count)
    minimum_string_lengths = np.arange(10, 10 + 34)
    x3_pos = np.arange(34)
    color = self.get_matplotlib_color_from_blockchain()
    print(accumulated_string_count, minimum_string_lengths)
    x_pos = np.arange(len(lengths))
    # Thin the x labels of panel 1: always keep the first and last, plus
    # roughly every len/15-th label, skipping any that would crowd the
    # final one.
    compact_length_labels = [" " for _ in range(len(lengths))]
    for i in range(len(lengths)):
        if i == 0:
            compact_length_labels[0] = str(lengths[0])
        # add 10 more labels interspersed in the histogram, if the last label is too close omit it
        if (i % (int(len(lengths) / 15))) == 0 and i < (
            len(lengths) - len(lengths) / 30
        ):
            compact_length_labels[i] = lengths[i]
    compact_length_labels[-1] = str(lengths[-1])
    compact_length_labels = np.array(compact_length_labels)
    print(
        x_pos,
        lengths,
        compact_length_labels,
        len(x_pos),
        len(lengths),
        len(compact_length_labels),
    )
    fig_axis_tuple: Tuple[Figure, Tuple[Axes, Axes, Axes]] = plt.subplots(
        3, figsize=(12, 10)
    )
    fig, (ax1, ax2, ax3) = fig_axis_tuple
    # Panel 1: full range of string lengths.
    ax1.bar(x_pos, counts, color=color)
    ax1.set_xticks(x_pos, compact_length_labels)
    ax1.set_xlabel("string length")
    ax1.set_yscale("log")
    ax1.set_ylabel("counts")
    # ax1.set_title(self._blockchain.value + " Count of each detected string length")
    plt.setp(ax1.get_xticklabels(), fontsize=7, rotation="vertical")
    # truncate the histogram at 34 entries
    if len(lengths) > 34:
        lengths = lengths[:34]
    if len(counts) > 34:
        counts = counts[:34]
    x_pos = np.arange(len(lengths))
    # Panel 2: first 34 lengths only (arrays were truncated in place above).
    ax2.bar(x_pos, counts, color=color)
    ax2.set_xticks(x_pos, lengths)
    ax2.set_xlabel("string length")
    ax2.set_yscale("log")
    ax2.set_ylabel("counts")
    # ax2.set_title(self._blockchain.value + " Truncated count of each detected string length")
    plt.setp(ax2.get_xticklabels(), fontsize=7, rotation="vertical")
    print(
        x3_pos,
        accumulated_string_count,
        minimum_string_lengths,
        len(x3_pos),
        len(accumulated_string_count),
        len(minimum_string_lengths),
    )
    # Panel 3: strings with at least a given minimum length.
    ax3.bar(x3_pos, accumulated_string_count, color=color)
    ax3.set_xticks(x3_pos, minimum_string_lengths)
    ax3.set_xlabel("minimum string length")
    ax3.set_yscale("log")
    ax3.set_ylabel("counts")
    # ax2.set_title(self._blockchain.value + " Count of detected strings with a minimum length")
    plt.setp(ax3.get_xticklabels(), fontsize=7, rotation="vertical")
    plt.subplots_adjust(hspace=0.35)
    plt.savefig("ascii_histogram_" + self._blockchain.value + ".png", dpi=600)
    plt.show()
def ascii_histogram(self):
    """Plot two log-scale views of the ASCII-string-length histogram.

    Panel 1: counts for the first 34 detected string lengths.  Panel 2:
    cumulative counts of strings with at least a given minimum length.
    Side effects: saves a PNG and opens an interactive window (no CSV —
    the caller ascii_histogram_complete() already wrote it).
    """
    result = self._database.ascii_histogram(self._blockchain)
    # result presumably yields (length, count) pairs — TODO confirm schema.
    lengths = np.array(list(map(lambda item: item[0], result)))
    counts = np.array(list(map(lambda item: item[1], result)))
    # Densify: one entry per length from 10 upward, 0 where absent.
    # Assumes 10 is the minimum recorded string length — TODO confirm.
    lengths_histogram_no_gaps = []
    counts_histogram_no_gaps = []
    for i in range(10, np.max(lengths) + 1):
        lengths_histogram_no_gaps.append(i)
        if i in lengths:
            # NOTE(review): int(...) assumes exactly one count per length.
            counts_histogram_no_gaps.append(int(counts[np.where(lengths == i)]))
        else:
            counts_histogram_no_gaps.append(0)
    # Cumulative tail sums: entry i = number of strings of length >= 10 + i.
    accumulated_string_count = []
    for i in range(34):
        accumulated_string_count.append(np.sum(counts_histogram_no_gaps[i:]))
    accumulated_string_count = np.array(accumulated_string_count)
    minimum_string_lengths = np.arange(10, 10 + 34)
    x2_pos = np.arange(34)
    color = self.get_matplotlib_color_from_blockchain()
    print(accumulated_string_count, minimum_string_lengths)
    # truncate the histogram at 34 entries
    if len(lengths) > 34:
        lengths = lengths[:34]
    if len(counts) > 34:
        counts = counts[:34]
    x_pos = np.arange(len(lengths))
    fig_axis_tuple: Tuple[Figure, Tuple[Axes, Axes]] = plt.subplots(2)
    fig, (ax1, ax2) = fig_axis_tuple
    # Panel 1: per-length counts (truncated above).
    ax1.bar(x_pos, counts, color=color)
    ax1.set_xticks(x_pos, lengths)
    ax1.set_xlabel("string length")
    ax1.set_yscale("log")
    ax1.set_ylabel("counts")
    # ax1.set_title(self._blockchain.value + " Count of each detected string length")
    plt.setp(ax1.get_xticklabels(), fontsize=7, rotation="vertical")
    # Panel 2: strings with at least a given minimum length.
    ax2.bar(x2_pos, accumulated_string_count, color=color)
    ax2.set_xticks(x2_pos, minimum_string_lengths)
    ax2.set_xlabel("minimum string length")
    ax2.set_yscale("log")
    ax2.set_ylabel("counts")
    # ax2.set_title(self._blockchain.value + " Count of detected strings with a minimum length")
    plt.setp(ax2.get_xticklabels(), fontsize=7, rotation="vertical")
    plt.subplots_adjust(hspace=0.43)
    plt.savefig("ascii_histogram_" + self._blockchain.value + ".png", dpi=600)
    plt.show()
def magic_file_histogram(self):
    """Plot a log-scale bar chart of libmagic-detected file types.

    Pulls (file_type, count) pairs from the database, merges the many
    per-variant magic labels into aggregate buckets (e.g. every
    "JPEG ..." string becomes one "JPEG image data" bar), drops labels
    known to be misdetections as well as singleton counts, writes the
    result to CSV, then saves and shows the chart.

    Bug fixed vs. the original: the "GPG encrypted data" bar was
    plotted with the PGP-encrypted count instead of the GPG one.
    """
    result = self._database.magic_file_histogram(self._blockchain)
    file_types = np.array(list(map(lambda item: item[0], result)))
    counts = np.array(list(map(lambda item: item[1], result)))
    # Aggregation rules, evaluated in order; the first matching rule
    # claims an entry.  Order is preserved from the original code, so
    # e.g. "GIF" wins before the generic noise filter sees the label.
    detectors = [
        ("gif", lambda ft: "GIF" in ft),
        ("pdf", lambda ft: "PDF" in ft),
        ("lzma", lambda ft: "LZMA" in ft),
        ("utf", lambda ft: "UTF" in ft),
        ("jpeg", lambda ft: "JPEG" in ft),
        ("png", lambda ft: "PNG" in ft),
        ("gringotts", lambda ft: "Gringotts" in ft),
        ("dif", lambda ft: "DIF" in ft),
        ("tar", lambda ft: "tar archive" in ft),
        ("openssl", lambda ft: "openssl" in ft),
        ("os2", lambda ft: "OS/2" in ft),
        ("pgp_enc", lambda ft: "PGP" in ft and "encrypted" in ft),
        ("pgp_key", lambda ft: "PGP" in ft and "key" in ft),
        ("gpg_enc", lambda ft: "GPG" in ft and "encrypted" in ft),
        ("gpg_key", lambda ft: "GPG" in ft and "key" in ft),
        ("mp3", lambda ft: "Audio file with ID3 version" in ft or "MP3" in ft),
        ("msx", lambda ft: "MSX" in ft),
        ("musepack", lambda ft: "Musepack" in ft),
    ]
    # Substrings of magic labels we treat as misdetected noise.  Exact
    # duplicates from the original list were removed; membership
    # semantics are unchanged.
    noise_substrings = (
        "Windows metafile", "TOC sound file", "Bacula volume",
        "Concise Binary Object Representation", "DEC SRC", "EdLib",
        "SPARC", "ispell hash", "SoundBlaster", "Squeak image",
        "Windows Precom", "Macintosh MFS data", "HP PCL",
        "core file (Xenix)", "compiled Lisp", "Zebra Metafile",
        "StarOffice Gallery", "Minix filesystem", "Macintosh HFS",
        "MacBinary", "Embedded OpenType", "DIY-Thermocam", "Apple HFS",
        "object file", "b.out", "RISC OS", "MMDF", "Lotus",
        "FuseCompress", "FIGlet", "AppleDouble", "AppleSingle",
        "MED_Song", "Android binary", "GDSII", "SunOS", "AppledDouble",
        "Core file", "MAthematica", "Berkeley DB", "Microstation",
        "overlay object file", "LADS", "Netscape", "ESRI Shapefile",
        "Cytovision", "i960", "ddis", "SPEC", "MMFD", "AHX",
        "libfprint", "SeqBox", "Psion", "PCP compiled",
        "separate object", "Compiled XKB", "dar archive", "cisco",
        "Symbian", "Spectrum .TAP", "StuffIt", "Spectrum", "RAD",
        "Psion Series", "Progressive Graphics", "Palm", "LFS", "GEM",
        "keymap", "Aster*x",
    )
    aggregated = {key: 0 for key, _ in detectors}
    filtered_file_types = []
    filtered_counts = []
    for file_type, count in zip(file_types, counts):
        for key, matches in detectors:
            if matches(file_type):
                aggregated[key] += count
                break
        else:
            if any(marker in file_type for marker in noise_substrings):
                continue
            if count == 1:
                # Singleton detections only clutter the chart.
                continue
            filtered_file_types.append(file_type)
            filtered_counts.append(count)
    # Append the aggregate buckets in the original (fixed) chart order.
    display_labels = [
        ("utf", "UTF-8"),
        ("jpeg", "JPEG image data"),
        ("png", "PNG image data"),
        ("gringotts", "Gringotts data file"),
        ("dif", "DIF (DVCPRO) movie file"),
        ("tar", "tar archive"),
        ("openssl", "openssl enc'd data"),
        ("os2", "OS/2 graphic"),
        ("pgp_enc", "PGP encrypted data"),
        ("pgp_key", "PGP key"),
        ("gpg_enc", "GPG encrypted data"),
        ("gpg_key", "GPG key"),
        ("lzma", "LZMA compressed d."),
        ("gif", "GIF image"),
        ("pdf", "PDF document"),
        ("mp3", "MP3 audio"),
        ("msx", "MSX music file"),
        ("musepack", "Musepack audio"),
    ]
    for key, label in display_labels:
        if aggregated[key] > 0:
            filtered_file_types.append(label)
            # BUG FIX: the GPG-encrypted row previously appended
            # pgp_encrypted_count here; each bucket now uses its own count.
            filtered_counts.append(aggregated[key])
    file_types = np.array(filtered_file_types)
    counts = np.array(filtered_counts)
    write_csv("magic_file_histogram_" + self._blockchain.value, file_types, "file_types", counts, "counts")
    print(file_types, counts, len(file_types), len(counts))
    # Clip long labels so the x axis stays readable.
    truncated_file_types = [
        ft[:18] + "." if len(ft) > 20 else ft for ft in file_types
    ]
    x_pos = np.arange(len(file_types))
    color = self.get_matplotlib_color_from_blockchain()
    fig, ax1 = plt.subplots(1)
    ax1.bar(x_pos, counts, color=color)
    ax1.set_xticks(x_pos, truncated_file_types)
    ax1.set_yscale("log")
    ax1.set_ylabel("counts")
    plt.setp(ax1.get_xticklabels(), fontsize=12, rotation="vertical")
    plt.subplots_adjust(bottom=0.41)
    plt.savefig("magic_file_histogram_" + self._blockchain.value + ".png", dpi=600)
    plt.show()
def imghdr_file_histogram(self):
    """Render a log-scale bar chart of imghdr-detected image types.

    Queries the database for (file_type, count) pairs for the current
    blockchain, dumps them to a CSV, then saves the histogram as a PNG
    and opens an interactive window.
    """
    rows = self._database.imghdr_file_histogram(self._blockchain)
    file_types = np.array([row[0] for row in rows])
    counts = np.array([row[1] for row in rows])
    print(file_types, counts, len(file_types), len(counts))
    write_csv("imghdr_file_histogram_" + self._blockchain.value, file_types, "file_types", counts, "counts")
    # Clip long labels so the x axis stays readable.
    truncated_file_types = [
        name[:19] + "." if len(name) > 20 else name for name in file_types
    ]
    positions = np.arange(len(file_types))
    bar_color = self.get_matplotlib_color_from_blockchain()
    fig, axis = plt.subplots(1)
    axis.bar(positions, counts, color=bar_color)
    axis.set_xticks(positions, truncated_file_types)
    axis.set_yscale("log")
    axis.set_ylabel("counts")
    plt.setp(axis.get_xticklabels(), fontsize=12, rotation="vertical")
    plt.savefig("imghdr_file_histogram_" + self._blockchain.value + ".png", dpi=600)
    plt.show()
def __init__(self, blockchain: Optional[BLOCKCHAIN], database: Database):
self._blockchain = blockchain
self._database = database
def view(self, mode: ViewMode) -> None:
    """Dispatch to the visualization selected by *mode*.

    Unknown modes are silently ignored (no ``else`` branch), matching
    the original behavior.  The trailing dataset residue that was fused
    onto the final line ("| 0.669853 | 0.357007") has been removed — it
    was not valid Python.
    """
    if mode == ViewMode.ASCII_HISTOGRAM:
        self.ascii_histogram_complete()
    elif mode == ViewMode.IMGHDR_FILE_HISTOGRAM:
        self.imghdr_file_histogram()
    elif mode == ViewMode.MAGIC_FILE_HISTOGRAM:
        self.magic_file_histogram()
    elif mode == ViewMode.RECORD_STATS:
        stats = self._database.get_record_statistics(self._blockchain)
        print(stats)