text stringlengths 957 885k |
|---|
<reponame>nppo/search-portal<filename>harvester/core/tests/views/test_extension.py
from django.test import TestCase
from django.contrib.auth.models import User
class TestExtensionAPI(TestCase):
    """
    Exercises CRUD behaviour of the /api/v1/extension/ endpoint:
    listing, creating parent and non-parent Extensions, updating,
    deleting and the various validation failure modes.
    """

    fixtures = ["datasets-history"]

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # User the test client authenticates as (see setUp).
        cls.user = User.objects.create(username="supersurf")
        # Properties only a parent Extension may carry, because a parent
        # has no backing Document to provide them.
        cls.parent_properties = {
            "title": "title",
            "description": "description",
            "language": "nl",
            "copyright": "cc-by-40"
        }
        # Properties any Extension may carry.
        cls.extension_properties = {
            "authors": [
                {"name": "<NAME>"}
            ],
            "parties": [
                {"name": "I love the 90's"}
            ],
            "themes": [
                {"label": "90's"}
            ],
            "keywords": [
                {"label": "90's"}
            ]
        }

    def setUp(self):
        super().setUp()
        self.client.force_login(self.user)

    def assert_properties(self, properties, external_id="external-id", is_parent=False):
        """Check external_id, parent-only properties and regular extension properties."""
        # Basic identification first.
        self.assertEqual(properties.pop("external_id"), external_id)
        # Parent-only properties must be present iff this Extension is a parent.
        for key, expected in self.parent_properties.items():
            if is_parent:
                self.assertEqual(properties.pop(key), expected)
            else:
                self.assertNotIn(key, properties)
        # Whatever remains should match the regular extension properties.
        for key, expected in self.extension_properties.items():
            self.assertEqual(properties[key], expected)

    def test_list(self):
        response = self.client.get("/api/v1/extension/")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(len(data), 2)
        self.assertIn("properties", data[0])

    def test_create_parent(self):
        """
        Creating a parent Extension should allow setting properties like title and
        description, because no Document exists to provide that data for a parent.
        """
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751",
            "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        ]
        payload = {
            "is_parent": True,
            "external_id": "external-id",
            "children": children,
            **self.extension_properties,
            **self.parent_properties,
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 201)
        data = response.json()
        self.assertIsInstance(data, dict)
        self.assertTrue(data["is_parent"])
        self.assertEqual(data["properties"].pop("children"), children)
        self.assert_properties(data["properties"], is_parent=True)

    def test_create_parent_no_children(self):
        """
        A "parent" Extension may be created without any children (yet).
        """
        payload = {
            "is_parent": True,
            "external_id": "external-id",
            **self.extension_properties,
            **self.parent_properties
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 201)
        data = response.json()
        self.assertIsInstance(data, dict)
        self.assertTrue(data["is_parent"])
        self.assert_properties(data["properties"], is_parent=True)

    def test_create_no_parent(self):
        """
        A "non-parent" Extension may still declare children: the Extension itself
        is not a parent, but the underlying Document might be. Also covers making
        the Extension a child of another Document.
        """
        external_id = "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        ]
        parents = [
            "63903863-6c93-4bda-b850-277f3c9ec00e"
        ]
        payload = {
            "external_id": external_id,
            "children": children,
            "parents": parents,
            **self.extension_properties
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 201)
        data = response.json()
        self.assertIsInstance(data, dict)
        self.assertFalse(data["is_parent"])
        self.assertEqual(data["properties"].pop("children"), children)
        self.assertEqual(data["properties"].pop("parents"), parents)
        self.assert_properties(data.pop("properties"), is_parent=False, external_id=external_id)

    def test_update_parent(self):
        """
        Updating an Extension overrides all properties; no merging is performed.
        """
        external_id = "custom-extension"
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751",
            "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        ]
        payload = {
            "external_id": external_id,
            "is_parent": True,
            "children": children,
            **self.extension_properties,
            **self.parent_properties,
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertIsInstance(data, dict)
        self.assertTrue(data["is_parent"])
        self.assertEqual(data["properties"].pop("children"), children)
        self.assert_properties(data["properties"], is_parent=True, external_id=external_id)

    def test_update_no_parent(self):
        """
        Updating an Extension overrides all properties; no merging is performed.
        """
        external_id = "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        children = [
            "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        ]
        payload = {
            "external_id": external_id,
            "is_parent": False,
            "children": children,
            **self.extension_properties,
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertIsInstance(data, dict)
        self.assertFalse(data["is_parent"])
        self.assertEqual(data["properties"].pop("children"), children)
        self.assert_properties(data["properties"], is_parent=False, external_id=external_id)

    def test_invalid_update_parent(self):
        """
        Once created as a parent, an Extension can't be demoted. Removing all
        children is allowed though: Elastic keeps using the Extension as a data
        source, expecting new children to be added later.
        """
        external_id = "custom-extension"
        payload = {
            "external_id": external_id,
            "is_parent": False,
            **self.extension_properties
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(
            response.status_code, 400,
            "Did not expect that updating a parent extension to a non-parent extension is allowed"
        )
        external_id = "custom-extension"
        payload = {
            "external_id": external_id,
            "is_parent": True,
            "children": []
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data["properties"]["children"], [])

    def test_delete(self):
        # Deleting existing extensions succeeds ...
        external_id = "custom-extension"
        response = self.client.delete(f"/api/v1/extension/{external_id}/", content_type="application/json")
        self.assertEqual(response.status_code, 204)
        external_id = "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        response = self.client.delete(f"/api/v1/extension/{external_id}/", content_type="application/json")
        self.assertEqual(response.status_code, 204)
        # ... while deleting an unknown one yields a 404.
        external_id = "does-not-exist"
        response = self.client.delete(f"/api/v1/extension/{external_id}/", content_type="application/json")
        self.assertEqual(response.status_code, 404)

    def test_invalid_external_id(self):
        # Creating a non-parent Extension must fail when no Document exists
        # with the given external_id.
        external_id = "not-a-document"
        payload = {
            "external_id": external_id,
            "is_parent": False,
            **self.extension_properties,
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # Updating must fail when external_id in path and body mismatch.
        external_id = "custom-extension"
        payload = {
            "external_id": "body-id",
            "is_parent": True,
            **self.extension_properties,
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)

    def test_invalid_parents(self):
        external_id = "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        ]
        parents = [
            "does-not-exist"
        ]
        payload = {
            "external_id": external_id,
            "children": children,
            "parents": parents,
            **self.extension_properties
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)
        external_id = "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        payload = {
            "external_id": external_id,
            "children": children,
            "parents": parents,
            **self.extension_properties
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)

    def test_invalid_children(self):
        external_id = "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        children = [
            "does-not-exist"
        ]
        parents = [
            "63903863-6c93-4bda-b850-277f3c9ec00e"
        ]
        payload = {
            "external_id": external_id,
            "children": children,
            "parents": parents,
            **self.extension_properties
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)
        external_id = "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        payload = {
            "external_id": external_id,
            "children": children,
            "parents": parents,
            **self.extension_properties
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)

    def test_invalid_properties_non_parent(self):
        # A non-parent Extension may not carry parent-only properties: create ...
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751",
            "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        ]
        payload = {
            "is_parent": False,
            "external_id": "external-id",
            "children": children,
            **self.extension_properties,
            **self.parent_properties,
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # ... and update are both rejected.
        external_id = "custom-extension"
        children = [
            "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751",
            "5be6dfeb-b9ad-41a8-b4f5-94b9438e4257"
        ]
        payload = {
            "external_id": external_id,
            "is_parent": False,
            "children": children,
            **self.extension_properties,
            **self.parent_properties,
        }
        response = self.client.put(f"/api/v1/extension/{external_id}/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)

    def test_duplicate_parent(self):
        external_id = "5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
        payload = {
            "external_id": external_id,
            **self.extension_properties
        }
        response = self.client.post("/api/v1/extension/", payload, content_type="application/json")
        self.assertEqual(response.status_code, 400)
|
<filename>refresh/virology.py
from database import hic_conn
from refresh import export
# Drop any previous dbo.virology table so the extract can be rebuilt from scratch.
SQL_DROP_TABLE = '''
IF OBJECT_ID(N'dbo.virology', N'U') IS NOT NULL
BEGIN
DROP TABLE dbo.virology;
END;
'''
# Pull COVID-related virology results from the UHL data warehouse (linked server
# `uhldwh`, via OPENQUERY pass-through) into wh_hic_covid.dbo.virology.
# QUOTED_IDENTIFIER is switched off so the double-quoted pass-through string parses,
# and restored afterwards.
SQL_INSERT = '''
SET QUOTED_IDENTIFIER OFF;
SELECT *
INTO wh_hic_covid.dbo.virology
FROM OPENQUERY(
uhldwh, "
SET NOCOUNT ON;
SELECT
p.Hospital_Number AS uhl_system_number,
o.Lab_Ref_No AS laboratory_department,
t.Order_Code_Expan order_name,
tc.Test_Expansion test_name,
CASE
WHEN t.Test_code = 'VBIR' THEN LTRIM(RTRIM(REPLACE(q.Quantity_Description, '*', '')))
ELSE t.Result_Expansion
END test_result,
NULL AS test_result_unit,
NULL AS result_flag,
r.WHO_COLLECTION_DATE_TIME sample_collected_date_time,
r.WHO_RECEIVE_DATE_TIME sample_received_date_time,
t.WHO_TEST_RESULTED_DATE_TIME result_available_date_time
FROM DWPATH.dbo.MICRO_TESTS t
INNER JOIN DWPATH.dbo.MICRO_RESULTS_FILE AS r
ON t.Micro_Results_File = r.ISRN
INNER JOIN DWPATH.dbo.ORDERS_FILE AS o
ON r.Order_No = o.Order_Number
INNER JOIN DWPATH.dbo.REQUEST_PATIENT_DETAILS AS p
ON o.D_Level_Pointer = p.Request_Patient_Details
LEFT JOIN DWPATH.dbo.MICRO_ORGANISMS org
ON org.Micro_Tests = t.Micro_Tests
LEFT OUTER JOIN DWPATH.dbo.MF_TEST_CODES_MICRO_WHO tc
ON t.Test_Code_Key=tc.Test_Codes_Row_ID
LEFT OUTER JOIN DWPATH.dbo.MF_QUANTITY_CODES q
ON org.Quantifier=q.APEX_ID
LEFT OUTER JOIN DWPATH.dbo.REQUEST_SOURCE_DETAILS s
ON o.C_Level_Pointer = s.Request_Source_Details
WHERE
(
t.Test_code IN ( 'VCOV', 'VCOV3', 'VCOV4', 'VCOV5' )
OR (t.Test_code = 'VBIR' AND org.Organism LIKE '%CoV%')
)
AND r.WHO_COLLECTION_DATE_TIME >= '01/01/2020 00:0:0'
AND p.Hospital_Number in (
SELECT asc2.UHL_System_Number
FROM DWBRICCS.dbo.all_suspected_covid asc2
)
;
");
SET QUOTED_IDENTIFIER ON;
'''
# Tighten the key column so it can be joined / indexed on reliably.
SQL_ALTER_TABLE = '''
ALTER TABLE virology ALTER COLUMN uhl_system_number varchar(30) COLLATE Latin1_General_CI_AS NOT NULL;
'''
# Index for fast lookup by hospital number.
SQL_INDEXES = '''
CREATE INDEX virology_uhl_system_number_IDX ON virology (uhl_system_number);
'''
def refresh_virology():
    """Rebuild the virology table: drop, repopulate from the warehouse, retype and index."""
    print('refresh_virology: started')
    # Run the refresh steps in their required order on a single connection.
    steps = (SQL_DROP_TABLE, SQL_INSERT, SQL_ALTER_TABLE, SQL_INDEXES)
    with hic_conn() as con:
        for statement in steps:
            con.execute(statement)
    print('refresh_virology: ended')
# brc_cv_covid_virology subject anonymised/pseudonymised patient identifier
# brc_cv_covid_virology laboratory_department local laboratory type
# brc_cv_covid_virology order_name local laboratory order name
# brc_cv_covid_virology test_name local pathology test name
# brc_cv_covid_virology test_result result of the laboratory test
# brc_cv_covid_virology test_result_unit unit for results
# brc_cv_covid_virology result_flag local flag indicating high/low result
# brc_cv_covid_virology sample_collected_date_time date/time sample collected
# brc_cv_covid_virology sample_received_date_time date/time sample received
# brc_cv_covid_virology result_available_date_time date/time result available
# brc_cv_covid_virology brc_name data submitting brc name
# Export query: one row per virology result, keyed by pseudonymised participant
# identifier, limited to patients with an episode admitted on or before 2021-06-30.
SQL_SELECT_EXPORT = '''
SELECT
p.participant_identifier AS subject,
a.laboratory_department,
a.order_name,
a.test_name,
a.test_result,
a.test_result_unit,
a.result_flag,
a.sample_collected_date_time,
a.sample_received_date_time,
a.result_available_date_time
FROM virology a
JOIN participant p
ON p.uhl_system_number = a.uhl_system_number
WHERE a.uhl_system_number IN (
SELECT DISTINCT e_.uhl_system_number
FROM episodes e_
WHERE e_.admission_date_time <= '20210630'
)
;
'''
def export_virology():
    # Hand the extract query to the shared export helper under the name 'virology'.
    export('virology', SQL_SELECT_EXPORT)
|
#!/usr/bin/python
"""
Benchmarking experiment for fidelity
Test bandwidth (using iperf) on string/chain networks of fixed size 40,
using kernel datapaths.
First construct a network of 2 hosts and N switches, connected as follows:
h1 - s1 - s2 - ... - sN - h2
Varying link bw, with & without virtual time, test throughput between h1 and h2
"""
import sys
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
import subprocess
from mininet.net import Mininet
from mininet.node import CPULimitedHost, OVSKernelSwitch
from mininet.node import DefaultController, RemoteController
from mininet.topo import Topo
from mininet.log import lg
from mininet.util import irange, custom
from mininet.link import TCLink
from functools import partial
from mininet.clean import cleanup
from mininet.cli import CLI
flush = sys.stdout.flush
class StringTestTopo(Topo):
    "Linear chain of N switches with one host attached at each end."

    def __init__(self, N, **params):
        Topo.__init__(self, **params)
        # Two hosts, N switches.
        hosts = [self.addHost('h%s' % h) for h in irange(1, 2)]
        switches = [self.addSwitch('s%s' % s) for s in irange(1, N)]
        # Attach a host to each end of the chain.
        self.addLink(hosts[0], switches[0])
        self.addLink(hosts[1], switches[N - 1])
        # Daisy-chain consecutive switches together.
        previous = None
        for current in switches:
            if previous:
                self.addLink(previous, current)
            previous = current
def stringBandwidthTest(host_class, controller_class,
                        link_class, size, tdf, data_file, bw):
    "Check bandwidth at various lengths along a switch chain."
    # Build h1 - s1 - ... - sN - h2 and start the emulation.
    topo_class = StringTestTopo(size)
    net = Mininet(topo=topo_class, host=host_class,
                  switch=OVSKernelSwitch, controller=controller_class,
                  waitConnected=False, link=link_class)
    net.start()
    # Apply the time-dilation factor only when it actually dilates.
    if tdf != 1:
        net.dilateEmulation(tdf)
    print "*** testing basic connectivity\n"
    src, dst = net.hosts
    num_pings = 3
    for i in irange(1, num_pings):
        ping_result = list(net.pingFull( [ src, dst ] ))
        # ping_result=[(host1), (host2)]
        # host = (src, dst, data)
        # data = (#sent, #received, rttmin, rttavg, rttmax, rttdev)
        print "Ping avg rtt = %s\n" % ping_result[0][2][3]
        rttavg = ping_result[0][2][3]
        data_file.write( "RTT Avg = %s ms\n" % rttavg)
    print "*** testing bandwidth\n"
    num_rounds = 1
    client_history = []
    # iperf run length in seconds (shadows the time module; left as-is).
    time = 10
    for i in irange(1, num_rounds):
        net.showDilation()
        # Offered UDP load matches the configured link bandwidth.
        udpBw = '%dM' % bw
        bandwidth = net.iperf( [src, dst], l4Type = 'UDP',
                               fmt = 'm', seconds=time, udpBw=udpBw,
                               clifile=data_file, serfile=data_file )
        flush()
        net.showDilation()
        serout = bandwidth[1]
        cliout = bandwidth[2]
        # iperf reports "<value> <unit>"; record both server and client rates.
        if len(serout) > 0 and len(cliout) > 0:
            serDataStr, unit = serout.split(" ")
            serData = float(serDataStr)
            cliDataStr, unit = cliout.split(" ")
            cliData = float(cliDataStr)
            client_history.append(cliData)
            data_file.write("%s\t%f\t%s\t%s\n" % (size, tdf, serData, cliData))
    # Aggregate client-side throughput over all rounds.
    client_mean = numpy.mean(client_history)
    client_stdev = numpy.std(client_history)
    data_file.write( "Avg Throughtput = %f\n" % client_mean )
    data_file.write( "STD Throughput = %f\n" % client_stdev )
    print "AVG = %f " % client_mean
    print "STD = %f " % client_stdev
    data_file.write('\n\n')
    # CLI(net)
    net.stop()
    cleanup()
    return client_mean, client_stdev
def runTest(file_name, controller, tdf, size, set_cpu, set_bw, set_delay="10us"):
lg.setLogLevel( 'info' )
if controller == "POX":
controller = partial( RemoteController, ip = '127.0.0.1', port=6633 )
else:
controller = DefaultController
link = partial( TCLink, bw=set_bw, delay=set_delay )
"""config host's cpu share and time dilation factor"""
host = custom(CPULimitedHost, inNamespace=True,
sched='cfs', period_us=100000, cpu=set_cpu)
"""with w option, it automatically overwrite everytime"""
data_file = open('%s.log' % file_name, 'w')
print "Results are written to %s.log file" % file_name
data_file.write("********* Running stringBandwidthTest *********\n")
data_file.flush()
# seems mininet cannot handle more than 640 switches
print "******* Running with %d switches, TDF = %d *******" % (size, tdf)
client_avg, client_stdev = stringBandwidthTest(host, controller, link,
size, tdf, data_file, set_bw)
cleanup()
return client_avg, client_stdev
def drawData(output, AvgRates, StdRates, BWs):
    """Render a grouped bar chart of throughput vs link bandwidth to an EPS file."""
    # NOTE(review): under Python 2, x/1000 and max(BWs)/1000 are integer
    # division — BWs below 1000 Mbps collapse to 0. Presumably BWs are
    # multiples of 1000 (cf. the '4', '8', '10' tick labels); confirm.
    BWsInGb = [ str(x/1000) for x in BWs]
    base_category = tuple(range(1, len(BWsInGb) + 1 ))
    dataLables = ['Mininet, TDF=1', 'Mininet, TDF=4', 'Physical Testbed']
    xLabel = 'Link Bandwidth (Gbps)'
    yLabel = 'Average TCP Throughput (Gbps)'
    color_list = ['c', 'r', 'm', 'y', 'g', 'b', 'k', 'w']
    hatch_list = ['/', '\\', '+', 'x', 'o', '.', '*', '-']
    width = 0.25
    fontSize = 14
    maxY = max(BWs) / 1000
    rects = []
    fig, ax = pyplot.subplots()
    # One bar series per data set, offset horizontally within each category.
    for index in range(0, len(AvgRates)):
        category = [x + index * width for x in base_category]
        rect = ax.bar(category, AvgRates[index], width, color=color_list[index], yerr=StdRates[index], hatch=hatch_list[index])
        rects.append(rect)
    ax.legend(tuple(rects), dataLables, shadow=True, fancybox=True, fontsize=fontSize, loc='upper left')
    # Center the category tick under the bar group.
    ax.set_xticks([x + width*3/2 for x in base_category ])
    # ax.set_xticklabels(('4', '8', '10'))
    ax.set_xticklabels(BWsInGb)
    ax.set_yticks(range(maxY+1))
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(fontSize)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(fontSize)
    pyplot.grid()
    pyplot.ylim((0, maxY))
    pyplot.xlim(0.65, len(base_category) + 1)
    pyplot.xlabel(xLabel, fontsize=fontSize)
    pyplot.ylabel(yLabel, fontsize=fontSize)
    # pyplot.yticks([x for x in range(0, 1, 11)])
    # pyplot.xticks([x for x in range(1, 4, 1)])
    # pyplot.show()
    pyplot.savefig(output, format='eps')
    print "finished plotting"
def main():
    """Sweep TDF values over the configured bandwidths, collect throughput stats."""
    AvgRates = []
    StdRates = []
    TDFs = [1, 4]
    BWs = [100]
    size = 10
    for tdf in TDFs:
        avg_rates = []
        std_rates = []
        for bw in BWs:
            file_name = "PerfStringBW%dMTDF%d" %(bw, tdf)
            # "NO" selects the default (non-POX) controller; 0.5 is the host CPU share.
            avg, std = runTest(file_name, "NO", tdf, size, 0.5, bw)
            # convert to Gbps
            avg_rates.append(avg / 1024)
            std_rates.append(std / 1024)
        AvgRates.append(avg_rates)
        StdRates.append(std_rates)
    # trust me, I got them from physical testbed
    testbed_avg_rates = [3.78, 7.42, 9.22]
    testbed_std_rates = [0.06, 0.147, 0.239]
    # Ideal = link rate itself, with zero deviation.
    ideal_avg_rates = [x / 1000 for x in BWs]
    ideal_std_rates = [x - x for x in BWs]
    AvgRates.append(ideal_avg_rates)
    StdRates.append(ideal_std_rates)
    print AvgRates
    print StdRates
    # drawData('Perf%dSwDiffBw.eps' % size, AvgRates, StdRates, BWs)

if __name__ == '__main__':
    main()
|
import speech_recognition as sr
import pyttsx3
from pyttsx3.drivers import sapi5
import sys
import time
import datetime
import os
import glob
# Registry path of the Windows SAPI5 voice used for speech output.
voice_id = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_EN-US_ZIRA_11.0"
# Speak responses aloud by default.
INITIAL_TALKING = True
# Start by reading commands from stdin rather than the microphone.
INITIAL_USING_MIC = False
# Root folder scanned recursively for music files.
MUSIC_DIR = "C:/Users/geoff/OneDrive/Music"
class tracks_class(object):
    """Lightweight record describing one music track in the library."""

    def __init__(self, artist=None, album=None, folder=None, filename=None):
        # Store the metadata exactly as supplied by the library scan.
        self.artist = artist
        self.album = album
        self.folder = folder
        self.filename = filename
# Module-level library of every track discovered under MUSIC_DIR.
tracks = []

def add_track(artist, album, folder, filename):
    # Append one track record to the in-memory library.
    tracks.append(tracks_class(artist, album, folder, filename))
def add_track_to_queue(track):
    """Queue *track* for the player by dropping a timestamped .txt file in .\\Queue.

    "Blank.mp3" is the stop sentinel and is never written to the queue.
    """
    if track != "Blank.mp3":
        print ("Adding track: " + track)
        # The timestamp gives each queue file a unique, ordered name;
        # ':' is replaced because it is invalid in Windows filenames.
        d = str(datetime.datetime.now())
        fname = d + ".txt"
        fname = fname.replace(":",".")
        # with-block guarantees the handle is closed even if the write fails
        # (the original open/close pair leaked the handle on error).
        with open(".\\Queue\\" + fname, "w") as f:
            f.write(track)
        # Short pause ensures back-to-back calls get distinct timestamps.
        time.sleep(0.001)
def clear_tracks_from_queue():
    """Delete every pending queue file (timestamp-named 20*.txt entries)."""
    for stale_path in glob.glob(".\\Queue\\20*.txt"):
        os.remove(stale_path)
def get_tracks(walk_dir):
    """Rebuild the global track library by scanning walk_dir recursively."""
    tracks.clear()
    for dirpath, dirs, files in os.walk(walk_dir):
        for filename in files:
            # Only index the common audio formats.
            if filename.endswith(".mp3") or filename.endswith(".m4a") or filename.endswith(".wma"):
                folder = dirpath[len(walk_dir)+1:]
                # Parses the relative path as <category>\<artist>\<album>;
                # assumes Windows '\\' separators — TODO confirm layout.
                slpos = folder.find("\\")
                artist_album = folder[slpos+1:]
                slpos = artist_album.find("\\")
                artist = artist_album[0:slpos]
                album = artist_album[slpos+1:]
                add_track (artist, album, folder, filename)
        # Progress feedback: show each directory as it is scanned.
        print(dirpath)
def respond_to_command(w):
    """Dispatch a lower-cased command string to the matching action.

    Mutates the module-level `talking` / `using_mic` flags for the
    mode-switching commands.
    """
    global talking
    global using_mic
    # print ("Responding to:", w)
    # print (w[0:4])
    if w[0:4] == "play":
        # Everything after "play " is the artist/album search key.
        key = w[5:]
        if key != "":
            say("Playing " + key)
            search_tracks(key)
    elif w == "reset":
        clear_tracks_from_queue()
    elif w == "talk to me":
        talking = True
        say ("OK")
    elif w == "be quiet" or w == "shut up":
        talking = False
    elif w == "use mike":
        using_mic = True
    elif w == "mike off":
        using_mic = False
    elif w != "":
        print ("Don't understand")
def get_command():
    """Read one command — from the microphone when `using_mic` is set,
    otherwise from stdin — and return it lower-cased."""
    if using_mic:
        recognizer = sr.Recognizer()
        microphone = sr.Microphone()
        w = ""
        s = ""
        response = get_command_from_mic(recognizer, microphone)
        # A failed transcription leaves s empty, yielding an empty command.
        if response["transcription"]:
            s = response["transcription"]
            print("You said:",s)
        # if response["error"]:
        #     print("{}".format(response["error"]))
    else:
        s = input("Enter command: ")
    w = s.lower()
    return (w)
def get_command_from_mic(recognizer, microphone):
    """Capture one utterance and transcribe it via Google speech recognition.

    Returns a dict with keys: "success" (False only when the API was
    unreachable), "error" (message or None), "transcription" (text or None).
    """
    with microphone as source:
        # Calibrate for background noise before listening.
        recognizer.adjust_for_ambient_noise(source)
        print("Listening ...")
        audio = recognizer.listen(source)
        print("Got something")
    response = {
        "success": True,
        "error": None,
        "transcription": None
    }
    try:
        response["transcription"] = recognizer.recognize_google(audio)
    except sr.RequestError:
        # Recognition service unreachable or quota exceeded.
        response["success"] = False
        response["error"] = "API unavailable"
    except sr.UnknownValueError:
        # Audio captured but speech was unintelligible.
        response["error"] = "Didn't understand"
    return response
def say(text):
    """Speak *text* through the configured SAPI5 voice when talking mode is on."""
    if talking:
        print("Saying", text)
        # A fresh engine is initialised per call — presumably to avoid a
        # stale driver between utterances; TODO confirm the per-call cost.
        engine = pyttsx3.init('sapi5')
        engine.setProperty('voice', voice_id)
        engine.say(text)
        engine.runAndWait()
def search_tracks(w):
    """Queue every library track whose artist or album equals *w* (case-insensitive)."""
    found = False
    print ("Scanning library for:",w)
    for track in tracks:
        # Skip records with incomplete metadata.
        if track.artist is not None and track.album is not None:
            if track.artist.lower() == w or track.album.lower() == w:
                found = True
                add_track_to_queue(track.folder + "\\" + track.filename)
    if not found:
        print ("No match for " + w)
        say ("No match for " + w)
# --- Script entry: build the library, handle CLI mode, then the command loop ---
get_tracks(MUSIC_DIR)
talking = INITIAL_TALKING
# CLI mode: join all arguments into one search key, queue matches, then exit.
if len(sys.argv) > 1:
    args = ""
    for arg in range(1, len(sys.argv)):
        args = args + " " + str(sys.argv[arg])
    args = args.lstrip()
    search_tracks(args)
    sys.exit()
using_mic = INITIAL_USING_MIC
command = ""
# Interactive mode: loop until "stop"/"quit"; queuing "Blank.mp3" signals
# the player process to stop.
while True:
    command = get_command()
    if command == "stop" or command == "quit":
        say("Stopping")
        add_track_to_queue("Blank.mp3")
        break
    respond_to_command(command)
|
<reponame>Shihab-Shahriar/scikit-clean
import warnings
import numpy as np
from sklearn import clone
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from sklearn.utils import check_random_state
from .base import BaseDetector
class PartitioningDetector(BaseDetector):
    """
    Partitions dataset into n subsets, trains a classifier on each.
    Trained models are then used to predict on entire dataset.
    See :cite:`ipf07` for details.

    Parameters
    ------------
    classifier : object, default=None
        A classifier instance supporting sklearn API.
        If None, `DecisionTreeClassifier` is used.
    n_partitions : int, default=5
        No of non-overlapping partitions created from dataset.
        For small datasets, you might want to use smaller values.
    n_jobs : int, default=1
        No of parallel cpu cores to use
    random_state : int, default=None
        Set this value for reproducibility
    """
    def __init__(self, classifier=None, n_partitions=5, n_jobs=1, random_state=None):
        super().__init__(n_jobs=n_jobs, random_state=random_state)
        self.classifier = classifier
        self.n_partitions = n_partitions

    def detect(self, X, y):
        """Return per-sample conf_score: the fraction of partition models
        that predict the sample's given label."""
        X, y = self._check_everything(X, y)
        classifier = clone(self.classifier) if self.classifier else \
            DecisionTreeClassifier(max_depth=2, random_state=self.random_state)
        # Split points producing n_partitions contiguous, near-equal chunks.
        breaks = [(len(X) // self.n_partitions) * i
                  for i in range(1, self.n_partitions)]
        Xs, ys = np.split(X, breaks), np.split(y, breaks)
        clfs = []
        for i in range(self.n_partitions):  # All clfs have same random_state but diff data
            c = clone(classifier).fit(Xs[i], ys[i])
            clfs.append(c)
        # Column i holds partition-model i's predictions for every sample.
        preds = np.zeros((len(X), self.n_partitions))
        for i in range(self.n_partitions):
            preds[:, i] = clfs[i].predict(X)
        # Agreement matrix: True where a model predicts the given label.
        eqs = preds == y.reshape(-1, 1)
        return eqs.sum(axis=1) / self.n_partitions
class MCS(BaseDetector):
    """
    Detects noise using a sequential Markov Chain Monte Carlo sampling algorithm.
    Tested for binary classification, multi-class classification sometimes
    perform poorly. See :cite:`mcmc19` for details.

    Parameters
    --------------
    classifier : object, default=None
        A classifier instance supporting sklearn API.
        If None, `LogisticRegression` is used.
    n_steps : int, default=20
        No of sampling steps to run.
    n_jobs : int, default=1
        No of parallel cpu cores to use
    random_state : int, default=None
        Set this value for reproducibility
    """
    def __init__(self, classifier=None, n_steps=20, n_jobs=1, random_state=None):
        super().__init__(n_jobs=n_jobs, random_state=random_state)
        self.classifier = classifier
        self.n_steps = n_steps

    def detect(self, X, y):
        """Return conf_score: the fraction of MCMC steps in which each
        sample remained in the training subset."""
        X, y = self._check_everything(X, y)
        rns = check_random_state(self.random_state)
        # One fresh seed per step so each fitted model differs deterministically.
        seeds = rns.randint(10 ** 8, size=self.n_steps)
        classifier = clone(self.classifier) if self.classifier \
            else LogisticRegression(random_state=self.random_state)
        contain_random_state = 'random_state' in classifier.get_params()
        # mask marks samples currently kept; conf_score counts how often kept.
        mask = np.ones(y.shape, 'bool')
        conf_score = np.zeros(y.shape)
        for i in range(self.n_steps):
            conf_score[mask] += 1
            clf = clone(classifier)
            if contain_random_state:
                clf.set_params(random_state=seeds[i])
            clf.fit(X[mask], y[mask])
            probs = clf.predict_proba(X)  # (N,n_estimators), p(k|x) for all k in classes
            pc = probs[range(len(y)), y]  # (N,), Prob assigned to correct class
            # Keep each sample with probability equal to its correct-class prob.
            mask = rns.binomial(1, pc).astype('bool')
            # Abort if sampling dropped an entire class — further fits would break.
            if not np.all(np.unique(y[mask]) == self.classes_):
                warnings.warn(f"One or more classes have been entirely left out "
                              f"in current iteration {i}, stopping MCMC loop.",
                              category=RuntimeWarning)
                break
        return conf_score / self.n_steps
# TODO: Allow both hard & soft voting
class InstanceHardness(BaseDetector):
    """
    A set of classifiers are used to predict labels of each sample
    using cross-validation. `conf_score` of a sample is percentage
    classifiers that correctly predict it's label. See :cite:`ih14`
    for details.

    Parameters
    --------------
    classifiers : A single or list of classifier instances supporting sklearn API, default=None
        If None, four classifiers are used: `GaussianNB`,
        `DecisionTreeClassifier`, `KNeighborsClassifier` and `LogisticRegression`.
    cv : int, cross-validation generator or an iterable, default=None
        If None, uses 5-fold stratified k-fold
        if int, no of folds to use in stratified k-fold
    n_jobs : int, default=1
        No of parallel cpu cores to use
    random_state : int, default=None
        Set this value for reproducibility
    """
    DEFAULT_CLFS = [DecisionTreeClassifier(max_leaf_nodes=500), GaussianNB(), KNeighborsClassifier(),
                    LogisticRegression(multi_class='auto', max_iter=4000, solver='lbfgs')]

    def __init__(self, classifiers=None, cv=None, n_jobs=1, random_state=None):
        super().__init__(n_jobs=n_jobs, random_state=random_state)
        self.classifiers = classifiers
        self.cv = cv

    def detect(self, X, y):
        """Return conf_score: mean cross-validated probability each
        classifier assigns to a sample's given label."""
        X, y = self._check_everything(X, y)
        # FIX: work on local clones instead of assigning back to
        # self.classifiers. The previous code overwrote the __init__ param at
        # detect-time and mutated the estimators in the shared DEFAULT_CLFS
        # class attribute via set_params, contaminating every other instance.
        if self.classifiers is None:
            classifiers = InstanceHardness.DEFAULT_CLFS
        elif isinstance(self.classifiers, BaseEstimator):
            classifiers = [self.classifiers]
        else:
            classifiers = self.classifiers
        classifiers = [clone(c) for c in classifiers]
        cv = self.cv
        if cv is None or type(cv) == int:
            n_splits = self.cv or 5
            cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
                                 random_state=self.random_state)
        N = len(X)
        conf_score = np.zeros_like(y, dtype='float64')
        rns = check_random_state(self.random_state)
        # One deterministic seed per classifier for reproducibility.
        seeds = rns.randint(10 ** 8, size=len(classifiers))
        for i, clf in enumerate(classifiers):
            if 'random_state' in clf.get_params():
                clf.set_params(random_state=seeds[i])
            # probability given to original class of all samples
            probs = cross_val_predict(clf, X, y, cv=cv, n_jobs=self.n_jobs,
                                      method='predict_proba')[range(N), y]
            conf_score += probs
        return conf_score / len(classifiers)
class RandomForestDetector(BaseDetector):
    """
    Uses a Random Forest classifer to detect mislabeled samples. In 'bootstrap'
    method- for each sample, only trees that didn't select it for training
    (via bootstrapping) are used to predict it's label. The 'cv' method uses a
    K-fold cross-validation approach, where a fresh Random Forest is trained for
    each fold, using remaining k-1 folds as training data. In both cases,
    percentage of trees that correctly predicted the label of a sample is its
    `conf_score`.

    See :cite:`twostage18` for details.

    Parameters
    --------------
    method : str, default='bootstrap'
    n_estimators : int, default=101
        No of trees in Random Forest.
    sampling_ratio : float, 0.0 to 1.0, default=1.0
        No of samples drawn at each tree equals: len(X) * sampling_ratio
    n_jobs : int, default=1
        No of parallel cpu cores to use
    random_state : int, default=None
        Set this value for reproducibility
    """
    # TODO: Allow other tree ensembles
    def __init__(self, method='bootstrap', n_estimators=101, sampling_ratio=None,
                 cv=None, n_jobs=1, random_state=None):
        super().__init__(n_jobs=n_jobs, random_state=random_state)
        self.method = method
        self.n_estimators = n_estimators
        self.sampling_ratio = sampling_ratio
        self.cv = cv

    def detect(self, X, y):
        """Return conf_score per the configured method ('bootstrap' or 'cv')."""
        X, y = self._validate_data(X, y)
        # oob_score=True is required so oob_decision_function_ is available below.
        rf = RandomForestClassifier(n_estimators=self.n_estimators, oob_score=True,
                                    max_samples=self.sampling_ratio, n_jobs=self.n_jobs,
                                    random_state=self.random_state).fit(X, y)
        if self.method == 'bootstrap':
            # OOB probability each sample's trees assign to its given label.
            conf_score = rf.oob_decision_function_[range(len(X)), y]
            return conf_score
        if self.method != 'cv':
            raise ValueError("Only 'cv' and 'bootstrap' methods are allowed.")
        cv = self.cv
        if cv is None or type(cv) == int:
            n_splits = self.cv or 5
            cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
                                 random_state=self.random_state)
        conf_score = np.zeros_like(y, dtype='float64')
        # For each fold: fit a fresh forest on the other folds, then count
        # how many trees vote for each held-out sample's given label.
        for train_idx, test_idx in cv.split(X, y):
            clf = clone(rf).fit(X[train_idx], y[train_idx])
            for tree in clf.estimators_:
                yp = tree.predict(X[test_idx])
                conf_score[test_idx] += (yp == y[test_idx])
        return conf_score / rf.n_estimators
|
<reponame>maheshwarigagan/nlp-architect<filename>examples/cross_doc_coref/cross_doc_coref_sieves.py
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import logging
from nlp_architect import LIBRARY_ROOT
from nlp_architect.data.cdc_resources.relations.relation_types_enums import RelationType
from nlp_architect.models.cross_doc_coref.cdc_config import EventConfig, EntityConfig
from nlp_architect.models.cross_doc_coref.cdc_resource import CDCResources
from nlp_architect.models.cross_doc_coref.system.cdc_settings import CDCSettings
from nlp_architect.models.cross_doc_coref.system.sieves.sieves import SieveType
from nlp_architect.models.cross_doc_sieves import run_event_coref, run_entity_coref
def run_example():
    """Run cross-document coreference (events + entities) on the ECB corpus.

    Configures the sieve pipelines, loads shared CDC resources, runs each
    stage if its config enables it, and prints the resulting clusters.
    """
    # Bug fix: ``logger`` used to be bound only inside the __main__ guard,
    # so importing this module and calling run_example() raised NameError.
    logger = logging.getLogger(__name__)

    event_config = EventConfig()
    # Each sieve entry is (sieve strictness, relation extractor, threshold).
    event_config.sieves_order = [
        (SieveType.STRICT, RelationType.SAME_HEAD_LEMMA, 0.0),
        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_DISAMBIGUATION, 0.1),
        (SieveType.VERY_RELAX, RelationType.WORD_EMBEDDING_MATCH, 0.7),
        (SieveType.RELAX, RelationType.SAME_HEAD_LEMMA_RELAX, 0.5),
    ]
    event_config.gold_mentions_file = LIBRARY_ROOT + \
        '/datasets/ecb/ecb_all_event_mentions.json'

    entity_config = EntityConfig()
    entity_config.sieves_order = [
        (SieveType.STRICT, RelationType.SAME_HEAD_LEMMA, 0.0),
        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_REDIRECT_LINK, 0.1),
        (SieveType.VERY_RELAX, RelationType.WIKIPEDIA_DISAMBIGUATION, 0.1),
        (SieveType.VERY_RELAX, RelationType.WORD_EMBEDDING_MATCH, 0.7),
        (SieveType.VERY_RELAX, RelationType.REFERENT_DICT, 0.5)
    ]
    entity_config.gold_mentions_file = LIBRARY_ROOT + \
        '/datasets/ecb/ecb_all_entity_mentions.json'

    # CDCResources hold default attribute values that might need to be change,
    # (using the defaults values in this example), use to configure attributes
    # such as resources files location, output directory, resources init methods and other.
    # check in class and see if any attributes require change in your set-up
    resource_location = CDCResources()
    resources = CDCSettings(resource_location, event_config, entity_config)

    event_clusters = None
    if event_config.run_evaluation:
        logger.info('Running event coreference resolution')
        event_clusters = run_event_coref(resources)

    entity_clusters = None
    if entity_config.run_evaluation:
        logger.info('Running entity coreference resolution')
        entity_clusters = run_entity_coref(resources)

    print('-=Cross Document Coref Results=-')
    print('-=Event Clusters Mentions=-')
    # Bug fix: when a stage is disabled its clusters stay None; the original
    # crashed here with AttributeError on ``.clusters_list``.
    if event_clusters is not None:
        for event_cluster in event_clusters.clusters_list:
            print(event_cluster.coref_chain)
            for event_mention in event_cluster.mentions:
                print(event_mention.mention_id)
                print(event_mention.tokens_str)

    print('-=Entity Clusters Mentions=-')
    if entity_clusters is not None:
        for entity_cluster in entity_clusters.clusters_list:
            print(entity_cluster.coref_chain)
            for entity_mention in entity_cluster.mentions:
                print(entity_mention.mention_id)
                print(entity_mention.tokens_str)
if __name__ == '__main__':
    # Configure root logging before the example emits any records.
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): ``logger`` is only bound here at script time, so code
    # that imports this module and calls run_example() directly will not
    # find it — consider moving the logger to module level.
    logger = logging.getLogger(__name__)
    run_example()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework.test import APITestCase
from rest_framework import status
import json
from .models import Build
from InvenTree.status_codes import BuildStatus
class BuildTestSimple(TestCase):
    """Model-level sanity tests for the Build model (fixture data)."""

    fixtures = [
        'category',
        'part',
        'location',
        'build',
    ]

    def setUp(self):
        # Authenticated user belonging to a group that has full
        # permissions on the 'build' rule set.
        user_model = get_user_model()
        user_model.objects.create_user('testuser', '<EMAIL>', 'password')
        self.user = user_model.objects.get(username='testuser')

        group = Group.objects.create(name='builders')
        self.user.groups.add(group)

        for ruleset in group.rule_sets.all():
            if ruleset.name != 'build':
                continue
            ruleset.can_change = True
            ruleset.can_add = True
            ruleset.can_delete = True
            ruleset.save()
        group.save()

        self.client.login(username='testuser', password='password')

    def test_build_objects(self):
        """The fixture provides exactly two Build objects with known data."""
        self.assertEqual(Build.objects.count(), 2)
        second = Build.objects.get(pk=2)
        self.assertEqual(second.batch, 'B2')
        self.assertEqual(second.quantity, 21)
        self.assertEqual(str(second), '21 x Orphan')

    def test_url(self):
        """A Build reports its own detail-page URL."""
        first = Build.objects.get(pk=1)
        self.assertEqual(first.get_absolute_url(), '/build/1/')

    def test_is_complete(self):
        """Completion flag follows the build status."""
        pending = Build.objects.get(pk=1)
        finished = Build.objects.get(pk=2)
        self.assertEqual(pending.is_complete, False)
        self.assertEqual(finished.is_complete, True)
        self.assertEqual(finished.status, BuildStatus.COMPLETE)

    def test_is_active(self):
        """Only unfinished builds count as active."""
        pending = Build.objects.get(pk=1)
        finished = Build.objects.get(pk=2)
        self.assertEqual(pending.is_active, True)
        self.assertEqual(finished.is_active, False)

    def test_required_parts(self):
        # TODO - Generate BOM for test part
        pass

    def test_cancel_build(self):
        """Cancelling a pending build moves it to CANCELLED."""
        build = Build.objects.get(id=1)
        self.assertEqual(build.status, BuildStatus.PENDING)
        build.cancelBuild(self.user)
        self.assertEqual(build.status, BuildStatus.CANCELLED)
class TestBuildAPI(APITestCase):
    """Exercise the DRF list endpoints for Build and BuildItem objects."""

    fixtures = [
        'category',
        'part',
        'location',
        'build',
    ]

    def setUp(self):
        # Authenticated user in the 'builders' group with full
        # permissions on the 'build' rule set.
        user_model = get_user_model()
        account = user_model.objects.create_user('testuser', '<EMAIL>', 'password')

        group = Group.objects.create(name='builders')
        account.groups.add(group)

        for ruleset in group.rule_sets.all():
            if ruleset.name != 'build':
                continue
            ruleset.can_change = True
            ruleset.can_add = True
            ruleset.can_delete = True
            ruleset.save()
        group.save()

        self.client.login(username='testuser', password='password')

    def test_get_build_list(self):
        """The Build list endpoint responds with HTTP 200."""
        endpoint = reverse('api-build-list')
        reply = self.client.get(endpoint, format='json')
        self.assertEqual(reply.status_code, status.HTTP_200_OK)

    def test_get_build_item_list(self):
        """The BuildItem list endpoint responds, with and without filtering."""
        endpoint = reverse('api-build-item-list')

        reply = self.client.get(endpoint, format='json')
        self.assertEqual(reply.status_code, status.HTTP_200_OK)

        # Test again, filtering by part ID
        reply = self.client.get(endpoint, {'part': '1'}, format='json')
        self.assertEqual(reply.status_code, status.HTTP_200_OK)
class TestBuildViews(TestCase):
    """ Tests for Build app views """
    # Covers the classic (AJAX form) views: index, detail, create,
    # allocate, item-create, complete, cancel and unallocate.

    fixtures = [
        'category',
        'part',
        'location',
        'build',
    ]

    def setUp(self):
        super().setUp()

        # Create a user
        User = get_user_model()
        user = User.objects.create_user('username', '<EMAIL>', 'password')

        # Grant full 'build' rule-set permissions via the builders group.
        g = Group.objects.create(name='builders')
        user.groups.add(g)
        for rule in g.rule_sets.all():
            if rule.name == 'build':
                rule.can_change = True
                rule.can_add = True
                rule.can_delete = True
                rule.save()
        g.save()

        self.client.login(username='username', password='password')

    def test_build_index(self):
        """ test build index view """
        response = self.client.get(reverse('build-index'))
        self.assertEqual(response.status_code, 200)

    def test_build_detail(self):
        """ Test the detail view for a Build object """
        pk = 1

        response = self.client.get(reverse('build-detail', args=(pk,)))
        self.assertEqual(response.status_code, 200)

        # The rendered page should mention the build's title.
        build = Build.objects.get(pk=pk)

        content = str(response.content)

        self.assertIn(build.title, content)

    def test_build_create(self):
        """ Test the build creation view (ajax form) """
        url = reverse('build-create')

        # Create build without specifying part
        response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Create build with valid part
        response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Create build with invalid part
        # NOTE: the AJAX form view still answers 200; errors are carried
        # inside the JSON payload rather than the HTTP status.
        response = self.client.get(url, {'part': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

    def test_build_allocate(self):
        """ Test the part allocation view for a Build """
        url = reverse('build-allocate', args=(1,))

        # Get the page normally
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

        # Get the page in editing mode
        response = self.client.get(url, {'edit': 1})
        self.assertEqual(response.status_code, 200)

    def test_build_item_create(self):
        """ Test the BuildItem creation view (ajax form) """
        url = reverse('build-item-create')

        # Try without a part specified
        response = self.client.get(url, {'build': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Try with an invalid build ID
        response = self.client.get(url, {'build': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Try with a valid part specified
        response = self.client.get(url, {'build': 1, 'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Try with an invalid part specified
        response = self.client.get(url, {'build': 1, 'part': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

    def test_build_item_edit(self):
        """ Test the BuildItem edit view (ajax form) """
        # TODO
        # url = reverse('build-item-edit')
        pass

    def test_build_complete(self):
        """ Test the build completion form """
        url = reverse('build-complete', args=(1,))

        # Test without confirmation
        response = self.client.post(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])

        # Test with confirmation, valid location
        response = self.client.post(url, {'confirm': 1, 'location': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertTrue(data['form_valid'])

        # Test with confirmation, invalid location
        response = self.client.post(url, {'confirm': 1, 'location': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])

    def test_build_cancel(self):
        """ Test the build cancellation form """
        url = reverse('build-cancel', args=(1,))

        # Test without confirmation
        response = self.client.post(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])

        b = Build.objects.get(pk=1)
        self.assertEqual(b.status, 10)  # Build status is still PENDING

        # Test with confirmation
        response = self.client.post(url, {'confirm_cancel': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertTrue(data['form_valid'])

        b = Build.objects.get(pk=1)
        self.assertEqual(b.status, 30)  # Build status is now CANCELLED

    def test_build_unallocate(self):
        """ Test the build unallocation view (ajax form) """
        url = reverse('build-unallocate', args=(1,))

        # Test without confirmation
        response = self.client.post(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])

        # Test with confirmation
        response = self.client.post(url, {'confirm': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertTrue(data['form_valid'])
|
<reponame>Shmuma/Run-Skeleton-Run
import argparse
import os
import json
import copy
import torch
import torch.multiprocessing as mp
from multiprocessing import Value
from common.misc_util import boolean_flag, str2params, create_if_need
from common.env_wrappers import create_env
from common.torch_util import activations, hard_update
from ddpg.model import create_model, create_act_update_fns, train_multi_thread, \
train_single_thread, play_single_thread
def parse_args():
    """Build and parse the command-line options for DDPG training.

    Returns an ``argparse.Namespace``; defaults appear in ``--help``
    thanks to ``ArgumentDefaultsHelpFormatter``.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # --- environment / run setup ---
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--difficulty', type=int, default=2)
    parser.add_argument('--max-obstacles', type=int, default=3)
    parser.add_argument('--logdir', type=str, default="./logs")
    parser.add_argument('--num-threads', type=int, default=1)
    parser.add_argument('--num-train-threads', type=int, default=1)
    boolean_flag(parser, "ddpg-wrapper", default=False)
    parser.add_argument('--skip-frames', type=int, default=1)
    parser.add_argument('--fail-reward', type=float, default=0.0)
    parser.add_argument('--reward-scale', type=float, default=1.)
    boolean_flag(parser, "flip-state-action", default=False)

    # --- per-network options, generated for both actor and critic ---
    for agent in ["actor", "critic"]:
        parser.add_argument('--{}-layers'.format(agent), type=str, default="64-64")
        parser.add_argument('--{}-activation'.format(agent), type=str, default="relu")
        boolean_flag(parser, "{}-layer-norm".format(agent), default=False)
        boolean_flag(parser, "{}-parameters-noise".format(agent), default=False)
        boolean_flag(parser, "{}-parameters-noise-factorised".format(agent), default=False)
        parser.add_argument('--{}-lr'.format(agent), type=float, default=1e-3)
        parser.add_argument('--{}-lr-end'.format(agent), type=float, default=5e-5)
        parser.add_argument('--restore-{}-from'.format(agent), type=str, default=None)

    # --- optimisation / training schedule ---
    parser.add_argument('--gamma', type=float, default=0.96)
    parser.add_argument('--loss-type', type=str, default="quadric-linear")
    parser.add_argument('--grad-clip', type=float, default=10.)
    parser.add_argument('--tau', default=0.01, type=float)
    parser.add_argument('--train-steps', type=int, default=int(1e4))
    parser.add_argument('--batch-size', type=int, default=256)  # per worker
    parser.add_argument('--buffer-size', type=int, default=int(1e6))

    # --- replay buffer ---
    boolean_flag(parser, "prioritized-replay", default=False)
    parser.add_argument('--prioritized-replay-alpha', default=0.6, type=float)
    parser.add_argument('--prioritized-replay-beta0', default=0.4, type=float)

    # --- exploration schedule ---
    parser.add_argument('--initial-epsilon', default=1., type=float)
    parser.add_argument('--final-epsilon', default=0.01, type=float)
    parser.add_argument('--max-episodes', default=int(1e4), type=int)
    parser.add_argument('--max-update-steps', default=int(5e6), type=int)
    parser.add_argument('--epsilon-cycle-len', default=int(2e2), type=int)
    parser.add_argument('--max-train-days', default=int(1e1), type=int)

    # --- random-process (action noise) parameters ---
    parser.add_argument('--rp-type', default="ornstein-uhlenbeck", type=str)
    parser.add_argument('--rp-theta', default=0.15, type=float)
    parser.add_argument('--rp-sigma', default=0.2, type=float)
    parser.add_argument('--rp-sigma-min', default=0.15, type=float)
    parser.add_argument('--rp-mu', default=0.0, type=float)

    # --- checkpointing / restoring ---
    parser.add_argument('--clip-delta', type=int, default=10)
    parser.add_argument('--save-step', type=int, default=int(1e4))
    parser.add_argument('--restore-args-from', type=str, default=None)
    return parser.parse_args()
def restore_args(args):
    """Merge hyper-parameters saved in ``args.restore_args_from`` into ``args``.

    Run-environment and schedule options (seed, paths, thread counts,
    learning rates, replay/exploration settings, ...) are deliberately
    dropped from the snapshot so the current command line keeps control of
    them; every remaining saved value overwrites the matching attribute
    on ``args``.

    :param args: ``argparse.Namespace`` with ``restore_args_from`` set.
    :return: the same ``args`` object, mutated in place.
    :raises KeyError: if the snapshot is missing one of the dropped keys
        (same behaviour as the original chain of ``del`` statements).
    """
    with open(args.restore_args_from, "r") as fin:
        params = json.load(fin)

    # Keys controlled by the current command line, never restored.
    skipped = [
        "seed", "difficulty", "max_obstacles", "logdir",
        "num_threads", "num_train_threads", "skip_frames",
        "grad_clip", "tau", "train_steps", "batch_size", "buffer_size",
        "prioritized_replay", "prioritized_replay_alpha",
        "prioritized_replay_beta0",
        "initial_epsilon", "final_epsilon",
        "max_episodes", "max_update_steps", "epsilon_cycle_len",
        "max_train_days",
        "rp_type", "rp_theta", "rp_sigma", "rp_sigma_min", "rp_mu",
        "clip_delta", "save_step", "restore_args_from",
    ]
    for agent in ("actor", "critic"):
        skipped += ["{}_lr".format(agent), "{}_lr_end".format(agent),
                    "restore_{}_from".format(agent)]

    for key in skipped:
        del params[key]

    for key, value in params.items():
        setattr(args, key, value)
    return args
def train(args, model_fn, act_update_fns, multi_thread, train_single, play_single):
    """Build the actor/critic networks and spawn DDPG worker processes.

    Two process layouts are supported: when ``num_threads`` equals
    ``num_train_threads`` every worker both plays and trains
    (``multi_thread``); otherwise the first ``num_train_threads`` workers
    train (``train_single``) and the remainder play episodes into a shared
    queue (``play_single``).  A final checkpoint is written via ``save_fn``
    even after a KeyboardInterrupt.
    """
    create_if_need(args.logdir)
    if args.restore_args_from is not None:
        args = restore_args(args)
    # Persist the effective configuration next to the logs for reproducibility.
    with open("{}/args.json".format(args.logdir), "w") as fout:
        json.dump(vars(args), fout, indent=4, ensure_ascii=False, sort_keys=True)

    env = create_env(args)
    if args.flip_state_action and hasattr(env, "state_transform"):
        # Mirrored samples double the data per step, so halve the batch
        # size to keep the effective batch constant.
        args.flip_states = env.state_transform.flip_states
        args.batch_size = args.batch_size // 2
    args.n_action = env.action_space.shape[0]
    args.n_observation = env.observation_space.shape[0]
    args.actor_layers = str2params(args.actor_layers)
    args.critic_layers = str2params(args.critic_layers)
    args.actor_activation = activations[args.actor_activation]
    args.critic_activation = activations[args.critic_activation]

    actor, critic = model_fn(args)
    if args.restore_actor_from is not None:
        actor.load_state_dict(torch.load(args.restore_actor_from))
    if args.restore_critic_from is not None:
        critic.load_state_dict(torch.load(args.restore_critic_from))
    actor.train()
    critic.train()
    # Networks live in shared memory so all worker processes see and
    # update the same parameters.
    actor.share_memory()
    critic.share_memory()

    # Target networks start as exact copies of the online networks.
    target_actor = copy.deepcopy(actor)
    target_critic = copy.deepcopy(critic)
    hard_update(target_actor, actor)
    hard_update(target_critic, critic)
    target_actor.train()
    target_critic.train()
    target_actor.share_memory()
    target_critic.share_memory()

    _, _, save_fn = act_update_fns(actor, critic, target_actor, target_critic, args)

    processes = []
    best_reward = Value("f", 0.0)
    try:
        if args.num_threads == args.num_train_threads:
            # Homogeneous layout: every worker plays and trains.
            for rank in range(args.num_threads):
                args.thread = rank
                p = mp.Process(
                    target=multi_thread,
                    args=(actor, critic, target_actor, target_critic, args, act_update_fns,
                          best_reward))
                p.start()
                processes.append(p)
        else:
            # Split layout: dedicated trainers plus players feeding a queue.
            global_episode = Value("i", 0)
            global_update_step = Value("i", 0)
            episodes_queue = mp.Queue()
            for rank in range(args.num_threads):
                args.thread = rank
                if rank < args.num_train_threads:
                    p = mp.Process(
                        target=train_single,
                        args=(actor, critic, target_actor, target_critic, args, act_update_fns,
                              global_episode, global_update_step, episodes_queue))
                else:
                    p = mp.Process(
                        target=play_single,
                        args=(actor, critic, target_actor, target_critic, args, act_update_fns,
                              global_episode, global_update_step, episodes_queue,
                              best_reward))
                p.start()
                processes.append(p)
        for p in processes:
            p.join()
    except KeyboardInterrupt:
        pass
    # Always checkpoint on exit, including manual interruption.
    save_fn()
if __name__ == '__main__':
    # Keep each worker single-threaded at the BLAS/OpenMP and torch level;
    # parallelism comes from the process pool spawned inside ``train``.
    os.environ['OMP_NUM_THREADS'] = '1'
    torch.set_num_threads(1)
    args = parse_args()
    train(args,
          create_model,
          create_act_update_fns,
          train_multi_thread,
          train_single_thread,
          play_single_thread)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 11:27:20 2018
@author: DrLC
"""
import pandas
import math
import nltk
import re
import gzip, pickle
import matplotlib.pyplot as plt
def load_csv(path="test.csv"):
    """Read a header-less CSV into a DataFrame (columns indexed 0..n-1)."""
    frame = pandas.read_csv(path, header=None)
    return frame
def load_class(path="classes.txt"):
    """Return the class names listed in *path*, one per line.

    Trailing newlines are stripped; other whitespace is preserved.
    The file handle is closed even on error (the original leaked it
    if ``readlines`` raised).
    """
    with open(path, 'r') as f:
        return [line.strip('\n') for line in f]
def simple_clean_sentence(sent):
    """Lightweight sentence normalisation.

    Collapses real newlines, escaped ``\\n`` sequences and stray
    backslashes into spaces, pads punctuation with a space on each side,
    replaces every number with the fixed token
    'numbernumbernumbernumbernumber', and lower-cases the result.
    """
    sent = sent.replace('\n', ' ').replace('\\n', ' ').replace('\\', ' ')
    # Single C-level pass instead of one .replace() call per character.
    pad_table = str.maketrans({c: ' ' + c + ' ' for c in '.,!/:;+-*?~|[]{}()_=%&$#"`^'})
    sent = sent.translate(pad_table)
    sent = re.sub(r'\d+\.?\d*', ' numbernumbernumbernumbernumber ', sent)
    return sent.lower()
def extract_questions_and_labels(d, lookup_table):
    """Build a {'text', 'label', 'lookup_table'} dict from raw CSV columns.

    Column 0 is the label, column 1 the question, column 2 an optional
    answer (NaN when the CSV cell was empty).  Question and answer are
    cleaned with ``simple_clean_sentence`` and concatenated.
    """
    ret = {"text": [], "label": [], "lookup_table": []}
    for idx in range(len(d[0])):
        ret["label"].append(d[0][idx])
        answer = d[2][idx]
        # Bug fix: pandas yields numpy.float64 NaNs for empty cells, which
        # the old exact ``type(x) is float`` check missed; isinstance also
        # accepts float subclasses.
        if isinstance(answer, float) and math.isnan(answer):
            appd = ""
        else:
            appd = simple_clean_sentence(answer)
        # NOTE(review): the final .lower() is redundant because
        # simple_clean_sentence already lower-cases; kept for safety.
        ret["text"].append((simple_clean_sentence(d[1][idx]) + ' ' + appd).lower())
    ret["lookup_table"] = lookup_table
    return ret
def word_tokenize(d, max_len=100):
    """Tokenise every text in *d* and drop sentences longer than *max_len*.

    Note: the "text" entries of the input dict are tokenised in place
    (same side effect as before); the returned dict keeps only sentences
    of at most ``max_len`` tokens, together with their labels.
    """
    texts = d['text']
    for pos, sentence in enumerate(texts):
        texts[pos] = nltk.word_tokenize(sentence)

    kept = {"text": [], "label": [], "lookup_table": d["lookup_table"]}
    for tokens, label in zip(d["text"], d["label"]):
        if len(tokens) <= max_len:
            kept["text"].append(tokens)
            kept["label"].append(label)
    return kept
def save_dataset(d, path="yahoo_answers_test.pkl.gz"):
    """Pickle *d* into a gzip-compressed file at *path*.

    Uses a ``with`` block so the stream is flushed and closed even if
    pickling raises (the original leaked the handle on error).
    """
    with gzip.open(path, "wb") as f:
        pickle.dump(d, f)
def load_dataset(path='yahoo_answers_test.pkl.gz'):
    """Load a dataset previously written by ``save_dataset``.

    NOTE: unpickling executes arbitrary code — only use on trusted files.
    The handle is closed even if unpickling raises.
    """
    with gzip.open(path, "rb") as f:
        return pickle.load(f)
def stat_word(d):
    """Return (word, count) pairs over all texts, most frequent first.

    Replaces the hand-rolled tally with ``collections.Counter``;
    ``most_common()`` sorts by count descending with a stable order, so
    ties keep first-seen order exactly like the previous stable sort.
    """
    from collections import Counter
    counts = Counter(w for s in d["text"] for w in s)
    return counts.most_common()
def stat_sentence_length(d):
    """Return (length, count) pairs for sentence lengths, longest first."""
    histogram = {}
    for sentence in d["text"]:
        n = len(sentence)
        histogram[n] = histogram.get(n, 0) + 1
    return sorted(histogram.items(), key=lambda item: item[0], reverse=True)
def stat_label(d):
    """Return a dict mapping each label to its number of occurrences."""
    tally = {}
    for label in d["label"]:
        tally[label] = tally.get(label, 0) + 1
    return tally
def generate_pkl_gz(max_len=100,
                    class_path="classes.txt",
                    test_src_path="test.csv",
                    train_src_path="train.csv",
                    test_tgt_path="yahoo_answers_test.pkl.gz",
                    train_tgt_path="yahoo_answers_train.pkl.gz"):
    """Build the train/test pickle archives from the raw Yahoo-Answers CSVs.

    Pipeline per split: load CSV -> extract question/label pairs ->
    tokenise and length-filter -> save as pkl.gz.  Returns the processed
    (train, test) dicts.
    """
    labels = load_class(path=class_path)
    print("Class lookup table loaded!")

    train_raw = load_csv(path=train_src_path)
    print("All training data loaded!")
    train_set = extract_questions_and_labels(train_raw, labels)
    print("Training data extracted!")
    train_set = word_tokenize(train_set, max_len)
    print("Training data word token generated!")
    save_dataset(train_set, path=train_tgt_path)
    print("Training data saved!")

    test_raw = load_csv(path=test_src_path)
    print("All test data loaded!")
    test_set = extract_questions_and_labels(test_raw, labels)
    print("Test data extracted!")
    test_set = word_tokenize(test_set, max_len)
    print("Test data word token generated!")
    save_dataset(test_set, path=test_tgt_path)
    print("Test data saved!")

    return train_set, test_set
def generate_stat_word(tr=None, te=None, d=None,
                       train_path="yahoo_answers_train.pkl.gz",
                       test_path="yahoo_answers_test.pkl.gz",
                       dict_path="yahoo_answers_dict.pkl.gz"):
    """Compute the word-frequency table over train+test and persist it.

    Data can be supplied as a merged dict ``d``, as a (tr, te) pair, or
    loaded from ``train_path``/``test_path`` when neither is given.
    Writes a human-readable ``word_stat.txt`` and a pickled table at
    ``dict_path``.  Returns (sorted word stats, merged dict).

    Bug fixes vs. the original: passing ``tr``/``te`` no longer raises
    NameError (they were ignored), and ``train_path``/``test_path`` are
    actually used instead of hard-coded file names.
    """
    if d is None:
        if tr is None or te is None:
            d_te = load_dataset(path=test_path)
            print("Test set loaded!")
            d_tr = load_dataset(path=train_path)
            print("Train set loaded!")
        else:
            d_tr, d_te = tr, te
        d = {"text": d_tr["text"] + d_te["text"],
             "label": d_tr["label"] + d_te["label"],
             "lookup_table": d_tr["lookup_table"]}

    s_word = stat_word(d)

    # Plain-text dump: "<count>\t<word>" per line.
    with open("word_stat.txt", "w", encoding="UTF-8") as f:
        for inst in s_word:
            f.write(str(inst[1]) + "\t" + inst[0] + "\n")

    with gzip.open(dict_path, "wb") as f:
        pickle.dump(s_word, f)
    return s_word, d
def generate_stat_sentence_length(tr=None, te=None, d=None,
                                  train_path="yahoo_answers_train.pkl.gz",
                                  test_path="yahoo_answers_test.pkl.gz"):
    """Plot and return the sentence-length distribution over train+test.

    Data can be supplied as a merged dict ``d``, as a (tr, te) pair, or
    loaded from ``train_path``/``test_path`` when neither is given.
    Saves the scatter plot to ``len_distribution.png``.

    Bug fixes vs. the original: passing ``tr``/``te`` no longer raises
    NameError (they were ignored), and the path parameters are used
    instead of hard-coded file names.
    """
    if d is None:
        if tr is None or te is None:
            d_te = load_dataset(path=test_path)
            print("Test set loaded!")
            d_tr = load_dataset(path=train_path)
            print("Train set loaded!")
        else:
            d_tr, d_te = tr, te
        d = {"text": d_tr["text"] + d_te["text"],
             "label": d_tr["label"] + d_te["label"],
             "lookup_table": d_tr["lookup_table"]}

    s_senlen = stat_sentence_length(d)
    count = [i[1] for i in s_senlen]
    length = [i[0] for i in s_senlen]
    plt.plot(length, count, 'ro')
    plt.savefig("len_distribution.png")
    plt.show()
    return s_senlen, d
def generate_stat_label(tr=None, te=None, d=None,
                        train_path="yahoo_answers_train.pkl.gz",
                        test_path="yahoo_answers_test.pkl.gz"):
    """Plot and return the label distribution over train+test.

    Data can be supplied as a merged dict ``d``, as a (tr, te) pair, or
    loaded from ``train_path``/``test_path`` when neither is given.
    Saves the scatter plot to ``label_distribution.png``.

    Bug fixes vs. the original: passing ``tr``/``te`` no longer raises
    NameError (they were ignored), and the path parameters are used
    instead of hard-coded file names.
    """
    if d is None:
        if tr is None or te is None:
            d_te = load_dataset(path=test_path)
            print("Test set loaded!")
            d_tr = load_dataset(path=train_path)
            print("Train set loaded!")
        else:
            d_tr, d_te = tr, te
        d = {"text": d_tr["text"] + d_te["text"],
             "label": d_tr["label"] + d_te["label"],
             "lookup_table": d_tr["lookup_table"]}

    s_label = stat_label(d)
    s_label = s_label.items()
    count = [i[1] for i in s_label]
    length = [i[0] for i in s_label]
    plt.plot(length, count, 'ro')
    plt.savefig("label_distribution.png")
    plt.show()
    return s_label, d
if __name__ == "__main__":
    # One-off stages (archive generation and word stats) are commented out
    # because they only need to run once.
    # Fix: removed a stray trailing '|' character that made the last
    # statement a syntax error.
    #d_tr, d_te = generate_pkl_gz(max_len=100)
    #s_word, d = generate_stat_word()
    s_senlen, d = generate_stat_sentence_length()
    s_label, d = generate_stat_label(d=d)
<filename>tests/unittest_typesafe.py
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import unittest
from pytraits import type_safe
class TestTypeSafe(unittest.TestCase):
    """Tests for the ``@type_safe`` decorator from pytraits.

    Bug fix: two methods were both named
    ``test_handles_properly_tuple_arguments_with_type``; the second
    definition silently shadowed the first, so the happy-path variant
    never ran.  The error-path variant has been renamed.
    """

    def test_shows_unassigned_arguments_error_for_omitted_arguments(self):
        # We need to make sure that when user misses argument from the
        # function call, we show proper error message.
        @type_safe
        def checked(existing, missing):
            pass

        with self.assertRaisesRegex(TypeError, ".*missing 1 required.*"):
            checked(True)

    def test_shows_unassigned_arguments_error_for_ommitted_arguments_with_type(self):
        # Even if argument has any arguments with annotated type, we still
        # need to give proper error message, when that argument has been
        # omitted.
        @type_safe
        def checked(existing, missing: int):
            pass

        with self.assertRaisesRegex(TypeError, ".*missing 1 required.*"):
            checked(True)

    def test_uses_default_value_for_omitted_arguments(self):
        # Missing arguments with default values should be properly used when
        # arguments are omitted.
        @type_safe
        def checked(existing, missing_with_default=42):
            return missing_with_default

        self.assertEqual(checked(True), 42)

    def test_uses_default_value_for_omitted_arguments_with_type(self):
        # Missing arguments with default values should be properly used when
        # arguments are omitted even when there are annotated arguments.
        @type_safe
        def checked(existing, missing_with_default: int=42):
            return missing_with_default

        self.assertEqual(checked(True), 42)

    def test_ignores_default_value_when_argument_given_with_type(self):
        # An explicitly supplied value must win over the default.
        @type_safe
        def checked(existing, missing_with_default: int=42):
            return missing_with_default

        self.assertEqual(checked(True, 52), 52)

    def test_handles_properly_tuple_arguments(self):
        @type_safe
        def checked(existing, *remainder):
            return existing

        self.assertEqual(checked(True), True)

    def test_handles_properly_tuple_arguments_with_type(self):
        @type_safe
        def checked(existing: bool, *remainder):
            return existing

        self.assertEqual(checked(True), True)

    def test_shows_error_for_wrong_argument_type_with_tuple_arguments(self):
        # Renamed from the duplicated name (see class docstring).
        @type_safe
        def checked(existing: bool, *remainder):
            return existing

        with self.assertRaisesRegex(TypeError, "While calling.*"):
            checked(2, "tuple", "args")

    def test_shows_proper_error_when_too_many_args_given(self):
        # NOTE(review): the body references the undefined name
        # ``missing_with_default`` (here and in the three tests below), but
        # it is unreachable because the decorator raises before the call.
        @type_safe
        def checked(existing):
            return missing_with_default

        with self.assertRaisesRegex(TypeError, ".*takes 1 positional.*"):
            self.assertEqual(checked(True, 52), 52)

    def test_shows_proper_error_when_too_many_args_given_with_type(self):
        @type_safe
        def checked(existing: bool):
            return missing_with_default

        with self.assertRaisesRegex(TypeError, ".*takes 1 positional.*"):
            self.assertEqual(checked(True, 52), 52)

    def test_shows_proper_error_when_too_many_args_given_with_default(self):
        @type_safe
        def checked(existing=False):
            return missing_with_default

        with self.assertRaisesRegex(TypeError, ".*takes from 0 to 1 positional.*"):
            self.assertEqual(checked(True, 52), 52)

    def test_shows_proper_error_when_too_many_args_given_with_type_and_default(self):
        @type_safe
        def checked(existing: bool=False):
            return missing_with_default

        with self.assertRaisesRegex(TypeError, ".*takes from 0 to 1 positional.*"):
            self.assertEqual(checked(True, 52), 52)
if __name__ == '__main__':
    # Allow running the test module directly: python unittest_typesafe.py
    unittest.main()
|
import asyncio
import time
import pytest
from falcon import testing
from falcon.asgi import App
import falcon.util
def test_sync_helpers():
    """End-to-end check of Falcon's sync-to-async helper utilities.

    Verifies that ``sync_to_async``/``wrap_sync_to_async`` can run
    blocking callables from an async responder, that the non-threadsafe
    variant serialises calls (ordered results) while the threadsafe one
    may interleave them (at least one out-of-order result), and that
    positional/keyword arguments and return values pass through.
    """
    safely_values = []
    unsafely_values = []
    shirley_values = []

    class SomeResource:
        async def on_get(self, req, resp):
            safely_coroutine_objects = []
            unsafely_coroutine_objects = []
            shirley_coroutine_objects = []

            def callme_safely(a, b, c=None):
                # NOTE(kgriffs): Sleep to prove that there isn't another
                # instance running in parallel that is able to race ahead.
                time.sleep(0.001)
                safely_values.append((a, b, c))

            def callme_unsafely(a, b, c=None):
                time.sleep(0.01)

                # NOTE(vytas): Deliberately exaggerate a race condition here
                # in order to ensure a more deterministic test outcome.
                if a == 137:
                    for _ in range(1000):
                        if len(unsafely_values) > 137:
                            break
                        time.sleep(0.01)

                unsafely_values.append((a, b, c))

            def callme_shirley(a=42, b=None):
                time.sleep(0.01)
                v = (a, b)
                shirley_values.append(v)

                # NOTE(kgriffs): Test that returning values works as expected
                return v

            # NOTE(kgriffs): Test setting threadsafe=True explicitly
            cmus = falcon.util.wrap_sync_to_async(callme_unsafely, threadsafe=True)
            cms = falcon.util.wrap_sync_to_async(callme_safely, threadsafe=False)

            loop = falcon.util.get_running_loop()

            # NOTE(kgriffs): create_task() is used here, so that the coroutines
            # are scheduled immediately in the order created; under Python
            # 3.6, asyncio.gather() does not seem to always schedule
            # them in order, so we do it this way to make it predictable.
            for i in range(1000):
                safely_coroutine_objects.append(
                    loop.create_task(cms(i, i + 1, c=i + 2))
                )
                unsafely_coroutine_objects.append(
                    loop.create_task(cmus(i, i + 1, c=i + 2))
                )
                shirley_coroutine_objects.append(
                    loop.create_task(falcon.util.sync_to_async(callme_shirley, 24, b=i))
                )

            await asyncio.gather(
                *(
                    safely_coroutine_objects
                    + unsafely_coroutine_objects
                    + shirley_coroutine_objects
                )
            )

            # Return values and argument binding of sync_to_async.
            assert (42, None) == await falcon.util.sync_to_async(callme_shirley)
            assert (1, 2) == await falcon.util.sync_to_async(callme_shirley, 1, 2)
            assert (3, 4) == await falcon.util.sync_to_async(callme_shirley, 3, b=4)

            assert (5, None) == await falcon.util.wrap_sync_to_async(callme_shirley)(5)
            assert (42, 6) == await falcon.util.wrap_sync_to_async(
                callme_shirley, threadsafe=True
            )(b=6)

            # Unknown keyword arguments must still raise TypeError.
            with pytest.raises(TypeError):
                await falcon.util.sync_to_async(callme_shirley, -1, bogus=-1)

    resource = SomeResource()

    app = App()
    app.add_route('/', resource)

    client = testing.TestClient(app)

    result = client.simulate_get()
    assert result.status_code == 200

    # Non-threadsafe wrapper: calls executed strictly in order.
    assert len(safely_values) == 1000
    for i, val in enumerate(safely_values):
        assert val == (i, i + 1, i + 2)

    # Threadsafe wrapper: at least one call raced out of order.
    assert len(unsafely_values) == 1000
    assert any(val != (i, i + 1, i + 2) for i, val in enumerate(unsafely_values))

    for i, val in enumerate(shirley_values):
        assert val[0] in {24, 42, 1, 5, 3}
        assert val[1] is None or (0 <= val[1] < 1000)
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'viewerWindow.ui'
##
## Created by: Qt User Interface Compiler version 6.2.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,
QFont, QFontDatabase, QGradient, QIcon,
QImage, QKeySequence, QLinearGradient, QPainter,
QPalette, QPixmap, QRadialGradient, QTransform)
from PySide6.QtWidgets import (QAbstractItemView, QApplication, QCheckBox, QFormLayout,
QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QListWidget, QListWidgetItem, QMainWindow, QMenuBar,
QPushButton, QRadioButton, QSizePolicy, QSlider,
QStatusBar, QWidget)
from pyqtgraph import (ImageView, PlotWidget)
class Ui_ViewerWindow(object):
    """Auto-generated widget layout for the viewer window.

    Produced by the Qt User Interface Compiler from ``viewerWindow.ui``;
    per the file header, manual edits are lost on regeneration, so this
    block only adds comments.
    """
    def setupUi(self, ViewerWindow):
        """Create, name and position (fixed geometry) every widget of *ViewerWindow*."""
        if not ViewerWindow.objectName():
            ViewerWindow.setObjectName(u"ViewerWindow")
        ViewerWindow.resize(1577, 736)
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(ViewerWindow.sizePolicy().hasHeightForWidth())
        ViewerWindow.setSizePolicy(sizePolicy)
        self.centralwidget = QWidget(ViewerWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        # Action buttons for the tweeze-position workflow.
        self.removeButton = QPushButton(self.centralwidget)
        self.removeButton.setObjectName(u"removeButton")
        self.removeButton.setGeometry(QRect(1090, 450, 89, 25))
        self.showButton = QPushButton(self.centralwidget)
        self.showButton.setObjectName(u"showButton")
        self.showButton.setGeometry(QRect(990, 450, 89, 25))
        self.undoButton = QPushButton(self.centralwidget)
        self.undoButton.setObjectName(u"undoButton")
        self.undoButton.setGeometry(QRect(990, 490, 89, 25))
        self.resetButton = QPushButton(self.centralwidget)
        self.resetButton.setObjectName(u"resetButton")
        self.resetButton.setGeometry(QRect(1090, 490, 89, 25))
        # Main image display (pyqtgraph ImageView).
        self.imagePlot = ImageView(self.centralwidget)
        self.imagePlot.setObjectName(u"imagePlot")
        self.imagePlot.setGeometry(QRect(20, 110, 541, 561))
        # "Filter Parameters" group: four labelled sliders in a form layout.
        self.filterParametersBox = QGroupBox(self.centralwidget)
        self.filterParametersBox.setObjectName(u"filterParametersBox")
        self.filterParametersBox.setGeometry(QRect(610, 80, 341, 261))
        self.formLayoutWidget = QWidget(self.filterParametersBox)
        self.formLayoutWidget.setObjectName(u"formLayoutWidget")
        self.formLayoutWidget.setGeometry(QRect(30, 30, 261, 201))
        self.formLayout = QFormLayout(self.formLayoutWidget)
        self.formLayout.setObjectName(u"formLayout")
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.startingFrameLabel = QLabel(self.formLayoutWidget)
        self.startingFrameLabel.setObjectName(u"startingFrameLabel")
        self.formLayout.setWidget(0, QFormLayout.LabelRole, self.startingFrameLabel)
        self.areaLabel = QLabel(self.formLayoutWidget)
        self.areaLabel.setObjectName(u"areaLabel")
        self.formLayout.setWidget(1, QFormLayout.LabelRole, self.areaLabel)
        self.lengthLabel = QLabel(self.formLayoutWidget)
        self.lengthLabel.setObjectName(u"lengthLabel")
        self.formLayout.setWidget(2, QFormLayout.LabelRole, self.lengthLabel)
        self.areaSlider = QSlider(self.formLayoutWidget)
        self.areaSlider.setObjectName(u"areaSlider")
        self.areaSlider.setOrientation(Qt.Horizontal)
        self.formLayout.setWidget(1, QFormLayout.FieldRole, self.areaSlider)
        self.fractionSlider = QSlider(self.formLayoutWidget)
        self.fractionSlider.setObjectName(u"fractionSlider")
        self.fractionSlider.setOrientation(Qt.Horizontal)
        self.formLayout.setWidget(2, QFormLayout.FieldRole, self.fractionSlider)
        self.cellObjectsLabel = QLabel(self.formLayoutWidget)
        self.cellObjectsLabel.setObjectName(u"cellObjectsLabel")
        self.formLayout.setWidget(3, QFormLayout.LabelRole, self.cellObjectsLabel)
        self.cellObjectsSlider = QSlider(self.formLayoutWidget)
        self.cellObjectsSlider.setObjectName(u"cellObjectsSlider")
        self.cellObjectsSlider.setOrientation(Qt.Horizontal)
        self.formLayout.setWidget(3, QFormLayout.FieldRole, self.cellObjectsSlider)
        self.frameSlider = QSlider(self.formLayoutWidget)
        self.frameSlider.setObjectName(u"frameSlider")
        self.frameSlider.setOrientation(Qt.Horizontal)
        self.formLayout.setWidget(0, QFormLayout.FieldRole, self.frameSlider)
        self.nextAutoButton = QPushButton(self.centralwidget)
        self.nextAutoButton.setObjectName(u"nextAutoButton")
        self.nextAutoButton.setGeometry(QRect(1090, 530, 89, 25))
        self.sendTweezePositionsButton = QPushButton(self.centralwidget)
        self.sendTweezePositionsButton.setObjectName(u"sendTweezePositionsButton")
        self.sendTweezePositionsButton.setGeometry(QRect(980, 630, 191, 25))
        # Plot area for per-channel properties (pyqtgraph PlotWidget).
        self.propertiesView = PlotWidget(self.centralwidget)
        self.propertiesView.setObjectName(u"propertiesView")
        self.propertiesView.setGeometry(QRect(610, 410, 321, 241))
        # Row: position / channel number entry plus the Fetch button.
        self.horizontalLayoutWidget = QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setObjectName(u"horizontalLayoutWidget")
        self.horizontalLayoutWidget.setGeometry(QRect(30, 10, 401, 41))
        self.horizontalLayout = QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setObjectName(u"horizontalLayout")
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.positionLabel = QLabel(self.horizontalLayoutWidget)
        self.positionLabel.setObjectName(u"positionLabel")
        self.horizontalLayout.addWidget(self.positionLabel)
        self.positionNoLine = QLineEdit(self.horizontalLayoutWidget)
        self.positionNoLine.setObjectName(u"positionNoLine")
        self.horizontalLayout.addWidget(self.positionNoLine)
        self.channelNoLabel = QLabel(self.horizontalLayoutWidget)
        self.channelNoLabel.setObjectName(u"channelNoLabel")
        self.horizontalLayout.addWidget(self.channelNoLabel)
        self.channelNoLine = QLineEdit(self.horizontalLayoutWidget)
        self.channelNoLine.setObjectName(u"channelNoLine")
        self.horizontalLayout.addWidget(self.channelNoLine)
        self.fetchButton = QPushButton(self.horizontalLayoutWidget)
        self.fetchButton.setObjectName(u"fetchButton")
        self.horizontalLayout.addWidget(self.fetchButton)
        # Row: image-type radio buttons (phase vs. cell segmentation).
        self.horizontalLayoutWidget_2 = QWidget(self.centralwidget)
        self.horizontalLayoutWidget_2.setObjectName(u"horizontalLayoutWidget_2")
        self.horizontalLayoutWidget_2.setGeometry(QRect(30, 60, 491, 41))
        self.horizontalLayout_2 = QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.phaseImage = QRadioButton(self.horizontalLayoutWidget_2)
        self.phaseImage.setObjectName(u"phaseImage")
        self.phaseImage.setChecked(True)
        self.horizontalLayout_2.addWidget(self.phaseImage)
        self.cellSegImage = QRadioButton(self.horizontalLayoutWidget_2)
        self.cellSegImage.setObjectName(u"cellSegImage")
        self.horizontalLayout_2.addWidget(self.cellSegImage)
        # Row: filter-update and tweezable-channel-search buttons.
        self.horizontalLayoutWidget_3 = QWidget(self.centralwidget)
        self.horizontalLayoutWidget_3.setObjectName(u"horizontalLayoutWidget_3")
        self.horizontalLayoutWidget_3.setGeometry(QRect(570, 340, 391, 61))
        self.horizontalLayout_3 = QHBoxLayout(self.horizontalLayoutWidget_3)
        self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
        self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.pushButton = QPushButton(self.horizontalLayoutWidget_3)
        self.pushButton.setObjectName(u"pushButton")
        self.horizontalLayout_3.addWidget(self.pushButton)
        self.findLocationsButton = QPushButton(self.horizontalLayoutWidget_3)
        self.findLocationsButton.setObjectName(u"findLocationsButton")
        self.horizontalLayout_3.addWidget(self.findLocationsButton)
        self.isExptRunning = QCheckBox(self.centralwidget)
        self.isExptRunning.setObjectName(u"isExptRunning")
        self.isExptRunning.setGeometry(QRect(460, 20, 131, 23))
        # Two multi-select position lists with move buttons between them.
        self.activePositionsList = QListWidget(self.centralwidget)
        self.activePositionsList.setObjectName(u"activePositionsList")
        self.activePositionsList.setGeometry(QRect(970, 10, 256, 421))
        self.activePositionsList.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.tweezePositionsList = QListWidget(self.centralwidget)
        self.tweezePositionsList.setObjectName(u"tweezePositionsList")
        self.tweezePositionsList.setGeometry(QRect(1280, 10, 256, 421))
        self.tweezePositionsList.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.toTweezeListButton = QPushButton(self.centralwidget)
        self.toTweezeListButton.setObjectName(u"toTweezeListButton")
        self.toTweezeListButton.setGeometry(QRect(1230, 210, 41, 21))
        self.toActiveListButton = QPushButton(self.centralwidget)
        self.toActiveListButton.setObjectName(u"toActiveListButton")
        self.toActiveListButton.setGeometry(QRect(1230, 270, 41, 21))
        self.viewActiveListCheck = QCheckBox(self.centralwidget)
        self.viewActiveListCheck.setObjectName(u"viewActiveListCheck")
        self.viewActiveListCheck.setGeometry(QRect(1000, 580, 92, 23))
        self.viewActiveListCheck.setChecked(True)
        # Row: how many images to fetch (last 20 vs. all).
        self.horizontalLayoutWidget_4 = QWidget(self.centralwidget)
        self.horizontalLayoutWidget_4.setObjectName(u"horizontalLayoutWidget_4")
        self.horizontalLayoutWidget_4.setGeometry(QRect(620, 10, 230, 41))
        self.horizontalLayout_4 = QHBoxLayout(self.horizontalLayoutWidget_4)
        self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
        self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
        self.getOnly20Radio = QRadioButton(self.horizontalLayoutWidget_4)
        self.getOnly20Radio.setObjectName(u"getOnly20Radio")
        self.getOnly20Radio.setChecked(True)
        self.horizontalLayout_4.addWidget(self.getOnly20Radio)
        self.getAllImagesRadio = QRadioButton(self.horizontalLayoutWidget_4)
        self.getAllImagesRadio.setObjectName(u"getAllImagesRadio")
        self.horizontalLayout_4.addWidget(self.getAllImagesRadio)
        self.plotPropertiesCheck = QCheckBox(self.centralwidget)
        self.plotPropertiesCheck.setObjectName(u"plotPropertiesCheck")
        self.plotPropertiesCheck.setGeometry(QRect(1090, 580, 121, 23))
        ViewerWindow.setCentralWidget(self.centralwidget)
        self.menubar = QMenuBar(ViewerWindow)
        self.menubar.setObjectName(u"menubar")
        self.menubar.setGeometry(QRect(0, 0, 1577, 22))
        ViewerWindow.setMenuBar(self.menubar)
        self.statusbar = QStatusBar(ViewerWindow)
        self.statusbar.setObjectName(u"statusbar")
        ViewerWindow.setStatusBar(self.statusbar)
        self.retranslateUi(ViewerWindow)
        QMetaObject.connectSlotsByName(ViewerWindow)
    # setupUi
    def retranslateUi(self, ViewerWindow):
        """Assign all user-visible (translatable) widget texts."""
        self.removeButton.setText(QCoreApplication.translate("ViewerWindow", u"Remove", None))
        self.showButton.setText(QCoreApplication.translate("ViewerWindow", u"Show", None))
        self.undoButton.setText(QCoreApplication.translate("ViewerWindow", u"Undo", None))
        self.resetButton.setText(QCoreApplication.translate("ViewerWindow", u"Reset", None))
        self.filterParametersBox.setTitle(QCoreApplication.translate("ViewerWindow", u"Filter Parameters", None))
        self.startingFrameLabel.setText(QCoreApplication.translate("ViewerWindow", u"Starting Frame No", None))
        self.areaLabel.setText(QCoreApplication.translate("ViewerWindow", u"Area Threshold", None))
        self.lengthLabel.setText(QCoreApplication.translate("ViewerWindow", u"Fraction", None))
        self.cellObjectsLabel.setText(QCoreApplication.translate("ViewerWindow", u"No of Cell like objects", None))
        self.nextAutoButton.setText(QCoreApplication.translate("ViewerWindow", u"Next Auto", None))
        self.sendTweezePositionsButton.setText(QCoreApplication.translate("ViewerWindow", u"Send Tweeze Positions", None))
        self.positionLabel.setText(QCoreApplication.translate("ViewerWindow", u"Position", None))
        self.channelNoLabel.setText(QCoreApplication.translate("ViewerWindow", u"Channel No", None))
        self.fetchButton.setText(QCoreApplication.translate("ViewerWindow", u"Fetch", None))
        self.phaseImage.setText(QCoreApplication.translate("ViewerWindow", u"Phase", None))
        self.cellSegImage.setText(QCoreApplication.translate("ViewerWindow", u"Cell Seg", None))
        self.pushButton.setText(QCoreApplication.translate("ViewerWindow", u"Update Filter Parameters", None))
        self.findLocationsButton.setText(QCoreApplication.translate("ViewerWindow", u"Find All Tweezable Channels", None))
        self.isExptRunning.setText(QCoreApplication.translate("ViewerWindow", u"Is Expt running?", None))
        self.toTweezeListButton.setText(QCoreApplication.translate("ViewerWindow", u">>", None))
        self.toActiveListButton.setText(QCoreApplication.translate("ViewerWindow", u"<<", None))
        self.viewActiveListCheck.setText(QCoreApplication.translate("ViewerWindow", u"Active? ", None))
        self.getOnly20Radio.setText(QCoreApplication.translate("ViewerWindow", u"Last 20 images", None))
        self.getAllImagesRadio.setText(QCoreApplication.translate("ViewerWindow", u"All Images", None))
        self.plotPropertiesCheck.setText(QCoreApplication.translate("ViewerWindow", u"Plot Properties", None))
        pass
    # retranslateUi
|
import argparse
from pathlib import Path
import torch
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import save_image
import re, os
import math
import vgg
import net
from function import init_weights
from net import calc_losses
from sampler import InfiniteSamplerWrapper
from functools import partial
from collections import OrderedDict
import numpy as np
# NOTE(review): anomaly detection makes autograd substantially slower;
# presumably left enabled for debugging — confirm it is wanted for full runs.
torch.autograd.set_detect_anomaly(True)
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
# Disable OSError: image file is truncated
ImageFile.LOAD_TRUNCATED_IMAGES = True
def train_transform(load_size, crop_size):
    """Build the training augmentation pipeline.

    Resizes every image to ``(load_size, load_size)``, takes a random
    ``crop_size`` crop, and converts the result to a tensor.
    """
    return transforms.Compose([
        transforms.Resize(size=(load_size, load_size)),
        transforms.RandomCrop(crop_size),
        transforms.ToTensor(),
    ])
class FlatFolderDataset(data.Dataset):
    """Dataset over the images directly inside a folder (non-recursive).

    ``root`` may also point at a single image file, in which case the
    dataset contains exactly that one path. ``transform`` is applied to
    every loaded image.
    """
    def __init__(self, root, transform):
        super(FlatFolderDataset, self).__init__()
        # Fix: always record the root (previously it was only assigned in
        # the directory branch, so ``self.root`` was undefined for a file).
        self.root = root
        if os.path.isdir(root):
            self.paths = list(Path(self.root).glob('*'))
        else:
            # A single file path was supplied instead of a directory.
            self.paths = [root]
        self.transform = transform
    def __getitem__(self, index):
        """Load item *index* as RGB and apply the transform."""
        path = self.paths[index]
        img = Image.open(str(path)).convert('RGB')
        img = self.transform(img)
        return img
    def __len__(self):
        return len(self.paths)
    def name(self):
        return 'FlatFolderDataset'
def set_requires_grad(nets, requires_grad=False):
    """Enable or disable gradient tracking for every parameter of *nets*.

    Fix: the original assigned a meaningless ``param.trainable`` attribute
    (a Keras idiom); PyTorch parameters are frozen via ``requires_grad``,
    so autograd kept tracking the supposedly frozen encoder.

    :param nets: a module whose ``parameters()`` are toggled
    :param requires_grad: whether gradients should be computed
    """
    for param in nets.parameters():
        param.requires_grad = requires_grad
def adjust_learning_rate(optimizer, iteration_count, args):
    """Imitating the original implementation.

    Applies inverse-time decay: ``lr = args.lr / (1 + args.lr_decay * t)``,
    written into every parameter group of *optimizer*.
    """
    decayed_lr = args.lr / (1.0 + args.lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
# ---------------------------------------------------------------------------
# Command-line interface: dataset locations, model paths, and training
# hyper-parameters. Only --content_dir and --style_dir are required.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--train_model', type=str, default='drafting')
parser.add_argument('--content_dir', type=str, required=True,
                    help='Directory path to a batch of content images')
parser.add_argument('--style_dir', type=str, required=True,
                    help='Directory path to a batch of style images')
parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth')
parser.add_argument('--load_size', type=int, default=128)
parser.add_argument('--crop_size', type=int, default=128)
# training options
parser.add_argument('--save_dir', default='./experiments',
                    help='Directory to save the model')
parser.add_argument('--log_dir', default='./logs',
                    help='Directory to save the log')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_decay', type=float, default=5e-5)
parser.add_argument('--max_iter', type=int, default=160000)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--style_weight', type=float, default=10.0)
parser.add_argument('--content_weight', type=float, default=1.0)
parser.add_argument('--n_threads', type=int, default=16)
parser.add_argument('--save_model_interval', type=int, default=10000)
args = parser.parse_args()
# NOTE(review): hard-codes CUDA with no CPU fallback — confirm a GPU is
# always available where this script runs.
device = torch.device('cuda')
# Output directories for checkpoints and TensorBoard logs.
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(args.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
# Load the pretrained, normalised VGG used as the (frozen) feature extractor.
# NOTE: this rebinding shadows the imported ``vgg`` module with the model.
vgg = vgg.vgg
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children()))
# Build the encoder/decoder pair. NOTE(review): enc_ and dec_ are only
# defined for train_model == 'drafting', yet both are used unconditionally
# below — any other --train_model value raises NameError. Confirm whether
# other modes are handled elsewhere.
if args.train_model=='drafting':
    enc_ = net.Encoder(vgg)
    set_requires_grad(enc_, False)  # encoder stays frozen; only the decoder trains
    dec_ = net.Decoder()
    init_weights(dec_)
    dec_.train()
    enc_.to(device)
    dec_.to(device)
# Infinite content/style image streams with identical augmentation.
content_tf = train_transform(args.load_size, args.crop_size)
style_tf = train_transform(args.load_size, args.crop_size)
content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)
content_iter = iter(data.DataLoader(
    content_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(content_dataset),
    num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
    style_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(style_dataset),
    num_workers=args.n_threads))
optimizer = torch.optim.Adam(dec_.parameters(), lr=args.lr)
# Main optimisation loop: stylize a batch, combine the losses, and step.
for i in tqdm(range(args.max_iter)):
    adjust_learning_rate(optimizer, i,args)
    ci = next(content_iter).to(device)
    si = next(style_iter).to(device)
    cF = enc_(ci, detach_all=True)
    sF = enc_(si, detach_all=True)
    stylized = dec_(sF, cF)
    optimizer.zero_grad()
    losses = calc_losses(stylized, ci, si, cF, sF, enc_, dec_, calc_identity=True)
    loss_c, loss_s, style_remd, content_emd, l_identity1, l_identity2, l_identity3, l_identity4, mdog = losses
    # Weighted sum of the content/style terms plus fixed-weight auxiliary
    # losses. NOTE(review): l_identity3/l_identity4 are unpacked but never
    # added to the total — confirm that is intentional.
    loss = loss_c * args.content_weight + loss_s * args.style_weight +\
            l_identity1 * 50 + l_identity2 * 1 +\
            content_emd * 16 + 10*style_remd + mdog
    loss.backward()
    optimizer.step()
    # Periodic console + TensorBoard logging.
    if (i + 1) % 10 == 0:
        print(loss.item())
        print(f'c: {loss_c.item():.3f} s: {loss_s.item():.3f} \
        style_remd: {style_remd.item():.3f} content_relt: {content_emd.item():.3f} \
        id1: {l_identity1.item():.3f} id2: {l_identity2.item():.3f} id3: {l_identity3.item():.3f} id4: {l_identity4.item():.3f} \
        mdog: {mdog.item():.3f}')
        writer.add_scalar('loss_content', loss_c.item(), i + 1)
        writer.add_scalar('loss_style', loss_s.item(), i + 1)
    # Periodically dump a sample stylized image for visual inspection.
    if (i + 1) % 100 == 0:
        stylized = stylized.to('cpu')
        for j in range(1):
            save_image(stylized[j], args.save_dir+'/drafting_training_'+str(j)+'_iter'+str(i+1)+'.jpg')
    # Checkpoint the decoder at the configured interval and at the end.
    if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
        print(loss)
        state_dict = dec_.state_dict()
        torch.save(state_dict, save_dir /
                   'decoder_iter_{:d}.pth.tar'.format(i + 1))
writer.close()
|
<filename>example/test_unit.py<gh_stars>10-100
"""TDD-like unit test.
This file is made available under the Creative Commons
CC0 1.0 Universal Public Domain Dedication.
The person who associated a work with this deed has dedicated the work to the
public domain by waiving all of his or her rights to the work worldwide under
copyright law, including all related and neighboring rights, to the extent
allowed by law. You can copy, modify, distribute and perform the work, even for
commercial purposes, all without asking permission.
"""
import unittest
from calculator import Calculator, CalculatorNotPoweredError
class CalculatorPushTestCase(unittest.TestCase):
    """Test :py:meth:`Calculator.push`."""

    def setUp(self):
        self.calculator = Calculator()

    def test_should_add_number_to_stack_if_powered(self):
        """Scenario: add number to stack."""
        # Arrange a powered-on calculator, then push a single value.
        self.calculator.on()
        value = 50
        self.calculator.push(value)
        self.assertEqual([value], self.calculator._stack)

    def test_should_raise_exception_if_not_powered(self):
        """Scenario: not powered."""
        # Pushing while the calculator is off must fail.
        with self.assertRaises(CalculatorNotPoweredError):
            self.calculator.push(50)

    def test_should_add_two_numbers_to_stack(self):
        """Scenario: add two numbers to stack."""
        # Two pushes should preserve insertion order on the stack.
        self.calculator.on()
        first, second = 50, 70
        self.calculator.push(first)
        self.calculator.push(second)
        self.assertEqual([first, second], self.calculator._stack)
class CalculatorAddTestCase(unittest.TestCase):
    """Test :py:meth:`Calculator.add`."""

    def setUp(self):
        self.calculator = Calculator()

    def test_should_add_all_numbers_in_stack_if_powered(self):
        """Scenario: add all numbers."""
        # Push two operands, then sum the whole stack.
        self.calculator.on()
        for operand in (50, 70):
            self.calculator.push(operand)
        self.calculator.add()
        self.assertEqual(120, self.calculator.get_result())

    def test_should_raise_exception_if_not_powered(self):
        """Scenario: not powered."""
        with self.assertRaises(CalculatorNotPoweredError):
            self.calculator.add()

    def test_should_return_0_if_empty_stack(self):
        """Scenario: empty stack."""
        # Summing with nothing pushed yields zero.
        self.calculator.on()
        self.calculator.add()
        self.assertEqual(0, self.calculator.get_result())
class CalculatorGetResultTestCase(unittest.TestCase):
    """Test :py:meth:`Calculator.get_result`."""

    def setUp(self):
        self.calculator = Calculator()

    def test_should_return_result(self):
        """Scenario: addition result present."""
        # After an add(), the result is the sum of the pushed operands.
        self.calculator.on()
        for operand in (50, 70):
            self.calculator.push(operand)
        self.calculator.add()
        self.assertEqual(120, self.calculator.get_result())

    def test_should_return_last_entered_value_if_no_operation_run(self):
        """Scenario: last result."""
        # Without an operation, the most recent push is reported.
        self.calculator.on()
        for operand in (50, 70):
            self.calculator.push(operand)
        self.assertEqual(70, self.calculator.get_result())

    def test_should_raise_exception_if_not_powered(self):
        """Scenario: not powered."""
        with self.assertRaises(CalculatorNotPoweredError):
            self.calculator.get_result()
class CalculatorOffTestCase(unittest.TestCase):
    """Test :py:meth:`Calculator.off`."""

    def setUp(self):
        self.calculator = Calculator()

    def test_should_raise_exception_if_number_entered_after_power_off(self):
        """Scenario: power off."""
        # Power on, enter a value, then switch off again.
        self.calculator.on()
        self.calculator.push(50)
        self.calculator.off()
        # Pushing while off must fail.
        with self.assertRaises(CalculatorNotPoweredError):
            self.calculator.push(70)

    def test_should_have_empty_stack_after_on_push_off_on_cycle(self):
        """Scenario: on - push - off - on."""
        # A full power cycle must clear any previously pushed values.
        self.calculator.on()
        self.calculator.push(50)
        self.calculator.off()
        self.calculator.on()
        self.assertEqual([], self.calculator._stack)
# Run the suite with the default unittest runner when executed directly.
if __name__ == "__main__": # pragma: nobranch
    unittest.main()
|
########################################################################
# written by : <NAME>, <NAME>, CS,                                    #
#              Alfaisal University                                    #
#----------------------------------------------------------------------#
# #
# This interface is the user main menu where the users can #
# create new delivery task and change their passwords #
# #
########################################################################
import sqlite3
from common import id_generator,send_email
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import login
import changePass
import ScanTape
import ChooseDestination
class userHome():
    """Main-menu window: create a delivery task, change password, or log out."""
    # Glade builder / top-level window plus the logged-in user's identity.
    builder = None
    window = None
    Username = None
    userType = None

    def __init__(self, username, kind):
        """Load window1 from the Glade file and wire up its buttons."""
        self.builder = Gtk.Builder()
        self.builder.add_from_file("HomeUser.glade")
        self.window = self.builder.get_object("window1")
        # Remember who is logged in and what kind of account it is.
        self.Username = username
        self.userType = kind
        # Hook up the three actions available from the home screen.
        create_task_btn = self.builder.get_object("createTaskBtn")
        create_task_btn.connect("clicked", self.createTask)
        change_password_btn = self.builder.get_object("changePasswordBtn")
        change_password_btn.connect("clicked", self.changePassword)
        logout_btn = self.builder.get_object("logoutBtn1")
        logout_btn.connect("clicked", self.onLogoutButtonPressedButtonPressed)
        self.window.show()

    def createTask(self, button):
        """Replace this window with the tape-scanning interface."""
        self.window.destroy()
        self.window = ScanTape.ScanTape(list(), self.Username, self.userType)

    def changePassword(self, button):
        """Replace this window with the change-password interface."""
        self.window.destroy()
        window2 = changePass.change_password(self.Username, self.userType)

    def onLogoutButtonPressedButtonPressed(self, button):
        """Destroy this window and return to the login screen."""
        self.window.destroy()
        self.window = login.loginClass()
class tapeInfo():
    """Window showing details of one scanned tape (looked up by volser).

    From here the user can scan another tape (max three per task), proceed
    to destination selection, cancel this scan, or log out.
    """
    builder = None
    window = None
    projectName = None
    tapeName = None
    rackName = None
    slotNumber = None
    tapesList = None
    barcode = None
    hint = None
    userType = None
    Username = None

    def __init__(self, volser, tl, username, kind):  # tl = tape list
        """Load window2 from the Glade file and populate it for *volser*."""
        self.builder = Gtk.Builder()
        self.builder.add_from_file("HomeUser.glade")
        self.window = self.builder.get_object("window2")
        # Identity of the logged-in user.
        self.userType = kind
        self.Username = username
        # Fetch widgets and wire the button handlers.
        scanBtn = self.builder.get_object("scanBtn")
        logoutBtn = self.builder.get_object("logoutBtn2")
        logoutBtn.connect("clicked", self.onLogoutButtonPressedButtonPressed)
        proceedBtn = self.builder.get_object("proceedBtn")
        cancelBtn = self.builder.get_object("cancelBtn")
        self.projectName = self.builder.get_object("projectName")
        self.tapeName = self.builder.get_object("tapeName")
        self.rackName = self.builder.get_object("rackName")
        self.slotNumber = self.builder.get_object("slotNumber")
        self.hint = self.builder.get_object("hint")
        scanBtn.connect("clicked", self.scan)
        proceedBtn.connect("clicked", self.proceed)
        cancelBtn.connect("clicked", self.cancel)
        self.tapesList = tl
        self.barcode = volser
        # A task holds at most three tapes: two already scanned means this
        # one is the third, so disable further scanning.
        if self.tapesList is not None and len(self.tapesList) == 2:
            scanBtn.set_sensitive(False)
            self.hint.set_text("You reached the maximmum number of tapes")
        # Look up the scanned volser in the inventory.
        # Fix: close the connection when done (it previously leaked).
        db = sqlite3.connect('SaedRobot.db')
        try:
            c = db.cursor()
            c.execute('SELECT * from inventory WHERE volser= ?', (volser,))
            data = c.fetchone()
        finally:
            db.close()
        # Track whether this window actually added the barcode, so cancel()
        # knows whether there is anything of ours to remove.
        self._barcode_added = False
        # Valid volser: show its details and append it to the task list.
        if data is not None and len(data) > 0:
            self.projectName.set_text(data[1])
            self.tapeName.set_text(data[0])
            self.rackName.set_text(data[2])
            self.slotNumber.set_text(str(data[3]))
            self.tapesList.append(self.barcode)
            self._barcode_added = True
        self.window.show()

    def scan(self, button):
        """Return to the scan interface carrying the updated tape list."""
        self.window.destroy()
        self.window = ScanTape.ScanTape(self.tapesList, self.Username, self.userType)

    def proceed(self, button):
        """Move on to the destination-selection interface with the tape list."""
        self.window.destroy()
        self.window = ChooseDestination.ChooseDes(self.tapesList, self.Username, self.userType)

    def cancel(self, button):
        """Discard the tape shown here and return to the scan interface."""
        self.window.destroy()
        # Fix: only remove the barcode if this window appended it. The old
        # unconditional delete removed a previously scanned tape when the
        # volser was unknown, and raised IndexError on an empty list.
        if self._barcode_added:
            self.tapesList.pop()
        self.window = ScanTape.ScanTape(self.tapesList, self.Username, self.userType)

    def onLogoutButtonPressedButtonPressed(self, button):
        """Destroy this window and return to the login screen."""
        self.window.destroy()
        self.window = login.loginClass()
|
from typing import Tuple
import noise
import logging
import numpy
from worldgen.island_mesh.mesh_data import MeshData3D
class IslandMesh:
    """Voxel scalar field for an island, extracted to a mesh via marching cubes.

    Several Perlin-noise strategies fill a ``MeshData3D`` grid; :meth:`march`
    then extracts the iso-surface at ``self.level``.
    """
    def __init__(self, size: Tuple[int, int, int], offset: Tuple[int, int, int] = (0, 0, 0), scale: float = .9,
                 level: float = .5, ocean_level: float = 0, mountain_level: float = 1., octaves: int = 3):
        # Perlin-noise shape parameters.
        self.octaves = octaves
        self.persistence = .5
        self.lacunarity = 2.
        # NOTE(review): size is unpacked as (z, y, x) while offset is
        # unpacked as (x, y, z) — this asymmetry looks unintentional;
        # confirm the intended axis order.
        self.z_depth = size[0]
        self.y_height = size[1]
        self.x_width = size[2]
        self.x_offset = offset[0]
        self.y_offset = offset[1]
        self.z_offset = offset[2]
        self.scale = scale
        # Iso-level used by march(); sea/mountain shaping parameters.
        self.level = level
        self.ocean_level = ocean_level
        self.mountain_level = mountain_level
        self.mesh = MeshData3D(self.x_width,
                               self.y_height,
                               self.z_depth)
        # Mean of the three half-extents; used by the sphere field.
        self.radius = (self.x_width + self.y_height + self.z_depth) / 6
        # Half-extents ("semi-major axes") used by the gradient helpers.
        self._sma_x = self.x_width / 2
        self._sma_y = self.y_height / 2
        self._sma_z = self.z_depth / 2
    def apply_3d_noise(self):
        """Fill the mesh from the 3-D noise field (:meth:`noise_3d`)."""
        logging.info('Applying basic perlin noise...')
        self.mesh.data = self.mesh.create_scalar_field_from_function(self.noise_3d)
    def apply_sphere(self):
        """Fill the mesh from the simple sphere field (:meth:`sphere`)."""
        self.mesh.data = self.mesh.create_scalar_field_from_function(self.sphere)
    def apply_2d_noise(self):
        """Fill the mesh from the height-map style 2-D field (:meth:`noise_2d`)."""
        logging.info('Applying 2d perlin noise...')
        self.mesh.data = self.mesh.create_scalar_field_from_function(self.noise_2d)
    def apply_combined_noise(self):
        """Fill the mesh from the blended 2-D/3-D field (:meth:`noise_combined`)."""
        logging.info('Applying multiple perlin noise iterations...')
        self.mesh.data = self.mesh.create_scalar_field_from_function(self.noise_combined)
    def normalize_mesh(self):
        """Normalize the scalar field (delegates to the mesh)."""
        logging.info('Normalizing vertex probability...')
        self.mesh.normalize()
    def flip_mesh(self):
        """Mirror the scalar field (delegates to the mesh)."""
        logging.info('Flipping mesh...')
        self.mesh.mirror()
    def noise_combined(self, x: int, y: int, z: int) -> float:
        """Blend the 2-D field (used as a gradient) with the 3-D field."""
        p3d = self.noise_3d(x, y, z)
        p2d = self.noise_2d(x, y, z)
        gradient = p2d * (1 + self.ocean_level)
        # Negative gradient flips the sign of the 3-D contribution.
        if gradient < 0:
            return -p3d * gradient
        return p3d * gradient
    def noise_3d(self, x: int, y: int, z: int) -> float:
        """3-D Perlin noise shaped by the radial gradient and mountain level."""
        p3d = _perlin_3d((x + self.x_offset) / self.scale,
                         (y + self.y_offset) / self.scale,
                         (z + self.z_offset) / self.scale,
                         octaves=self.octaves,
                         persistence=self.persistence,
                         lacunarity=self.lacunarity)
        gradient = self._gradient_3d(x, y, z) * (1 + self.ocean_level)
        return (gradient - p3d)**2 - self.mountain_level
    def noise_2d(self, x: int, y: int, z: int) -> float:
        """turn perlin2d into a 3d scalar field"""
        p2d = _perlin_2d((x + self.x_offset) / self.scale,
                         (z + self.z_offset) / self.scale,
                         octaves=self.octaves,
                         persistence=self.persistence,
                         lacunarity=self.lacunarity)
        gradient = self._gradient_2d(x, z)
        zero_level = self._zero_level(y)
        return gradient - p2d + zero_level
    def _gradient_2d(self, x: int, z: int) -> float:
        """tanh of squared normalized XZ distance: 0 at the grid centre."""
        return numpy.tanh((x / self._sma_x - 1)**2 + (z / self._sma_z - 1) ** 2)
    def _gradient_3d(self, x: int, y: int, z: int) -> float:
        """tanh of squared normalized 3-D distance: 0 at the volume centre."""
        return numpy.tanh((x / self._sma_x - 1)**2 + (y / self._sma_y - 1)**2 + (z / self._sma_z - 1) ** 2)
    def _zero_level(self, y: int) -> float:
        """Linear bias with height, shifted down by the ocean level."""
        return y / (self.y_height * self.mountain_level) - self.ocean_level
    def march(self):
        """Run marching cubes at ``self.level``; return (vertexes, faces, normals)."""
        vertexes, faces, normals, _ = self.mesh.march(level=self.level, gradient_direction='descent')
        return vertexes, faces, normals
    def sphere(self, x, y, z):
        """Signed sphere field: positive inside ``self.radius`` of the centre."""
        result = self.radius ** 2 - (
                (x - self.x_width / 2) ** 2 + (y - self.y_height / 2) ** 2 + (z - self.z_depth / 2) ** 2)
        return result
def _perlin_2d(x, y, *args, **kwargs):
    """Thin wrapper over :func:`noise.pnoise2` (2-D Perlin noise)."""
    return noise.pnoise2(x, y, *args, **kwargs)
def _perlin_3d(x, y, z, *args, **kwargs):
    """Thin wrapper over :func:`noise.pnoise3` (3-D Perlin noise)."""
    return noise.pnoise3(x, y, z, *args, **kwargs)
|
<filename>src/stratis_cli/_actions/_top.py
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous top-level actions.
"""
from justbytes import Range
from .._errors import StratisCliEngineError
from .._errors import StratisCliIncoherenceError
from .._errors import StratisCliInUseError
from .._errors import StratisCliNoChangeError
from .._errors import StratisCliPartialChangeError
from .._stratisd_constants import BlockDevTiers
from .._stratisd_constants import StratisdErrors
from ._connection import get_object
from ._constants import TOP_OBJECT
from ._constants import SECTOR_SIZE
from ._formatting import print_table
def _check_opposite_tier(managed_objects, to_be_added, other_tier):
    """
    Check whether specified blockdevs are already in the other tier.

    :param managed_objects: the result of a GetManagedObjects call
    :type managed_objects: dict of str * dict
    :param to_be_added: the blockdevs to be added
    :type to_be_added: frozenset of str
    :param other_tier: the other tier, not the one requested
    :type other_tier: _stratisd_constants.BlockDevTiers
    :raises StratisCliInUseError: if blockdevs are used by other tier
    """
    # pylint: disable=import-outside-toplevel
    from ._data import MODev
    from ._data import devs

    # Devnodes currently belonging to the opposite tier.
    tier_devnodes = frozenset(
        str(MODev(dev_info).Devnode())
        for (_, dev_info) in devs(props={"Tier": other_tier}).search(managed_objects)
    )

    conflicts = to_be_added.intersection(tier_devnodes)
    if conflicts:
        owning_tier = (
            BlockDevTiers.Data
            if other_tier == BlockDevTiers.Cache
            else BlockDevTiers.Cache
        )
        raise StratisCliInUseError(conflicts, owning_tier)
def _check_same_tier(managed_objects, to_be_added, this_tier):
    """
    Check whether specified blockdevs are already in the tier to which they
    are to be added.

    :param managed_objects: the result of a GetManagedObjects call
    :type managed_objects: dict of str * dict
    :param to_be_added: the blockdevs to be added
    :type to_be_added: frozenset of str
    :param this_tier: the tier requested
    :type this_tier: _stratisd_constants.BlockDevTiers
    :raises StratisCliPartialChangeError: if blockdevs are used by this tier
    """
    # pylint: disable=import-outside-toplevel
    from ._data import MODev
    from ._data import devs

    # Devnodes already present in the requested tier.
    tier_devnodes = frozenset(
        str(MODev(dev_info).Devnode())
        for (_, dev_info) in devs(props={"Tier": this_tier}).search(managed_objects)
    )

    duplicates = to_be_added.intersection(tier_devnodes)
    if duplicates:
        action = "add to cache" if this_tier == BlockDevTiers.Cache else "add to data"
        raise StratisCliPartialChangeError(
            action,
            to_be_added.difference(duplicates),
            duplicates,
        )
class TopActions:
    """
    Top level actions.
    Each static method implements one CLI command by invoking the
    corresponding stratisd D-Bus method through the top object proxy.
    """
    @staticmethod
    def create_pool(namespace):
        """
        Create a stratis pool.
        :param namespace: parsed command-line namespace; reads pool_name
            and blockdevs
        :raises StratisCliEngineError: if stratisd returns a non-OK code
        :raises StratisCliNoChangeError: if the pool already exists
        """
        # pylint: disable=import-outside-toplevel
        from ._data import Manager
        proxy = get_object(TOP_OBJECT)
        ((changed, (_, _)), rc, message) = Manager.Methods.CreatePool(
            proxy,
            {
                "name": namespace.pool_name,
                # NOTE(review): (True, 0) looks like "redundancy specified,
                # value 0" -- confirm against the stratisd D-Bus API.
                "redundancy": (True, 0),
                "devices": namespace.blockdevs,
            },
        )
        if rc != StratisdErrors.OK:  # pragma: no cover
            raise StratisCliEngineError(rc, message)
        # stratisd reports changed == False when nothing was created.
        if not changed:
            raise StratisCliNoChangeError("create", namespace.pool_name)
    @staticmethod
    def list_pools(_):
        """
        List all stratis pools.
        Prints a sorted table of pool name, total physical size and total
        physical used, sizes rendered via Range with SECTOR_SIZE units.
        :raises StratisCliEngineError:
        """
        # pylint: disable=import-outside-toplevel
        from ._data import MOPool
        from ._data import ObjectManager
        from ._data import pools
        proxy = get_object(TOP_OBJECT)
        managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
        mopools = (MOPool(info) for _, info in pools().search(managed_objects))
        tables = [
            [
                mopool.Name(),
                str(Range(mopool.TotalPhysicalSize(), SECTOR_SIZE)),
                str(Range(mopool.TotalPhysicalUsed(), SECTOR_SIZE)),
            ]
            for mopool in mopools
        ]
        print_table(
            ["Name", "Total Physical Size", "Total Physical Used"],
            sorted(tables, key=lambda entry: entry[0]),
            ["<", ">", ">"],
        )
    @staticmethod
    def destroy_pool(namespace):
        """
        Destroy a stratis pool.
        If no pool exists, the method succeeds.
        :param namespace: parsed command-line namespace; reads pool_name
        :raises StratisCliEngineError: if the pool cannot be destroyed,
            e.g. because it still has filesystems
        :raises StratisCliNoChangeError: if nothing was destroyed
        """
        # pylint: disable=import-outside-toplevel
        from ._data import Manager
        from ._data import ObjectManager
        from ._data import pools
        proxy = get_object(TOP_OBJECT)
        managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
        # require_unique_match raises if the pool name is not found exactly once.
        (pool_object_path, _) = next(
            pools(props={"Name": namespace.pool_name})
            .require_unique_match(True)
            .search(managed_objects)
        )
        ((changed, _), rc, message) = Manager.Methods.DestroyPool(
            proxy, {"pool": pool_object_path}
        )
        # This branch can be covered, since the engine will return an error
        # if the pool can not be destroyed because it has filesystems.
        if rc != StratisdErrors.OK:
            raise StratisCliEngineError(rc, message)
        if not changed:
            raise StratisCliNoChangeError("destroy", namespace.pool_name)
    @staticmethod
    def rename_pool(namespace):
        """
        Rename a pool.
        :param namespace: parsed command-line namespace; reads current
            (old name) and new (new name)
        :raises StratisCliEngineError: if stratisd returns a non-OK code
        :raises StratisCliNoChangeError: if the name did not change
        """
        # pylint: disable=import-outside-toplevel
        from ._data import ObjectManager
        from ._data import Pool
        from ._data import pools
        proxy = get_object(TOP_OBJECT)
        managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
        (pool_object_path, _) = next(
            pools(props={"Name": namespace.current})
            .require_unique_match(True)
            .search(managed_objects)
        )
        ((changed, _), rc, message) = Pool.Methods.SetName(
            get_object(pool_object_path), {"name": namespace.new}
        )
        if rc != StratisdErrors.OK:  # pragma: no cover
            raise StratisCliEngineError(rc, message)
        if not changed:
            raise StratisCliNoChangeError("rename", namespace.new)
    @staticmethod
    def add_data_devices(namespace):
        """
        Add specified data devices to a pool.
        :param namespace: parsed command-line namespace; reads pool_name
            and blockdevs
        :raises StratisCliEngineError: if stratisd returns a non-OK code
        :raises StratisCliIncoherenceError: if stratisd reports success but
            did not add all requested blockdevs
        :raises StratisCliInUseError: if a blockdev is in the cache tier
        :raises StratisCliPartialChangeError: if a blockdev is already in
            the data tier
        """
        # pylint: disable=import-outside-toplevel
        from ._data import ObjectManager
        from ._data import Pool
        from ._data import pools
        proxy = get_object(TOP_OBJECT)
        managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
        blockdevs = frozenset(namespace.blockdevs)
        # Reject devices already used by either tier before calling stratisd.
        _check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.Cache)
        _check_same_tier(managed_objects, blockdevs, BlockDevTiers.Data)
        (pool_object_path, _) = next(
            pools(props={"Name": namespace.pool_name})
            .require_unique_match(True)
            .search(managed_objects)
        )
        ((added, devs_added), rc, message) = Pool.Methods.AddDataDevs(
            get_object(pool_object_path), {"devices": list(blockdevs)}
        )
        if rc != StratisdErrors.OK:  # pragma: no cover
            raise StratisCliEngineError(rc, message)
        if not added or len(devs_added) < len(blockdevs):  # pragma: no cover
            raise StratisCliIncoherenceError(
                (
                    "Expected to add the specified blockdevs to the data tier "
                    "in pool %s but stratisd reports that it did not actually "
                    "add some or all of the blockdevs requested"
                )
                % namespace.pool_name
            )
    @staticmethod
    def add_cache_devices(namespace):
        """
        Add specified cache devices to a pool.
        :param namespace: parsed command-line namespace; reads pool_name
            and blockdevs
        :raises StratisCliEngineError: if stratisd returns a non-OK code
        :raises StratisCliIncoherenceError: if stratisd reports success but
            did not add all requested blockdevs
        :raises StratisCliInUseError: if a blockdev is in the data tier
        :raises StratisCliPartialChangeError: if a blockdev is already in
            the cache tier
        """
        # pylint: disable=import-outside-toplevel
        from ._data import ObjectManager
        from ._data import Pool
        from ._data import pools
        proxy = get_object(TOP_OBJECT)
        managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
        blockdevs = frozenset(namespace.blockdevs)
        # Mirror image of add_data_devices: the "opposite" tier is Data here.
        _check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.Data)
        _check_same_tier(managed_objects, blockdevs, BlockDevTiers.Cache)
        (pool_object_path, _) = next(
            pools(props={"Name": namespace.pool_name})
            .require_unique_match(True)
            .search(managed_objects)
        )
        ((added, devs_added), rc, message) = Pool.Methods.AddCacheDevs(
            get_object(pool_object_path), {"devices": list(blockdevs)}
        )
        if rc != StratisdErrors.OK:  # pragma: no cover
            raise StratisCliEngineError(rc, message)
        if not added or len(devs_added) < len(blockdevs):  # pragma: no cover
            raise StratisCliIncoherenceError(
                (
                    "Expected to add the specified blockdevs to the cache tier "
                    "in pool %s but stratisd reports that it did not actually "
                    "add some or all of the blockdevs requested"
                )
                % namespace.pool_name
            )
|
<reponame>pyjads/Python_module_helper<filename>image_processing/watermarking_1.py
#%%
# Scratch script for watermark / steganography experiments: combine several
# watermarked copies of one image (average and per-channel max) and inspect
# JPEG re-compression and histograms.
import cv2
import matplotlib.pyplot as plt
import numpy as np
#%%
# Count how many pixel values differ between watermarked and original image.
watermark = cv2.imread(r"E:\Pycharm_Workspace\Data_Science\image_processing\watermark_1.jpg")
original_1 = cv2.imread(r'E:\Pycharm_Workspace\Data_Science\image_processing\original.jpg')
diff = watermark != original_1
print(diff.sum())
#%%
mark = r'E:\Pycharm_Workspace\Data_Science\image_processing\w_{}.jpg'
# Load the five watermarked variants; widen to uint16 so summing five 8-bit
# images below cannot overflow.
w_1, w_2, w_3, w_4, w_5 = (
    np.array(cv2.imread(mark.format(str(i))), dtype=np.uint16) for i in range(1, 6)
)
# average of the five variants; cast back to uint8 before writing
final_image = (w_1 + w_2 + w_3 + w_4 + w_5) / 5
cv2.imwrite('image_processing/image_average.jpg', final_image.astype(np.uint8))
# Per-channel maximum across the five variants.
# NOTE(review): max() is not a majority/median vote; np.median(..., axis=0)
# would give the median combination -- confirm the intent.
red = np.array([w_1.T[0], w_2.T[0], w_3.T[0], w_4.T[0], w_5.T[0]])
green = np.array([w_1.T[1], w_2.T[1], w_3.T[1], w_4.T[1], w_5.T[1]])
blue = np.array([w_1.T[2], w_2.T[2], w_3.T[2], w_4.T[2], w_5.T[2]])
red_majority = red.max(axis=0)
green_majority = green.max(axis=0)
blue_majority = blue.max(axis=0)
image_median = np.array([red_majority, green_majority, blue_majority])
# BUG FIX: previously this wrote final_image again. Write the combined image,
# transposed back from (3, W, H) to (H, W, 3), as uint8.
cv2.imwrite('image_processing/image_median.jpg', image_median.T.astype(np.uint8))
#%%
watermark = cv2.imread(r"E:\Pycharm_Workspace\Data_Science\image_processing\StegoWork_with_OutGuess_01.jpg")
original_1 = cv2.imread(r'E:\Pycharm_Workspace\Data_Science\image_processing\original.jpg')
print(original_1.shape)
#%%
watermark = cv2.imread(r"C:\Users\lenovo\Desktop\matlab\input_sat_image.jpg")
cv2.imshow('image', watermark)
#%%
from io import StringIO  # "import StringIO" directly in python2
from PIL import Image
im2 = Image.open(r'E:\Pycharm_Workspace\Data_Science\image_processing\Test_OutGuess_01.jpg')
# BUG FIX: PIL images do not support numpy-style channel indexing; convert to
# an ndarray first, then wrap the grayscale result back into a PIL image so
# .save() works.
pixels = np.asarray(im2)
r, g, b = pixels[:, :, 0], pixels[:, :, 1], pixels[:, :, 2]
# ITU-R 601 luma weights for RGB -> grayscale
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
im2 = Image.fromarray(gray.astype(np.uint8))
im1 = Image.open(r'E:\Pycharm_Workspace\Data_Science\image_processing\Test_OutGuess_02.jpg')
# Save the grayscale image at several JPEG quality levels.
im2.save('image_processing/1_c.jpg', quality=0)
im2.save('image_processing/2_c.jpg', quality=100)
im2.save('image_processing/3_c.jpg', quality=50)
im2.save('image_processing/4_c.jpg', quality=30)
im2.save('image_processing/5_c.jpg', quality=10)
#%%
# Gray-value histograms of three stego images (flag 0 = load as grayscale).
im1 = cv2.imread(r'E:\Pycharm_Workspace\Data_Science\image_processing\StegoWork with OutGuess 02.jpg', 0)
im2 = cv2.imread(r'E:\Pycharm_Workspace\Data_Science\image_processing\StegoWork_with_OutGuess_01.jpg', 0)
im3 = cv2.imread(r'E:\Pycharm_Workspace\Data_Science\image_processing\StegoWork_with_OutGuess_03.jpg', 0)
plt.hist(im1.ravel(), 256, [0, 256], color='red')
plt.title('IM1')
plt.show()
plt.clf()
plt.hist(im2.ravel(), 256, [0, 256], color='green')
plt.title('IM2')
plt.show()
plt.clf()
plt.hist(im3.ravel(), 256, [0, 256], color='blue')
plt.title('IM3')
plt.show()
|
<gh_stars>0
#!/usr/bin/env python3
from aiohttp import web
from asyncio import sleep, subprocess, gather, Lock, shield
from collections import OrderedDict
import argparse
import ast
import json
import logging
import os
import re
import sys
if sys.version_info.major == 3 and sys.version_info.minor < 7:
from asyncio import ensure_future as create_task
else:
from asyncio import create_task
# Splits `iwlist ... scan` output into one section per "Cell".
IWLIST_NETWORKS = re.compile(r"^\s+Cell", re.M)
# Captures "Key: value" pairs inside a cell section.
IWLIST_KEYS = re.compile(r"^\s*(\S[^:\n]*):(.+)", re.M)
# NOTE: ip -br -j addr # -j is not supported on Debian Stretch! :(
# Captures (interface name, address list) from `ip -br addr` output.
IP_ADDR = re.compile(r"^(\w+)[ \t]+\S+[ \t]+(\S.*) ", re.M)
# Seconds to wait for an IP address after starting dhclient (see connect()).
GRACE_PERIOD = 30
# Kernel modules reloaded to reset the WiFi driver.
KERNEL_MODULES = ["rtl8192cu", "cfg80211"]
# Path where the generated hostapd configuration is written.
HOSTAPD_CONF = "/run/hostapd.conf"
# NOTE(review): not referenced in this chunk -- possibly used elsewhere.
MAX_LIST_NETWORK_FAILURES = 3
async def start_ap():
    """
    Switch the interface into access-point mode.

    Writes a hostapd configuration, resets the interface and driver, assigns
    a static address and starts hostapd, dnsmasq and nginx. Sets the shared
    "portal" flag on success and clears the stored ESSID.
    """
    async with app["lock"]:
        logger.info("Starting access point...")
        # No longer connected to any network.
        app["essid"].set(None)
        with open(app["hostapd"], "wt") as fh:
            fh.write(
                """\
interface=%s
ctrl_interface=/var/run/hostapd
ssid=Third-I
channel=1
max_num_sta=1
# speed
logger_syslog=-1
logger_syslog_level=2
logger_stdout=-1
logger_stdout_level=2
ctrl_interface_group=0
country_code=00
ieee80211d=0
ieee80211h=0
hw_mode=g
beacon_int=100
dtim_period=2
rts_threshold=2347
fragm_threshold=2346
macaddr_acl=0
auth_algs=1
ignore_broadcast_ssid=0
wmm_enabled=1
wmm_ac_bk_cwmin=4
wmm_ac_bk_cwmax=10
wmm_ac_bk_aifs=7
wmm_ac_bk_txop_limit=0
wmm_ac_bk_acm=0
wmm_ac_be_aifs=3
wmm_ac_be_cwmin=4
wmm_ac_be_cwmax=10
wmm_ac_be_txop_limit=0
wmm_ac_be_acm=0
wmm_ac_vi_aifs=2
wmm_ac_vi_cwmin=3
wmm_ac_vi_cwmax=4
wmm_ac_vi_txop_limit=94
wmm_ac_vi_acm=0
wmm_ac_vo_aifs=2
wmm_ac_vo_cwmin=2
wmm_ac_vo_cwmax=3
wmm_ac_vo_txop_limit=47
wmm_ac_vo_acm=0
ieee80211n=1
ieee80211ac=1
eapol_key_index_workaround=0
eap_server=0
own_ip_addr=127.0.0.1
""" % app["interface"]
            )
        # Stop whatever was running (wpa_supplicant, dhclient, ...) first.
        await kill_daemons()
        await run_check("ip", "link", "set", "{if}", "down")
        # NOTE: on this old version of Linux it seems that the network gets into a broken state
        # for some reasons
        await reload_wifi_modules()
        await run_check("ip", "link", "set", "{if}", "up")
        await clear_ip()
        await run_check("ip", "addr", "add", "192.168.1.1/24", "dev", "{if}")
        await run_daemon("hostapd", "{hostapd}")
        await run_daemon(
            "dnsmasq",
            "-i",
            "{if}",
            "-d",
            "-R",
            "-F",
            "192.168.1.1,192.168.1.32,255.255.255.0",
            # NOTE: captive portal mode is disabled for now because it causes more problems than it
            #       solves
            #"-A",
            # NOTE: prevent captive portal window to open up on OSX because JS is disabled on that
            #       window making the third-i frontend completely unusable
            #"/captive.apple.com/0.0.0.0",
            #"-A",
            #"/#/192.168.1.1",
        )
        await run_daemon("nginx", "-g", "daemon off; error_log stderr;")
        logger.info("Access point started successfully.")
        app["portal"].set(True)
async def list_networks():
    """Scan for WiFi networks; return a list of (essid, encrypted) pairs."""
    async with app["lock"]:
        logger.info("Getting networks...")
        scan_output = await run_capture_check("iwlist", "{if}", "scan")
        # Parse each "Cell" section of the iwlist output into a key/value dict.
        cells = []
        for section in IWLIST_NETWORKS.split(scan_output):
            if "ESSID" in section:
                cells.append({key: value for (key, value) in IWLIST_KEYS.findall(section)})
        # Keep only cells with a non-empty ESSID; literal_eval strips the
        # surrounding quotes from the reported name.
        networks = []
        for cell in cells:
            if cell["ESSID"] != "\"\"":
                networks.append(
                    (ast.literal_eval(cell["ESSID"]), cell["Encryption key"] == "on")
                )
        logger.info("Networks received successfully.")
        return networks
async def get_ip_addresses():
    """Return the list of addresses currently assigned to our interface."""
    raw = await run_capture_check("ip", "-br", "addr")
    by_interface = {}
    for (ifname, addr_info) in IP_ADDR.findall(raw):
        by_interface[ifname] = addr_info.split(" ")
    return by_interface.get(app["interface"], [])
async def clear_ip():
    """Remove every IP address currently assigned to the interface."""
    for address in await get_ip_addresses():
        await run_check("ip", "addr", "del", address, "dev", "{if}")
async def check_ip_status():
    """Return True when the interface has at least one IP address."""
    return len(await get_ip_addresses()) > 0
async def reload_wifi_modules():
    """Unload and re-load the WiFi kernel modules to reset the driver."""
    logger.info("Reloading kernel WiFi modules...")
    # Remove, then re-insert, in a single modprobe call each.
    await run_check("modprobe", "-r", *KERNEL_MODULES)
    await run_check("modprobe", *KERNEL_MODULES)
async def connect(essid, password):
    """
    Connect the interface to a WiFi network.

    :param essid: network name to join
    :param password: passphrase, or None for an open network
    Falls back to restarting the access point (and re-raises) on any failure.
    """
    try:
        async with app["lock"]:
            logger.info("Connecting to %s (password: %s)...", essid, password is not None)
            await kill_daemons()
            await run_check("ip", "link", "set", "{if}", "down")
            await clear_ip()
            # NOTE: on this old version of Linux it seems that the network gets into a broken state
            #       after stopping wpa_supplicant. On more recent versions of Linux I didn't get
            #       this issue
            await reload_wifi_modules()
            await run_check("ip", "link", "set", "{if}", "up")
            if password is not None:
                # Protected network: generate a wpa_supplicant config and run it.
                output = await run_capture_check(
                    "wpa_passphrase",
                    essid,
                    password,
                )
                with open("/run/%s.conf" % app["interface"], "wt") as fh:
                    fh.write(output)
                await run_daemon(
                    "wpa_supplicant",
                    "-i",
                    "{if}",
                    "-D",
                    "nl80211,wext",
                    "-c",
                    "/run/{if}.conf",
                )
            else:
                # Open network: plain iwconfig association.
                await run_check("iwconfig", "{if}", "essid", essid)
                await run_check("iwconfig", "{if}", "ap", "any")
            await run_daemon("dhclient", "{if}", "-d")
            logger.info("Checking if connection is ready...")
            # Poll for an IP address every other second for GRACE_PERIOD seconds;
            # the for/else runs the failure branch only when we never break.
            for i in range(GRACE_PERIOD):
                if i % 2 == 0 and await check_ip_status():
                    logger.info("Connection succeeded.")
                    break
                await sleep(1)
            else:
                logger.info(
                    "The connection to the WiFi did not succeed in the allowed amount of time"
                )
                raise Exception("Could not connect to network.")
            await run_daemon("nginx", "-g", "daemon off; error_log stderr;")
            app["portal"].set(False)
            app["essid"].set(essid)
    except Exception:
        # Any failure: restore access-point mode, then propagate the error.
        await start_ap()
        raise
async def get_info():
    """Return the current portal/essid state as a JSON-serializable dict."""
    async with app["lock"]:
        state = {
            "portal": app["portal"].get(),
            "essid": app["essid"].get(),
        }
        return state
async def stop_wifi():
    """Kill all managed daemons and bring the WiFi interface down."""
    async with app["lock"]:
        logger.info("Shutting down interface...")
        await kill_daemons()
        await run_check("ip", "link", "set", "{if}", "down")
# daemon management
async def run_daemon(*cmd, **format_args):
    """Start (or restart) a long-running process keyed by its command name."""
    name = cmd[0]
    # Make sure any previous instance is gone before starting a new one.
    await stop_daemon(name)
    proc = await run_proc(cmd, format_args, {})
    app["daemons"][name] = proc
    # Give the process a moment to fail fast (bad config, missing binary, ...).
    await sleep(1)
    if proc.returncode is not None:
        raise Exception("daemon execution failed (exit status: %s): %s" % (proc.returncode, cmd))
    return proc
async def stop_daemon(name):
    """Stop a managed daemon: SIGTERM, wait 2s, escalate to SIGKILL, reap."""
    if name not in app["daemons"]:
        return
    proc = app["daemons"].pop(name)
    if proc.returncode is not None:
        # Already exited; nothing to stop.
        return
    logger.info("Terminating process %s..." % name)
    proc.terminate()
    await sleep(2)
    if proc.returncode is None:
        logger.info("Killing process %s..." % name)
        proc.kill()
    logger.info("Waiting process %s..." % name)
    await proc.wait()
async def kill_daemons():
    """Stop every managed daemon, in reverse start order."""
    daemons = list(reversed(app["daemons"].keys()))
    if not daemons:
        logger.debug("No daemon to kill.")
        return
    logger.debug("Killing daemons...")
    # Errors from individual stops are collected, not raised.
    await gather(
        *[stop_daemon(name) for name in daemons],
        return_exceptions=True,
    )
    logger.debug("Killing daemons completed.")
# process management
async def run_proc(cmd, format_args, subprocess_args):
    """Spawn a subprocess, expanding {if}/{hostapd} placeholders in cmd."""
    format_args.update({
        "if": app["interface"],
        "hostapd": app["hostapd"],
    })
    expanded = [str(part).format_map(format_args) for part in cmd]
    logger.debug("Running command: %s", expanded)
    return await subprocess.create_subprocess_exec(*expanded, **subprocess_args)
async def run_check(*cmd, **format_args):
    """Run a command to completion; raise when it exits non-zero."""
    proc = await run_proc(cmd, format_args, {})
    if await proc.wait() != 0:
        raise Exception("command execution failed (exit status != 0): %s" % (cmd, ))
async def run_capture_check(*cmd, **format_args):
    """
    Run a command, capture its stdout and raise when it exits non-zero.

    :return: the command's standard output decoded as utf8
    """
    proc = await run_proc(cmd, format_args, {"stdout": subprocess.PIPE})
    # BUG FIX: use communicate() instead of wait()-then-read. Waiting first
    # can deadlock when the child fills the stdout pipe buffer before
    # exiting; communicate() drains the pipe while waiting.
    (stdout, _) = await proc.communicate()
    if proc.returncode != 0:
        raise Exception("command execution failed (exit status != 0): %s" % (cmd, ))
    return stdout.decode("utf8")
###################################################################################################
async def route_start_ap(_request):
    """HTTP handler: switch to access-point mode unless already in it."""
    # Container.__bool__ delegates to the held flag.
    if not app["portal"]:
        await shield(start_ap())
    return web.Response(text="OK")
async def route_connect(request):
    """
    HTTP handler: start connecting to a WiFi network in the background.

    Query parameters: ``essid`` (required), ``password`` (optional, omitted
    for open networks).
    """
    try:
        essid = request.query["essid"]
    except KeyError:
        # Only essid is mandatory; the old message wrongly implied that
        # password could be the missing key.
        return web.Response(text="Missing query key essid!", status=400)
    create_task(connect(essid, request.query.get("password")))
    return web.Response(text="OK")
async def route_list_networks(_request):
    """HTTP handler: return the WiFi scan result as JSON."""
    networks = await shield(list_networks())
    # dict() keeps only the last (essid, flag) pair per essid, deduplicating.
    deduplicated = dict(networks)
    # NOTE(review): "password" here is iwlist's encryption-enabled flag
    # (a boolean), not an actual password -- confirm the frontend expects that.
    data = []
    for (essid, password) in deduplicated.items():
        data.append({"essid": essid, "password": password})
    return web.json_response(data)
async def route_ap(_request):
    """HTTP handler: report the current portal/essid state as JSON."""
    info = await get_info()
    return web.json_response(info)
async def route_wifi_off(_request):
    """HTTP handler: bring the WiFi interface down entirely."""
    await stop_wifi()
    return web.Response(text="OK")
async def route_wifi_on(_request):
    """HTTP handler: (re)start the access point."""
    await shield(start_ap())
    return web.Response(text="OK")
async def start_ap_on_startup(app):
    # aiohttp on_startup hook: bring the access point up in the background.
    create_task(start_ap())
class Container:
    """
    Mutable single-value holder.

    Works around the aiohttp deprecation warning raised when a value stored
    in `app` is re-assigned during execution: the container object itself
    stays in place, only its payload changes.
    """

    def __init__(self, value):
        self.value = value

    def __bool__(self):
        # Truthiness delegates to the held value.
        return bool(self.value)

    def set(self, value):
        """Replace the held value."""
        self.value = value

    def get(self):
        """Return the held value."""
        return self.value
# ---- module-level application wiring ----
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("captive-portal")
app = web.Application()
app.on_startup.append(start_ap_on_startup)
app.on_cleanup.append(lambda _app: stop_wifi())
# daemons: name -> Process, kept in start order (killed in reverse order).
app["daemons"] = OrderedDict()
# lock: serializes all network-mode transitions.
app["lock"] = Lock()
# portal/essid: mutable state shared with the route handlers.
app["portal"] = Container(False)
app["hostapd"] = HOSTAPD_CONF
app["essid"] = Container(None)
app.add_routes(
    [
        web.get("/start-ap", route_start_ap),
        web.get("/list-networks", route_list_networks),
        web.get("/connect", route_connect),
        web.get("/portal", route_ap),
        web.get("/wifi-off", route_wifi_off),
        web.get("/wifi-on", route_wifi_on),
    ]
)
parser = argparse.ArgumentParser(description="A captive portal service for the thingy")
parser.add_argument(
    "--unix",
    type=str,
    help="open the server on a UNIX socket",
)
parser.add_argument(
    "--host",
    type=str,
    help="open the server on a TCP/IP host",
)
parser.add_argument(
    "--port",
    type=int,
    help="open the server on a TCP/IP port",
)
parser.add_argument(
    "--debug",
    action="store_true",
    help="show debug logs",
)
parser.add_argument(
    "interface",
    type=str,
    help="WiFi interface",
)
if __name__ == "__main__":
    # production mode
    args = parser.parse_args()
    # Either a UNIX socket or a full host+port pair must be provided.
    if args.unix is None and (args.host is None or args.port is None):
        print("You must at least provide the UNIX socket or the TCP/IP host " "and port.")
        sys.exit(1)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    app["interface"] = args.interface
    web.run_app(
        app,
        host=args.host,
        port=args.port,
        path=args.unix,
    )
else:
    # Imported by a runner (e.g. `pipenv run dev`): configure from environment.
    if "INTERFACE" not in os.environ:
        print("Missing `INTERFACE` in environment variables.", file=sys.stderr)
        print("Example: INTERFACE=wlan0 pipenv run dev", file=sys.stderr)
        sys.exit(1)
    logger.setLevel(logging.DEBUG)
    # development mode
    app["interface"] = os.environ["INTERFACE"]
|
from typing import TYPE_CHECKING, List
import graphene
from django.core.exceptions import ValidationError
from ...channel.models import Channel
from ...checkout.calculations import calculate_checkout_total_with_gift_cards
from ...checkout.checkout_cleaner import clean_billing_address, clean_checkout_shipping
from ...checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ...checkout.utils import cancel_active_payments
from ...core.error_codes import MetadataErrorCode
from ...core.permissions import OrderPermissions
from ...core.utils import get_client_ip
from ...core.utils.url import validate_storefront_url
from ...payment import PaymentError, StorePaymentMethod, gateway
from ...payment.error_codes import PaymentErrorCode
from ...payment.utils import create_payment, is_currency_supported
from ..account.i18n import I18nMixin
from ..channel.utils import validate_channel
from ..checkout.mutations.utils import get_checkout_by_token
from ..checkout.types import Checkout
from ..core.descriptions import ADDED_IN_31, DEPRECATED_IN_3X_INPUT
from ..core.enums import to_enum
from ..core.fields import JSONString
from ..core.mutations import BaseMutation
from ..core.scalars import UUID, PositiveDecimal
from ..core.types import common as common_types
from ..core.validators import validate_one_of_args_is_in_mutation
from ..meta.mutations import MetadataInput
from .types import Payment, PaymentInitialized
from .utils import metadata_contains_empty_key
if TYPE_CHECKING:
from ...checkout import models as checkout_models
def description(enum):
    """Return the GraphQL description text for a StorePaymentMethodEnum member."""
    # None means the enum type itself is being described.
    if enum is None:
        return "Enum representing the type of a payment storage in a gateway."
    if enum == StorePaymentMethodEnum.NONE:
        return "Storage is disabled. The payment is not stored."
    if enum == StorePaymentMethodEnum.ON_SESSION:
        return (
            "On session storage type. "
            "The payment is stored only to be reused when "
            "the customer is present in the checkout flow."
        )
    if enum == StorePaymentMethodEnum.OFF_SESSION:
        return (
            "Off session storage type. "
            "The payment is stored to be reused even if the customer is absent."
        )
    # Unknown member: no description.
    return None
# GraphQL enum built from the StorePaymentMethod choices; the ``description``
# callable above supplies the per-member description strings.
StorePaymentMethodEnum = to_enum(
    StorePaymentMethod, type_name="StorePaymentMethodEnum", description=description
)
class PaymentInput(graphene.InputObjectType):
    """Input payload for creating a payment on a checkout."""
    gateway = graphene.Field(
        graphene.String,
        description="A gateway to use with that payment.",
        required=True,
    )
    token = graphene.String(
        required=False,
        description=(
            "Client-side generated payment token, representing customer's "
            "billing data in a secure manner."
        ),
    )
    amount = PositiveDecimal(
        required=False,
        description=(
            "Total amount of the transaction, including "
            "all taxes and discounts. If no amount is provided, "
            "the checkout total will be used."
        ),
    )
    return_url = graphene.String(
        required=False,
        description=(
            "URL of a storefront view where user should be redirected after "
            "requiring additional actions. Payment with additional actions will not be "
            "finished if this field is not provided."
        ),
    )
    store_payment_method = StorePaymentMethodEnum(
        description="Payment store type." + ADDED_IN_31,
        required=False,
        default_value=StorePaymentMethod.NONE,
    )
    metadata = common_types.NonNullList(
        MetadataInput,
        description="User public metadata." + ADDED_IN_31,
        required=False,
    )
class CheckoutPaymentCreate(BaseMutation, I18nMixin):
    """Mutation creating a new payment for a checkout."""
    checkout = graphene.Field(Checkout, description="Related checkout object.")
    payment = graphene.Field(Payment, description="A newly created payment.")
    class Arguments:
        checkout_id = graphene.ID(
            description=(
                f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
            ),
            required=False,
        )
        token = UUID(description="Checkout token.", required=False)
        input = PaymentInput(
            description="Data required to create a new payment.", required=True
        )
    class Meta:
        description = "Create a new payment for given checkout."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def clean_payment_amount(cls, info, checkout_total, amount):
        """Reject any amount that is not exactly the checkout's gross total."""
        if amount != checkout_total.gross.amount:
            raise ValidationError(
                {
                    "amount": ValidationError(
                        "Partial payments are not allowed, amount should be "
                        "equal checkout's total.",
                        code=PaymentErrorCode.PARTIAL_PAYMENT_NOT_ALLOWED,
                    )
                }
            )
    @classmethod
    def validate_gateway(cls, manager, gateway_id, currency):
        """Validate if given gateway can be used for this checkout.
        Check if provided gateway_id is on the list of available payment gateways.
        Gateway will be rejected if gateway_id is invalid or a gateway doesn't support
        checkout's currency.
        """
        if not is_currency_supported(currency, gateway_id, manager):
            raise ValidationError(
                {
                    "gateway": ValidationError(
                        f"The gateway {gateway_id} is not available for this checkout.",
                        code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
                    )
                }
            )
    @classmethod
    def validate_token(cls, manager, gateway: str, input_data: dict, channel_slug: str):
        """Require a payment token when the gateway demands one for this channel."""
        token = input_data.get("token")
        is_required = manager.token_is_required_as_payment_input(gateway, channel_slug)
        if not token and is_required:
            raise ValidationError(
                {
                    "token": ValidationError(
                        f"Token is required for {gateway}.",
                        code=PaymentErrorCode.REQUIRED.value,
                    ),
                }
            )
    @classmethod
    def validate_return_url(cls, input_data):
        """Validate the optional return_url against allowed storefront URLs."""
        return_url = input_data.get("return_url")
        if not return_url:
            return
        try:
            validate_storefront_url(return_url)
        except ValidationError as error:
            # NOTE(review): error is reported under "redirect_url" although the
            # input field is "return_url"; also, Django ignores the ``code``
            # kwarg when a dict is passed -- confirm both are intentional.
            raise ValidationError(
                {"redirect_url": error}, code=PaymentErrorCode.INVALID
            )
    @classmethod
    def validate_metadata_keys(cls, metadata_list: List[dict]):
        """Reject metadata entries whose key is empty."""
        if metadata_contains_empty_key(metadata_list):
            raise ValidationError(
                {
                    "input": ValidationError(
                        {
                            "metadata": ValidationError(
                                "Metadata key cannot be empty.",
                                code=MetadataErrorCode.REQUIRED.value,
                            )
                        }
                    )
                }
            )
    @staticmethod
    def validate_checkout_email(checkout: "checkout_models.Checkout"):
        """A payment cannot be created for a checkout without an email."""
        if not checkout.email:
            raise ValidationError(
                "Checkout email must be set.",
                code=PaymentErrorCode.CHECKOUT_EMAIL_NOT_SET.value,
            )
    @classmethod
    def perform_mutation(cls, _root, info, checkout_id=None, token=None, **data):
        # Exactly one of checkout_id (deprecated) or token must be given.
        # DEPRECATED
        validate_one_of_args_is_in_mutation(
            PaymentErrorCode, "checkout_id", checkout_id, "token", token
        )
        if token:
            checkout = get_checkout_by_token(token)
        # DEPRECATED
        else:
            checkout = cls.get_node_or_error(
                info, checkout_id or token, only_type=Checkout, field="checkout_id"
            )
        cls.validate_checkout_email(checkout)
        data = data["input"]
        gateway = data["gateway"]
        manager = info.context.plugins
        cls.validate_gateway(manager, gateway, checkout.currency)
        cls.validate_return_url(data)
        # All checkout lines must still be purchasable in this channel.
        lines, unavailable_variant_pks = fetch_checkout_lines(checkout)
        if unavailable_variant_pks:
            not_available_variants_ids = {
                graphene.Node.to_global_id("ProductVariant", pk)
                for pk in unavailable_variant_pks
            }
            raise ValidationError(
                {
                    "token": ValidationError(
                        "Some of the checkout lines variants are unavailable.",
                        code=PaymentErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL.value,
                        params={"variants": not_available_variants_ids},
                    )
                }
            )
        if not lines:
            raise ValidationError(
                {
                    "lines": ValidationError(
                        "Cannot create payment for checkout without lines.",
                        code=PaymentErrorCode.NO_CHECKOUT_LINES.value,
                    )
                }
            )
        checkout_info = fetch_checkout_info(
            checkout, lines, info.context.discounts, manager
        )
        cls.validate_token(
            manager, gateway, data, channel_slug=checkout_info.channel.slug
        )
        address = (
            checkout.shipping_address or checkout.billing_address
        )  # FIXME: check which address we need here
        checkout_total = calculate_checkout_total_with_gift_cards(
            manager=manager,
            checkout_info=checkout_info,
            lines=lines,
            address=address,
            discounts=info.context.discounts,
        )
        # Missing amount defaults to the full checkout total.
        amount = data.get("amount", checkout_total.gross.amount)
        clean_checkout_shipping(checkout_info, lines, PaymentErrorCode)
        clean_billing_address(checkout_info, PaymentErrorCode)
        cls.clean_payment_amount(info, checkout_total, amount)
        extra_data = {
            "customer_user_agent": info.context.META.get("HTTP_USER_AGENT"),
        }
        # Only one active payment per checkout: cancel the previous ones.
        cancel_active_payments(checkout)
        metadata = data.get("metadata")
        if metadata is not None:
            cls.validate_metadata_keys(metadata)
            metadata = {data.key: data.value for data in metadata}
        payment = None
        # A zero-amount checkout (fully covered by gift cards) needs no payment.
        if amount != 0:
            payment = create_payment(
                gateway=gateway,
                payment_token=data.get("token", ""),
                total=amount,
                currency=checkout.currency,
                email=checkout.get_customer_email(),
                extra_data=extra_data,
                # FIXME this is not a customer IP address. It is a client storefront ip
                customer_ip_address=get_client_ip(info.context),
                checkout=checkout,
                return_url=data.get("return_url"),
                store_payment_method=data["store_payment_method"],
                metadata=metadata,
            )
        return CheckoutPaymentCreate(payment=payment, checkout=checkout)
class PaymentCapture(BaseMutation):
    """Mutation capturing an authorized payment (staff only)."""
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
        amount = PositiveDecimal(description="Transaction amount.")
    class Meta:
        description = "Captures the authorized payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # The payment belongs to either an order or a (still open) checkout.
        channel_slug = (
            payment.order.channel.slug
            if payment.order
            else payment.checkout.channel.slug
        )
        try:
            gateway.capture(
                payment, info.context.plugins, amount=amount, channel_slug=channel_slug
            )
            # Reload: the gateway call mutates the payment row.
            payment.refresh_from_db()
        except PaymentError as e:
            # NOTE(review): other validations pass ``.value`` as code; here the
            # enum member itself is passed -- confirm this inconsistency.
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentCapture(payment=payment)
class PaymentRefund(PaymentCapture):
    """Mutation refunding a captured payment; inherits Arguments from PaymentCapture."""
    class Meta:
        description = "Refunds the captured payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # The payment belongs to either an order or a (still open) checkout.
        channel_slug = (
            payment.order.channel.slug
            if payment.order
            else payment.checkout.channel.slug
        )
        try:
            gateway.refund(
                payment, info.context.plugins, amount=amount, channel_slug=channel_slug
            )
            payment.refresh_from_db()
        except PaymentError as e:
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentRefund(payment=payment)
class PaymentVoid(BaseMutation):
    """Mutation voiding an authorized (not yet captured) payment."""
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
    class Meta:
        description = "Voids the authorized payment."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        # The payment belongs to either an order or a (still open) checkout.
        channel_slug = (
            payment.order.channel.slug
            if payment.order
            else payment.checkout.channel.slug
        )
        try:
            gateway.void(payment, info.context.plugins, channel_slug=channel_slug)
            payment.refresh_from_db()
        except PaymentError as e:
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentVoid(payment=payment)
class PaymentInitialize(BaseMutation):
    """Mutation triggering gateway-side payment initialization (e.g. session creation)."""
    initialized_payment = graphene.Field(PaymentInitialized, required=False)
    class Arguments:
        gateway = graphene.String(
            description="A gateway name used to initialize the payment.",
            required=True,
        )
        channel = graphene.String(
            description="Slug of a channel for which the data should be returned.",
        )
        payment_data = JSONString(
            required=False,
            description=(
                "Client-side generated data required to initialize the payment."
            ),
        )
    class Meta:
        description = "Initializes payment process when it is required by gateway."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def validate_channel(cls, channel_slug):
        """Resolve the channel by slug; it must exist and be active."""
        try:
            channel = Channel.objects.get(slug=channel_slug)
        except Channel.DoesNotExist:
            raise ValidationError(
                {
                    "channel": ValidationError(
                        f"Channel with '{channel_slug}' slug does not exist.",
                        code=PaymentErrorCode.NOT_FOUND.value,
                    )
                }
            )
        if not channel.is_active:
            raise ValidationError(
                {
                    "channel": ValidationError(
                        f"Channel with '{channel_slug}' is inactive.",
                        code=PaymentErrorCode.CHANNEL_INACTIVE.value,
                    )
                }
            )
        return channel
    @classmethod
    def perform_mutation(cls, _root, info, gateway, channel, payment_data):
        cls.validate_channel(channel_slug=channel)
        try:
            # Delegate to the plugin manager; gateway errors become input errors.
            response = info.context.plugins.initialize_payment(
                gateway, payment_data, channel_slug=channel
            )
        except PaymentError as e:
            raise ValidationError(
                {
                    "payment_data": ValidationError(
                        str(e), code=PaymentErrorCode.INVALID.value
                    )
                }
            )
        return PaymentInitialize(initialized_payment=response)
class MoneyInput(graphene.InputObjectType):
    """Currency + amount pair used by card balance checks."""
    currency = graphene.String(description="Currency code.", required=True)
    amount = PositiveDecimal(description="Amount of money.", required=True)
class CardInput(graphene.InputObjectType):
    """GraphQL input: tokenized card data used for a balance check."""
    # "code" is a provider token (nonce), not the raw card number.
    code = graphene.String(
        description=(
            "Payment method nonce, a token returned "
            "by the appropriate provider's SDK."
        ),
        required=True,
    )
    cvc = graphene.String(description="Card security code.", required=False)
    money = MoneyInput(
        description="Information about currency and amount.", required=True
    )
class PaymentCheckBalanceInput(graphene.InputObjectType):
    """GraphQL input for the paymentCheckBalance mutation."""
    gateway_id = graphene.types.String(
        description="An ID of a payment gateway to check.", required=True
    )
    method = graphene.types.String(description="Payment method name.", required=True)
    channel = graphene.String(
        description="Slug of a channel for which the data should be returned.",
        required=True,
    )
    card = CardInput(description="Information about card.", required=True)
class PaymentCheckBalance(BaseMutation):
    # Raw gateway response payload; shape is gateway-specific.
    data = JSONString(description="Response from the gateway.")

    class Arguments:
        input = PaymentCheckBalanceInput(
            description="Fields required to check payment balance.", required=True
        )

    class Meta:
        description = "Check payment balance."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Validate gateway, currency and channel, then query the balance.

        Raises ValidationError with code BALANCE_CHECK_ERROR when the
        gateway call itself fails.
        """
        manager = info.context.plugins
        gateway_id = data["input"]["gateway_id"]
        money = data["input"]["card"].get("money", {})
        cls.validate_gateway(gateway_id, manager)
        cls.validate_currency(money.currency, gateway_id, manager)
        # channel is validated separately and removed so the plugin only
        # receives the gateway-relevant input fields.
        channel = data["input"].pop("channel")
        validate_channel(channel, PaymentErrorCode)
        try:
            data = manager.check_payment_balance(data["input"], channel)
        except PaymentError as e:
            raise ValidationError(
                str(e), code=PaymentErrorCode.BALANCE_CHECK_ERROR.value
            )
        return PaymentCheckBalance(data=data)

    @classmethod
    def validate_gateway(cls, gateway_id, manager):
        """Raise NOT_SUPPORTED_GATEWAY unless *gateway_id* is an available gateway."""
        gateways_id = [gateway.id for gateway in manager.list_payment_gateways()]
        if gateway_id not in gateways_id:
            raise ValidationError(
                {
                    "gateway_id": ValidationError(
                        f"The gateway_id {gateway_id} is not available.",
                        code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
                    )
                }
            )

    @classmethod
    def validate_currency(cls, currency, gateway_id, manager):
        """Raise NOT_SUPPORTED_GATEWAY unless the gateway supports *currency*."""
        if not is_currency_supported(currency, gateway_id, manager):
            raise ValidationError(
                {
                    "currency": ValidationError(
                        f"The currency {currency} is not available for {gateway_id}.",
                        code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
                    )
                }
            )
|
"""gmail.py: Fetch queries from gmail & send replies back
Contains Gmail class which initializes the Gmail service, and has a method to watch
the Gmail mailbox for any changes. When a change occurs, unread messages are
retrieved. If any of the unread messages are from senders in the contact list,
the text of the messages is sent to the librarian as a query, then the reply
from the librarian is sent as a Gmail reply.
Also contains a gmail_send_reply method to send librarian replies to Gmail.
Copyright (c) 2020 by <NAME>.
License: MIT, see LICENSE for more details.
"""
import os
import csv
import sys
import base64
import pprint
import pickle # used for storing / reading back credentials
import logging
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import namedtuple
from helpers.utils import Patience
from multiprocessing import Process
from email.mime.text import MIMEText
from imagezmq import ImageHub, ImageSender
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
log = logging.getLogger(__name__)
class QuerySender(ImageSender):
    """Thin ZMQ REQ/REP sender that forwards text queries to the librarian.

    Reuses imagezmq's ImageSender transport; the query string travels in the
    "message" slot and the image buffer is a 1-byte placeholder.
    """

    def __init__(self, connect_to='tcp://*:5555', REQ_REP=True):
        ImageSender.__init__(self, connect_to=connect_to, REQ_REP=REQ_REP)

    def send_query(self, query, buf=b'0'):
        """Send *query* text; return the reply bytestring (just b'OK' here)."""
        return self.send_jpg(query, buf)
class Gmail:
""" Initialize gmail API, read and write messages
Sets up the Gmail service. Starts a Process() to watch Gmail for new
messages and send the ones that are queries to Librarian via ZMQ. Provides
a method to send replies back to Gmail service.
Parameters:
        settings (str): settings & options from librarian.yaml.
        details (dict): channel options & details specified for the Gmail channel
"""
    def __init__(self, settings, details, use_q_s=True):
        """Set up credentials paths, allow-lists and the Gmail service.

        Parameters:
            settings: librarian settings object (provides lib_dir, patience).
            details (dict): Gmail channel options (port, contacts file, ...).
            use_q_s (bool): when True, this instance forwards queries over ZMQ
                and uses the alternate 'gmail2' credentials directory.
        """
        gmail_dir = settings.lib_dir / Path('gmail')  # gmail directory
        token = Path("token1.pickle")  # token1 when use_q_s=False
        self.token_file = str(gmail_dir / token)
        creds = Path("credentials.json")
        self.credentials_file = str(gmail_dir / creds)
        # Use QuerySender if this instance of Gmail is sending messages via ZMQ
        # Also use alternate directory and files for Gmail creds files
        if use_q_s:  # set up QuerySender to send messages via ZMQ
            self.port = details.get('port', 5559)  # gmail ZMQ port
            self.address = 'tcp://127.0.0.1:' + str(self.port).strip()
            self.q_s = QuerySender(connect_to=self.address)
            gmail_dir = settings.lib_dir / Path('gmail2')  # gmail directory
            token = Path("token2.pickle")  # token2 when use_q_s=True
            self.token_file = str(gmail_dir / token)
            creds = Path("credentials.json")
            self.credentials_file = str(gmail_dir / creds)
        contacts = self.get_contacts(gmail_dir, details)
        # Allow-lists: only messages from these senders become queries.
        self.phones_OK_list = [contact.mobile_phone for contact in contacts]
        self.emails_OK_list = [contact.email for contact in contacts]
        self.mail_check_seconds = details.get('mail_check_seconds', 5)
        self.patience = settings.patience
        # Starts the Gmail API service and captures a current historyId.
        self.gmail, self.historyId = self.gmail_start_service()
def gmail_start_service(self):
""" gmail_start_service -- start the gmail service using credentials
Starts the gmail service using the 2 credential files (json and token).
See Gmail API docs and quickstart.py for details. Reads a message to
obtain a current historyId; used by gmail_monitor to watch
for changes in the gmail mailbox; polling mailbox history is much
cheaper in "points" than polling for new messages themselves.
Returns:
gmail: the Gmail service object with read, send, etc. methods.
historyId: a current historyId
"""
creds = self.get_credentials()
# initialize gmail service
gmail = build('gmail', 'v1', credentials=creds, cache_discovery=False)
# get list of messages: first step in getting a historyId
results = gmail.users().messages().list(userId='me',
maxResults=10,includeSpamTrash=False).execute()
num_msgs = results.get('resultSizeEstimate', -1)
messages = results.get('messages', [])
if not messages:
latestMessageId = None
else:
# get the first message in the list which should be the latest
latestMessageId = messages[3].get('id', None)
latestMessageThreadId = messages[3].get('threadId', None)
# print('Latest Message Id and Thread Id:')
# print(latestMessageId, latestMessageThreadId)
# print('Number of messages Estimate: ', num_msgs)
# print('Number of messages in message list: ', len(messages))
if not messages:
pass
# print('No messages retrieved')
else:
pass
# print()
# print('Received', len(messages), ' messages.')
# print('list of messages:')
# for message in messages:
# pprint.pprint(message)
# print()
# messages().list() returns a list of message & thread ids
# Id and threadId; if they are the same value
# then message is the first message in a new thread
# get a single message & get its historyId
# results is a dict of all the fields of a single message; see API docs
results = gmail.users().messages().get(userId='me',
id=latestMessageId, format='minimal').execute()
if not results:
# print('No message retrieved')
pass
else:
historyId = results.get('historyId', None)
# print('Retrieval of message: ', latestMessageId, 'of thread: ',
# latestMessageThreadId)
# pprint.pprint(results)
return gmail, historyId
    def get_credentials(self):
        """Gets valid user credentials from token.pickle storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.
        The OAuth2 flow uses the Chrome browser; USE SDB browser login!!!
        (Because we are reading SDB email)

        Returns:
            creds, the obtained credentials.
        """
        # If modifying these scopes, delete the file token.pickle.
        # Then, next get_credentials() will build new token with new SCOPES.
        SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        token_file = self.token_file
        credentials_file = self.credentials_file
        if os.path.exists(token_file):
            with open(token_file, 'rb') as token:
                # cached credentials from a previous run
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                # Expired but refreshable: refresh silently, no browser needed.
                creds.refresh(Request())
            else:
                # First run (or revoked token): run the interactive OAuth flow.
                flow = InstalledAppFlow.from_client_secrets_file(
                    credentials_file, SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open(token_file, 'wb') as token:
                pickle.dump(creds, token)
        return creds
def gmail_watcher(self, gmail, historyId, mail_check_seconds,
phones_OK_list, emails_OK_list):
# By putting historyId into a list, it becomes mutable and holds updates
history_list = [historyId] # historyId will be updated by below
next_page_token = ['empty<PASSWORD>'] # token for getting 2nd page of results
while True: # forever loop watching gmail mailbox for changes
if self.mailbox_changed(gmail, history_list, next_page_token,
mail_check_seconds):
# get new messages from gmail, but only the ones that are from
# senders on our OK lists; others are skipped.
new_messages = self.get_new_messages(gmail,
phones_OK_list,
emails_OK_list)
# print('New messages:')
# pprint.pprint(new_messages)
if new_messages: # there are some new messages
self.mark_as_read(gmail, new_messages) # move to below send_query!
for message in new_messages:
# each message is a tuple of values from get_new_messages()
# create a query from the tuple that is string of pure text
query = "|".join(list(message))
# need to add Patience() code here to recover from
# network or librarian outages, similar to how
# imagenodes do.
REP = self.q_s.send_query(query) # ZMQ REP b'OK'
    def mailbox_changed(self,gmail, history_list, next_page_token, mail_check_seconds):
        ''' Use history().list() to check for changes to mailbox

        Depending on how often the gmail api is "checked"
        it is possible for the multiple calls per minute to watch
        for new emails be very expensive. Using history().list() is better.
        Google has Usage Limits measured in Quota Units.
        messages().list() is 5 Quota Units and
        history().list() is 2 Quota Units (less than half price)
        drafts().send() is 100 Quota Units

        This function implements a forever polling loop checking
        history().list() for changes. Returns True when history
        has changed, meaning something in the mailbox has changed.
        There may be false positives (unimportant mailbox changes),
        but other functions check for valid / important messages.
        This function only watches for history().list() changes

        Parameters:
            gmail (service object): the Gmail service object
            history_list (list): list of historyId's. Update to newest one each loop.
            next_page_token (list): list whose 1st element is the nextPageToken
            mail_check_seconds (int): how often to check the gmail history list

        Returns:
            Boolean True if mailbox change; False if not
        '''
        startHistoryId = history_list[0]
        # users().history().list() requires a valid startHistoryId.
        # The startHistoryId was obtained by gmail_start_service().
        last_results = gmail.users().history().list(userId='me',
            startHistoryId=startHistoryId,
            maxResults=10).execute()
        i = 0  # number of history changes checks
        num_err_results = 0  # count of failed history().list() calls
        while True:  # loop forever until a there is a change in mailbox history
            # Do not check history more often than mail_check_seconds
            sleep(mail_check_seconds)
            try:
                results = gmail.users().history().list(userId='me',
                    startHistoryId=startHistoryId,
                    maxResults=10).execute()
            except Exception as ex:
                num_err_results += 1
                log.error("Error raised in gmail.history.list() num = " + str(num_err_results))
                results = last_results  # set to last non-error results
                if num_err_results > 10:  # too many; put into a variable?
                    raise  # raise the exception up to main handler
                else:  # wait for timeout type error to clear
                    sleep(10)  # need to edit this into a variable?
            i += 1
            if results == last_results:  # no mailbox history changes
                pass
            else:  # some changes in history().list()
                # NOTE(review): last_results is never refreshed inside the
                # loop, so "changed" means changed relative to the snapshot
                # taken on entry — confirm this is intended.
                nextPageToken = results.get("nextPageToken", "emptyToken")
                historyId = results.get("historyId", "emptyId")
                # set historyId and nextPageToken as new list elements
                history_list[0] = historyId  # save historyId in list for next call
                next_page_token[0] = nextPageToken
                return True
def is_SMS(self, from_value):
# check that this message has address form of a text message
if ('<EMAIL>>' in from_value
and '(SMS)' in from_value
and '<1' in from_value):
return True
else:
return False
    def get_new_messages(self, gmail, phones_OK_list,
                         emails_OK_list, n=25):
        ''' gets some new messages from gmail messages.list()

        Only unread inbox messages whose sender is on one of the OK lists are
        returned; all others are skipped.

        Parameters:
            gmail (service object): the Gmail service object
            phones_OK_list (list): list of phone numbers OK to receive from
            emails_OK_list (list): list of emails it is OK to receive from
            n (int): number of emails to retrieve in a batch

        Returns:
            list of (message_text, msg_id, thread_id, from_value,
            subject_value, to_value, sms_from) tuples, or None when there are
            no unread inbox messages.
        '''
        results = gmail.users().messages().list(userId='me',
            labelIds=['UNREAD', 'INBOX'],
            maxResults=n).execute()
        message_list = []
        if 'messages' in results:
            message_list.extend(results['messages'])
        else:
            return None
        if len(message_list) == 0:
            return None
        new_messages = []
        for message in message_list:
            msg_id = message.get('id', None)
            # fetch the full message for this id
            message = gmail.users().messages().get(userId='me',
                id=msg_id).execute()
            thread_id = message.get('threadId', None)
            labels = message.get('labelIds', None)
            # internalDate is epoch milliseconds
            message_internalDate = message['internalDate']
            message_datetime = datetime.fromtimestamp(int(int(message_internalDate)/1000))
            payload = message['payload']
            headers = payload['headers']
            # each header is a dictionary holding 2 tuples
            # each tuple is (header name, header value)
            # name and value are unicode strings
            # NOTE(review): from_value / subject_value / to_value stay unbound
            # if a message lacks a From/Subject/To header — confirm expected
            # messages always carry all three.
            for header in headers:
                name, value = header.items()
                name_str = str(name[1])
                from_str = u'From'
                subject_str = u'Subject'
                to_str = u'To'
                if (name_str == from_str):
                    from_value = value[1]
                elif (name_str == subject_str):
                    subject_value = value[1]
                elif (name_str == to_str):
                    to_value = value[1]
            if self.is_SMS(from_value):
                # extract SMS sending phone number from "From" header
                num_start = from_value.find('<1') + 14
                num_end = num_start + 10
                sms_from = from_value[num_start:num_end]
                if sms_from not in phones_OK_list:
                    continue  # sender's phone not on the allow-list; skip
                # strip the SMS gateway wrapper text from the snippet
                message_text = message['snippet'][13:]
                text_end = message_text.find(" YOUR ")
                message_text = message_text[:text_end]
            else:  # a regular email; not SMS
                sms_from = None
                if from_value not in emails_OK_list:
                    continue  # sender's email not on the allow-list; skip
                message_text = message['snippet']
            # used encode to get rid of all non-ascii characters
            message_text = bytes(message_text, 'utf-8').decode('utf-8','ignore')
            # replace snippet encoding of apostrophe
            # TODO Find out why can't find / replace '
            message_text = message_text.replace("'", "'")
            # append message tuple to new_messages, message_text first
            new_messages.append((message_text, msg_id, thread_id,
                from_value, subject_value,
                to_value, sms_from),)
        return new_messages
def mark_as_read(self, gmail, new_messages):
""" Mark gmail messages as read by removing UNREAD label
Parameters:
gmail (service object): gmail service object
new_message (list): list of messages to be marked as "READ"
"""
if new_messages is None: # no messages to mark
return
for message in new_messages:
msg_id = message[1]
gmail.users().messages().modify(userId='me',
id=msg_id,body={'removeLabelIds': ['UNREAD']}).execute()
    def gmail_send_reply(self, gmail, reply_str):
        """ gmail_send_reply: send reply from the Librarian back via gmail

        This function is called from the librarian main loop.
        It sends a single query reply back via gmail. Each query sent to the
        librarian from gmail has header info appended to the text of the
        message. This gmail reply sender uses that header info to reply to the
        correct messageId, threadId, etc.

        Structure of reply_str:
        reply_text|msg_id|thread_id|to_value|subject_value|from_value|sms_from
        (note that to_value and from_value are swapped from original message)
        (reply protocol requires this swapping pattern to draft a reply)

        Parameters:
            gmail (Gmail service object): Gmail service object for Gmail API
            reply_str (str): reply from Librarian to be sent back via gmail
        """
        # First parse the reply into message text and gmail threadid, etc.
        reply = reply_str.split('|')  # reply is list of reply parts in reply_str
        # then load the draft reply and send it
        threadid = reply[2]  # thread being replied to
        to_send = MIMEText(reply[0])  # text of reply created by librarian
        to_send["To"] = reply[3]  # replying to (which was from_value in msg)
        to_send["Subject"] = reply[4]  # replying to subject
        to_send["From"] = reply[5]  # replying from (which was to_value in msg)
        # Gmail API wants the RFC 2822 message base64url-encoded as a string
        raw = base64.urlsafe_b64encode(to_send.as_string().encode(encoding='UTF-8'))
        raw = raw.decode(encoding='UTF-8')  # convert back to string
        message = {'message': {'raw': raw, 'threadId': threadid}}
        # create a draft bound to the thread, then send that draft
        draft = gmail.users().drafts().create(userId="me", body=message).execute()
        draftid = draft['id']
        gmail.users().drafts().send(userId='me',
            body={ 'id': draftid }).execute()
    def gmail_send_SMS(self, phone_number, message_text):
        """ gmail_send_SMS: send SMS text message via Gmail

        It sends a single SMS text message. For security and other reasons,
        this does not send a new Gmail message. Instead it searches for a
        Gmail SMS message from the phone number, composes a reply_str from
        the most recent such message, then sends it via gmail_send_reply().

        Structure needed for reply_str:
        reply_text|msg_id|thread_id|to_value|subject_value|from_value|sms_from
        (note that to_value and from_value are swapped from original message)
        (reply protocol requires this swapping pattern to draft a reply)

        Parameters:
            phone_number (str): phone number to send text message to
            message_text (str): message to send to phone_number
        """
        # use phone number to search for Gmail SMS messages from that number
        gmail = self.gmail
        p = phone_number.strip()
        area_code = p[0:3]
        first_3 = p[3:6]
        last_4 = p[6:10]
        # Gmail search query, e.g. "SMS 888 555 1212"
        search = ' '.join(['SMS', area_code, first_3, last_4])
        results = gmail.users().messages().list(userId='me',
            maxResults=10,includeSpamTrash=False,q=search).execute()
        num_msgs = results.get('resultSizeEstimate', -1)
        messages = results.get('messages', [])
        num_messages = len(messages)
        if not messages:
            # NOTE(review): when no prior SMS thread exists, msg_id below is
            # never bound and the following get() would raise — confirm a
            # prior SMS conversation always exists before this is called.
            latestMessageId = None
        else:
            # get the first message in the list which should be the latest
            latestMessageId = messages[0].get('id', None)
            latestMessageThreadId = messages[0].get('threadId', None)
            msg_id = messages[0].get('id', None)
        message = gmail.users().messages().get(userId='me',
            id=msg_id).execute()
        thread_id = message.get('threadId', None)
        labels = message.get('labelIds', None)
        # internalDate is epoch milliseconds
        message_internalDate = message['internalDate']
        message_datetime = datetime.fromtimestamp(int(int(message_internalDate)/1000))
        payload = message['payload']
        headers = payload['headers']
        # each header is a dictionary holding 2 tuples
        # each tuple is (header name, header value)
        # name and value are unicode strings
        for header in headers:
            name, value = header.items()
            name_str = str(name[1])
            from_str = u'From'
            subject_str = u'Subject'
            to_str = u'To'
            if (name_str == from_str):
                from_value = value[1]
            elif (name_str == subject_str):
                subject_value = value[1]
            elif (name_str == to_str):
                to_value = value[1]
        # swap so the reply goes back out through the SMS gateway address
        if 'SMS' in to_value:
            to_value, from_value = from_value, to_value
        # append a timestamp like "(3:05 PM)" to the outgoing text
        time_str = datetime.now().strftime("%I:%M %p").lstrip("0")
        message_text = message_text + " (" + time_str + ")"
        message_tuple = (message_text, msg_id, thread_id,
            from_value, subject_value,
            to_value, search)
        msg_string = "|".join(list(message_tuple))
        self.gmail_send_reply(gmail, msg_string)
def close(self):
""" close: close the QueryReceiver ZMQ port and context
"""
self.q_r.close()
def get_contacts(self, gmail_dir, details):
"""Gets contacts from contacts data file
Example lines from contacts.txt for reference
name|full_name|canonical_name|mobile_phone|email
Jeff|<NAME>|jeff_bass|8885551212|<EMAIL>
Returns:
contacts, a list of named tuples of contact info
Example:
>>> [contact.mobile_phone for contact in contacts if contact.name=='Jeff']
['8885551212']
"""
contacts_file = details.get('contacts', 'contacts.txt')
contacts_file = gmail_dir / Path(contacts_file)
# print('contacts file:', contacts_file )
with open(contacts_file, 'r') as f:
# read header line and set up namedtuple
lines = csv.reader(f, delimiter='|')
# fields = lines.next() # field names list from first line in file
fields = next(lines) # field names list from first line in file
Contact = namedtuple('Contact', fields)
# read all lines in file, creating a named tuple for each line in file
# if len(line) > 0 avoids TypeError due to any blank lines at end of file
contacts = [Contact(*line) for line in lines if len(line) > 0]
return contacts
def fix_comm_link(self):
""" Evaluate, repair and restart communications link with librarian.
Restart link if possible, else restart program.
"""
# TODO add some of the ongoing experiments to this code when it has
# progressed in development and testing
# Current protocol:
# just sys.exit() for now.
# Because this program is started
# and restarted by systemd as a service with restart option on, it
# will restart the program with a delay and try communicating again.
# It will be logged in systemctl journald.
#
# Other ideas that might be worth trying:
# 1. Just wait longer one time and try sending again
# 2. Doing 1 repeatedly with exponential time increases
# 3. Stopping and closing ZMQ context; restarting and sending
# last message
# 4. Check WiFi ping; stop and restart WiFi service
#
raise KeyboardInterrupt
|
from .mock_response import MockHTTPResponse
from datetime import datetime
from requests.models import PreparedRequest, Response
from requests.packages.urllib3 import HTTPResponse
from requests.structures import CaseInsensitiveDict
from requests.status_codes import _codes
from requests.cookies import RequestsCookieJar
try:
from requests.packages.urllib3._collections import HTTPHeaderDict
except ImportError:
from .headers import HTTPHeaderDict
import base64
import io
import sys
def coerce_content(content, encoding=None):
    """Return *content* as text, decoding bytes-like input.

    Undecodable byte sequences are replaced (never raises); text input is
    returned unchanged. Default encoding is utf-8.
    """
    if hasattr(content, 'decode'):
        return content.decode(encoding or 'utf-8', 'replace')
    return content
def body_io(string, encoding=None):
    """Return a BytesIO over *string*, encoding text input first (utf-8 default)."""
    data = string.encode(encoding or 'utf-8') if hasattr(string, 'encode') else string
    return io.BytesIO(data)
def from_list(value):
    """Return the first element when *value* is a list, else *value* itself."""
    return value[0] if isinstance(value, list) else value
def add_body(r, preserve_exact_body_bytes, body_dict):
    """Simple function which takes a response or request and coerces the body.

    This function adds either ``'string'`` or ``'base64_string'`` to
    ``body_dict``. If ``preserve_exact_body_bytes`` is ``True`` then it
    encodes the body as a base64 string and saves it like that. Otherwise,
    it saves the plain string.

    :param r: This is either a PreparedRequest instance or a Response
        instance.
    :param preserve_exact_body_bytes bool: Either True or False.
    :param body_dict dict: A dictionary already containing the encoding to be
        used.
    """
    # Responses expose the body as ``raw``; requests expose it as ``body``.
    raw_body = getattr(r, 'raw', getattr(r, 'body', None))
    if hasattr(raw_body, 'read'):
        raw_body = raw_body.read()
    raw_body = raw_body or ''
    gzipped = 'gzip' in r.headers.get('Content-Encoding', '')
    if preserve_exact_body_bytes or gzipped:
        # ensure we have bytes before base64-encoding
        if sys.version_info >= (3, 0) and hasattr(raw_body, 'encode'):
            raw_body = raw_body.encode(body_dict['encoding'] or 'utf-8')
        body_dict['base64_string'] = base64.b64encode(raw_body).decode()
    else:
        body_dict['string'] = coerce_content(raw_body, body_dict['encoding'])
def serialize_prepared_request(request, preserve_exact_body_bytes):
    """Serialize a PreparedRequest into a cassette-friendly dict.

    Header values are wrapped in single-element lists to match the
    recorded-cassette format.
    """
    body = {'encoding': 'utf-8'}
    add_body(request, preserve_exact_body_bytes, body)
    headers = {
        coerce_content(name, 'utf-8'): [value]
        for name, value in request.headers.items()
    }
    return {
        'body': body,
        'headers': headers,
        'method': request.method,
        'uri': request.url,
    }
def deserialize_prepared_request(serialized):
    """Rebuild a requests PreparedRequest from its serialized dict form."""
    prepared = PreparedRequest()
    prepared._cookies = RequestsCookieJar()
    body = serialized['body']
    if isinstance(body, dict):
        # Prefer the plain string form; fall back to decoding base64.
        prepared.body = body.get('string') or base64.b64decode(
            body.get('base64_string', '').encode())
    else:
        prepared.body = body
    header_pairs = [(k, from_list(v)) for k, v in serialized['headers'].items()]
    prepared.headers = CaseInsensitiveDict(header_pairs)
    prepared.method = serialized['method']
    prepared.url = serialized['uri']
    return prepared
def serialize_response(response, preserve_exact_body_bytes):
    """Serialize a Response (status, headers, body, url) into a dict.

    Multi-valued headers are preserved: each header name maps to the full
    list of its values.
    """
    body = {'encoding': response.encoding}
    add_body(response, preserve_exact_body_bytes, body)
    header_map = HTTPHeaderDict(response.raw.headers)
    headers = {name: header_map.getlist(name) for name in header_map.keys()}
    return {
        'body': body,
        'headers': headers,
        'status': {'code': response.status_code, 'message': response.reason},
        'url': response.url,
    }
def deserialize_response(serialized):
    """Rebuild a requests Response (with urllib3 raw body) from its dict form."""
    response = Response()
    response.encoding = serialized['body']['encoding']
    header_dict = HTTPHeaderDict()
    for name, values in serialized['headers'].items():
        # Values may be a list (current format) or a single string (older
        # cassettes); add each value under the same name.
        if isinstance(values, list):
            for value in values:
                header_dict.add(name, value)
        else:
            header_dict.add(name, values)
    response.headers = CaseInsensitiveDict(header_dict)
    response.url = serialized.get('url', '')
    if 'status' in serialized:
        response.status_code = serialized['status']['code']
        response.reason = serialized['status']['message']
    else:
        # Older cassettes stored only the code; derive the reason phrase.
        response.status_code = serialized['status_code']
        response.reason = _codes[response.status_code][0].upper()
    add_urllib3_response(serialized, response, header_dict)
    return response
def add_urllib3_response(serialized, response, headers):
    """Attach a urllib3 HTTPResponse (the raw body) to *response*."""
    serialized_body = serialized['body']
    if 'base64_string' in serialized_body:
        body = io.BytesIO(
            base64.b64decode(serialized_body['base64_string'].encode())
        )
    else:
        body = body_io(**serialized_body)
    raw = HTTPResponse(
        body,
        status=response.status_code,
        reason=response.reason,
        headers=headers,
        preload_content=False,
        original_response=MockHTTPResponse(headers)
    )
    # NOTE(sigmavirus24):
    # urllib3 updated it's chunked encoding handling which breaks on recorded
    # responses. Since a recorded response cannot be streamed appropriately
    # for this handling to work, we can preserve the integrity of the data in
    # the response by forcing the chunked attribute to always be False.
    # This isn't pretty, but it is much better than munging a response.
    raw.chunked = False
    response.raw = raw
def timestamp():
    """Return a UTC ISO-8601 timestamp truncated to whole seconds."""
    stamp = datetime.utcnow().isoformat()
    try:
        dot = stamp.rindex('.')
    except ValueError:
        # No fractional-seconds part present; return the stamp unchanged.
        return stamp
    return stamp[:dot]
_SENTINEL = object()
def _option_from(option, kwargs, defaults):
value = kwargs.get(option, _SENTINEL)
if value is _SENTINEL:
value = defaults.get(option)
return value
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def tools_repositories():
    """Fetch prebuilt developer tools (linters, formatters, helm) as repos.

    Each repository is declared only when the workspace has not already
    defined a rule with the same name, so users may override any of them.
    """
    excludes = native.existing_rules().keys()

    # (name, sha256, url) for every standalone executable tool binary.
    tool_binaries = [
        ("buildifier", "4c985c883eafdde9c0e8cf3c8595b8bfdf32e77571c369bf8ddae83b042028d6", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/buildifier"),
        ("buildifier_osx", "9b108decaa9a624fbac65285e529994088c5d15fecc1a30866afc03a48619245", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/buildifier.mac"),
        ("buildozer", "2a5c3e3390de07248704f21ed38495062fb623c9b0aef37deda257a917891ea6", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/buildozer"),
        ("buildozer_osx", "316d24478f3be8a076b7901810dbfff79e305b3ac73a93b56f30a92950e5d0d0", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/buildozer.mac"),
        ("unused_deps", "3562f6453eb433be5477b5d11c197ff95c7f359fa752c8a89e619af00da2c8fd", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/unused_deps"),
        ("unused_deps_osx", "b255362bacddb0b523a122efcc92a1c13f579cd09bce55bed905ef1b9d4a9514", "https://github.com/bazelbuild/buildtools/releases/download/0.29.0/unused_deps.mac"),
        ("shfmt", "86892020280d923976ecaaad1e7db372d37dce3cfaad44a7de986f7eb728eae7", "https://github.com/mvdan/sh/releases/download/v3.0.1/shfmt_v3.0.1_linux_amd64"),
        ("shfmt_osx", "e470d216818a107078fbaf34807079c4857cb98610d67c96bf4dece43a56b66c", "https://github.com/mvdan/sh/releases/download/v3.0.1/shfmt_v3.0.1_darwin_amd64"),
        ("prototool", "e1e3d81228f7d157d9476034dd1b21005926c96dcd8b7d220074ac071304545e", "https://github.com/uber/prototool/releases/download/v1.9.0/prototool-Linux-x86_64"),
        ("prototool_osx", "8cb668edbd51a13f057535d8ca7e4b0574fe7ae092338714b3d4b513cf188220", "https://github.com/uber/prototool/releases/download/v1.9.0/prototool-Darwin-x86_64"),
    ]
    for name, sha256, url in tool_binaries:
        if name not in excludes:
            http_file(
                name = name,
                executable = True,
                sha256 = sha256,
                urls = [url],
            )

    # helm ships as a tarball, so it needs http_archive plus a BUILD file.
    helm_archives = [
        ("helm", "538f85b4b73ac6160b30fd0ab4b510441aa3fa326593466e8bf7084a9c288420", "https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz"),
        ("helm_osx", "71d213d63e1b727d6640c4420aee769316f0a93168b96073d166edcd3a425b3d", "https://get.helm.sh/helm-v3.4.1-darwin-amd64.tar.gz"),
    ]
    for name, sha256, url in helm_archives:
        if name not in excludes:
            http_archive(
                name = name,
                build_file = "@//:tools/helm/helm.BUILD",
                sha256 = sha256,
                urls = [url],
            )
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Lambda function to enables ultrawarm on ElasticSearch
@author iftik
"""
import base64
import json
import logging
import os
from datetime import datetime
from typing import Optional
import boto3
from elasticsearch import Elasticsearch
# Module-level setup: created once per Lambda container and reused across
# invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
session = boto3.Session()
# NOTE(review): despite the name, this is a Secrets Manager client, not SSM.
ssm_client = boto3.client('secretsmanager', region_name=os.environ['REGION'])
def lambda_handler(event, context):
    """Entry point: move every eligible log index to UltraWarm storage."""
    client = get_elasticsearch_client()
    age_limit_days = int(os.environ['HOT_INDEX_AGE_LIMIT'])
    for index_name in get_all_log_indices(client):
        migrating_index_to_warm(client, index_name, age_limit_days)
def migrating_index_to_warm(es, index, hot_index_age_limit):
    """Migrate one index to UltraWarm when it is old enough.

    An index is migrated only when it is not the current write index, is not
    already on warm storage (``box_type`` != 'warm'), and its age in days is
    at least ``hot_index_age_limit``.

    :param es: Elasticsearch client.
    :param index: Name of the index to evaluate.
    :param hot_index_age_limit: Minimum age (days) before migrating.
    """
    logger.info("Evaluating index for ultrawarm migration: %s", index)
    result = es.indices.get(index=index)
    flat_json = flatten_json(result)
    is_write_index = json_search(flat_json, 'is_write_index')
    # creation_date is reported in epoch milliseconds; convert to seconds.
    creation_date = int(int(json_search(flat_json, 'creation_date')) / 1000)
    box_type = json_search(flat_json, 'box_type')
    # Use identity comparison with None (PEP 8); `box_type != 'warm'` alone
    # already covers the case where box_type is None.
    if is_write_index is not None and not is_write_index:
        if box_type != 'warm':
            index_age = datetime.now() - datetime.fromtimestamp(creation_date)
            if index_age.days >= hot_index_age_limit:
                logger.info("Migrating following index to warm: %s", index)
                es.indices.migrate_to_ultrawarm(index=index)
def flatten_json(nested_json):
    """Flatten nested dicts/lists into a single-level dict.

    Keys on the path are joined with '_' (list elements contribute their
    index), e.g. ``{'a': {'b': 1}}`` -> ``{'a_b': 1}``.

    :param nested_json: Arbitrarily nested dict/list/scalar structure.
    :return: Flat ``{joined_key: leaf_value}`` dict.
    """
    out = {}

    def flatten(x, name=''):
        # Use isinstance (idiomatic) instead of `type(x) is ...`.
        if isinstance(x, dict):
            for key in x:
                flatten(x[key], name + key + '_')
        elif isinstance(x, list):
            for i, item in enumerate(x):
                flatten(item, name + str(i) + '_')
        else:
            # Strip the trailing '_' appended while descending.
            out[name[:-1]] = x

    flatten(nested_json)
    return out
def json_search(json_object, key):
    """Return the value of the first entry whose key contains *key*.

    Performs a substring match against the (flattened) attribute names and
    returns None when nothing matches.
    """
    for attribute in json_object:
        if key in attribute:
            return json_object[attribute]
    return None
def get_all_log_indices(es):
    """Return the names of all indices whose name starts with 'my'.

    :param es: Elasticsearch client.
    :return: List of matching index names.
    """
    # Use the module logger instead of print for consistency with the rest
    # of this Lambda.
    logger.info('Retrieving all indices...')
    indices = es.cat.indices(params={
        'format': 'json',
        'h': 'index'
    })
    return [entry['index'] for entry in indices
            if entry['index'].startswith('my')]
def get_elasticsearch_client():
    """Build an Elasticsearch client using credentials from Secrets Manager."""
    endpoint = 'https://' + os.environ['ELASTICSEARCH_ENDPOINT']
    secret_path = os.environ['SSM_PATH']
    username = get_secret(secret_path, "username")
    password = get_secret(secret_path, "password")
    logger.info('Preparing ES client...')
    return Elasticsearch([endpoint], http_auth=(username, password))
def get_secret(secret_name: str, sec_key: str) -> Optional[str]:
    """Fetch a secret value from Secrets Manager.

    Binary secrets are base64-decoded first. When *sec_key* is truthy the
    payload is parsed as JSON and the value under that key is returned;
    otherwise the raw payload is returned.
    """
    response = ssm_client.get_secret_value(SecretId=secret_name,
                                           VersionStage="AWSCURRENT")
    if 'SecretString' in response:
        payload = response['SecretString']
    else:
        payload = base64.b64decode(response['SecretBinary'])
    return json.loads(payload)[sec_key] if sec_key else payload
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import CallableModelWrapper
import sys
import os
import os.path
import tensorflow as tf
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'labsheets', 'CIFAR10'))
import cifar10 as cf
FLAGS = tf.app.flags.FLAGS

# Runtime parameters
tf.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/',
                           'Directory where the dataset will be stored and checkpoint. (default: %(default)s)')
tf.app.flags.DEFINE_integer('max-steps', 10000,
                            'Number of mini-batches to train on. (default: %(default)d)')
tf.app.flags.DEFINE_integer('log-frequency', 10,
                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('save-model', 1000,
                            'Number of steps between model saves (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-3, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate-decay', 0.8, 'Rate of decay of learning rate (default: %(default)d)')
# Fixed typo (FSGM -> FGSM) and the unbalanced ')' in the help string.
tf.app.flags.DEFINE_float('fgsm-eps', 0.05, 'FGSM EPS value (default: %(default)d)')
# Data parameters
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
                           'Directory where to write event logs and checkpoint. (default: %(default)s)')

# Per-run log directory encodes the batch size and learning rate.
run_log_dir = os.path.join(FLAGS.log_dir,
                           'Lab_5_{bs}_lr_{lr}_adv'.format(bs=FLAGS.batch_size,
                                                           lr=FLAGS.learning_rate))
def weight_variable(shape):
    """Create a weight Variable of *shape*, initialised from a truncated
    normal distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weights')
def bias_variable(shape):
    """Create a bias Variable of *shape*, initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name='biases')
xavier_initializer = tf.contrib.layers.xavier_initializer(uniform=True)
def deepnn(x_image, train):
    """deepnn builds the graph for a deep net for classifying CIFAR10 images.

    Architecture: two conv(5x5) + batch-norm + ReLU + max-pool(2x2) stages,
    followed by two 1024-unit dense layers and a 10-way linear output.

    Args:
        x_image: input tensor of images, reshaped to
            (N_examples, img_width, img_height, img_channels) by the caller.
        train: boolean scalar placeholder; when True, random left-right flip
            and random brightness augmentation are applied and batch norm
            runs in training mode.

    Returns:
        out: a tensor of shape (N_examples, 10) holding the classification
        logits for the 10 CIFAR10 classes (airplane, automobile, bird, cat,
        deer, dog, frog, horse, ship, truck).
    """
    # Data augmentation is only active when `train` is True.
    x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)
    x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)
    conv1 = tf.layers.conv2d(
        inputs=x_image,
        filters=32,
        kernel_size=[5,5],
        kernel_initializer=xavier_initializer,
        padding='same',
        use_bias=True,
        name='conv1'
    )
    # Batch norm before the non-linearity; `training=train` switches between
    # batch statistics (training) and moving averages (inference).
    conv1_bn = tf.nn.relu(tf.layers.batch_normalization(conv1, training=train, name="conv1_bn"))
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1_bn,
        pool_size=[2, 2],
        strides=2,
        name='pool1'
    )
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5,5],
        kernel_initializer=xavier_initializer,
        padding='same',
        use_bias=True,
        name='conv2'
    )
    conv2_bn = tf.nn.relu(tf.layers.batch_normalization(conv2, training=train, name="conv2_bn"))
    pool2 = tf.layers.max_pooling2d(
        inputs=conv2_bn,
        pool_size=[2, 2],
        strides=2,
        name='pool2'
    )
    # Flatten: 8x8 spatial grid * 64 filters = 4096 features per example
    # (assumes 32x32 inputs after two 2x2 pools — TODO confirm for other sizes).
    v = tf.reshape(pool2, [-1, 4096])
    fc1 = tf.layers.dense(
        inputs=v,
        units=1024,
        activation=tf.nn.relu,
        use_bias=True,
        name='fc1'
    )
    fc2 = tf.layers.dense(
        inputs=fc1,
        units=1024,
        activation=tf.nn.relu,
        use_bias=True,
        name='fc2'
    )
    # Linear output layer: raw logits, no softmax (applied in the loss).
    out = tf.layers.dense(
        inputs=fc2,
        units=10,
        activation=None,
        use_bias=True,
        name='out'
    )
    return out
def main(_):
    """Train the CIFAR10 classifier with FGSM adversarial training, then
    evaluate on both the clean and the adversarial test set.

    Each training step runs backprop on a clean batch, generates FGSM
    adversarial examples from it, and runs a second backprop step on those.
    Summaries, checkpoints and a final test-set evaluation are written under
    `run_log_dir`.
    """
    tf.reset_default_graph()
    # Import data
    cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)
    cifar.preprocess()
    with tf.variable_scope('inputs'):
        # Create the model
        x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])
        # Define loss and optimizer
        y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
        # Whether model is training
        train = tf.placeholder(tf.bool, [])
    # Reshape to use within a convolutional neural net. Last dimension is for
    # 'features' - it would be 1 for a grayscale image, 3 for an RGB image,
    # 4 for RGBA, etc.
    x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])
    with tf.variable_scope('model'):
        # Build the graph for the deep net; the CallableModelWrapper lets
        # cleverhans re-invoke the same model on adversarial inputs.
        y_conv = deepnn(x_image, train)
        model = CallableModelWrapper(lambda _x: deepnn(_x, train), 'logits')
    # Loss function - softmax cross entropy over the 10 classes.
    with tf.variable_scope('x_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    # Adam optimiser with exponentially decayed learning rate.
    # NOTE(review): the global-step Variable passed here is never incremented,
    # so the decay may not advance — verify against training curves.
    decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, FLAGS.learning_rate_decay)
    # Batch-norm moving averages must update before each training step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)
    # Streaming accuracy metrics: one for clean inputs, one for adversarial.
    accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))
    adv_accuracy, adv_acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))
    # summaries for TensorBoard visualisation
    loss_summary = tf.summary.scalar('Loss', cross_entropy)
    adv_loss_summary = tf.summary.scalar('Adversarial Loss', cross_entropy)
    acc_summary = tf.summary.scalar('Accuracy', accuracy)
    adv_acc_summary = tf.summary.scalar('Adv Accuracy', adv_accuracy)
    image_summary = tf.summary.image('Test Images', x_image)
    # saver for checkpoints
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
    with tf.Session() as sess:
        # Reuse the trained weights when building the FGSM attack graph.
        with tf.variable_scope('model', reuse=True):
            fgsm = FastGradientMethod(model, sess=sess)
            adv_image_op = fgsm.generate(x_image, eps=FLAGS.fgsm_eps, clip_min=0.0, clip_max=1.0)
        summary_writer_train = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
        summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)
        summary_writer_adversarial = tf.summary.FileWriter(run_log_dir + '_adversarial', sess.graph, flush_secs=5)
        summary_writer_images = tf.summary.FileWriter(run_log_dir + '_images', sess.graph, flush_secs=5)
        summary_writer_images_adversarial = tf.summary.FileWriter(run_log_dir + '_images_adversarial', sess.graph, flush_secs=5)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Training and validation
        for step in range(FLAGS.max_steps):
            # Training: one step on the clean batch (also producing its FGSM
            # adversarial counterpart), then one step on the adversarial batch.
            (train_images, train_labels) = cifar.getTrainBatch()
            (test_images, test_labels) = cifar.getTestBatch()
            _, summary_str, train_images_adv = sess.run([optimiser, loss_summary, adv_image_op], feed_dict={x: train_images, y_: train_labels, train: True})
            _, summary_str_adv = sess.run([optimiser, adv_loss_summary], feed_dict={x_image: train_images_adv, y_: train_labels, train: True})
            # NOTE(review): `log_frequency + 1` makes train and validation
            # summaries fire on different steps — confirm this is intentional.
            if step % (FLAGS.log_frequency + 1) == 0:
                summary_writer_train.add_summary(summary_str, step)
                summary_writer_train.add_summary(summary_str_adv, step)
            ## Validation: Monitoring accuracy using validation set
            if step % FLAGS.log_frequency == 0:
                # NOTE(review): this rebinds the local names `accuracy` /
                # `adv_accuracy` from tensors to floats after the first
                # validation pass.
                accuracy, summary_str, image_str, test_images_adv = sess.run([acc_op, acc_summary, image_summary, adv_image_op], feed_dict={x: test_images, y_: test_labels, train: False})
                adv_accuracy, adv_summary_str, adv_image_str = sess.run([adv_acc_op, adv_acc_summary, image_summary], feed_dict={x_image: test_images_adv, y_: test_labels, train: False})
                print('step %d, accuracy on validation batch: %g' % (step, accuracy))
                summary_writer_validation.add_summary(summary_str, step)
                summary_writer_images.add_summary(image_str)
                summary_writer_adversarial.add_summary(adv_summary_str, step)
                summary_writer_images_adversarial.add_summary(adv_image_str)
            ## Save the model checkpoint periodically.
            if step % FLAGS.save_model == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(run_log_dir + '_train', 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
        # Testing
        # resetting the internal batch indexes
        cifar.reset()
        evaluated_images = 0
        test_accuracy = 0
        adv_test_accuracy = 0
        batch_count = 0
        # don't loop back when we reach the end of the test set
        while evaluated_images != cifar.nTestSamples:
            (testImages, testLabels) = cifar.getTestBatch(allowSmallerBatches=True)
            test_accuracy_temp, _, adv_images = sess.run([acc_op, acc_summary, adv_image_op], feed_dict={x: testImages, y_: testLabels, train: False})
            adv_test_accuracy_temp = sess.run(adv_acc_op, feed_dict={x_image: adv_images, y_: testLabels, train: False})
            batch_count = batch_count + 1
            test_accuracy = test_accuracy + test_accuracy_temp
            adv_test_accuracy = adv_test_accuracy + adv_test_accuracy_temp
            evaluated_images = evaluated_images + testLabels.shape[0]
        # Average per-batch accuracies over the whole test set.
        test_accuracy = test_accuracy / batch_count
        adv_test_accuracy = adv_test_accuracy / batch_count
        print('test set: accuracy on test set: %0.3f' % test_accuracy)
        print('test set: accuracy on adversarial test set: %0.3f' % adv_test_accuracy)
# Parse flags and dispatch to main() via the TensorFlow app runner.
if __name__ == '__main__':
    tf.app.run(main=main)
|
import pandas as pd
import pandas.io.data as web
from pandas.tseries.offsets import BDay
import numpy as np
from scipy.stats import itemfreq
# Reference dates: "yesterday" is ~5000 business days before today.
today = pd.datetime.today()
yesterday = today - BDay(5000)
# Compact numpy printing for the histogram output below.
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
def hist(x):
    """Bucket a gap ratio into 0.05-wide bins.

    Values of 1.0 or more are clamped into a single top bin before bucketing.
    """
    y = 1.05 if x >= 1.0 else x
    return (y // 0.05) * 0.05
class stock(object):
    """Gap-up/gap-down statistics for a single ticker over a date range.

    findGaps() downloads daily OHLC data and classifies every overnight gap
    as filled or unfilled; generateHist() and printStats() report on the
    collected results. (Legacy Python 2 code: print statements, and the
    deprecated pandas.io.data source.)
    """

    # NOTE(review): the default start/end dates are evaluated once at class
    # definition time, not per instantiation — confirm this is acceptable.
    def __init__(self, stock="SPY", start=pd.datetime.today() - BDay(5000), end=pd.datetime.today()):
        self.Ticker = stock
        self.Start = start
        self.End = end
        self.Np = 0  # number of gap ups
        self.Nm = 0  # number of gap downs
        self.Cp = 0  # number of filled gap ups
        self.Cm = 0  # number of filled gap downs
        self.unfilledp = []  # (date, size) of unfilled gap ups
        self.unfilledp_percent = []  # gap size as % of previous day's range
        self.filledp = []  # (date, size) of filled gap ups
        self.filledp_percent = []  # gap size as % of previous day's range
        self.unfilledm = []  # (date, size) of unfilled gap downs
        self.unfilledm_percent = []  # gap size as % of previous day's range
        self.filledm = []  # (date, size) of filled gap downs
        self.filledm_percent = []  # gap size as % of previous day's range

    def findGaps(self):
        """Download daily data and classify every overnight gap."""
        p = web.DataReader(self.Ticker, "yahoo",self.Start,self.End)
        for i in range(len(p)-1):
            # Previous day's high-low range, used to normalise gap sizes.
            drange = p.at[p.index[i],'High'] - p.at[p.index[i],'Low']
            if p.at[p.index[i+1],'Open'] > p.at[p.index[i],'High']:
                # Gap up: today's open above yesterday's high.
                gap = float(p.at[p.index[i+1],'Open'] - p.at[p.index[i],'High'])
                self.Np += 1
                if p.at[p.index[i+1],'Low'] <= p.at[p.index[i],'High']:
                    # Filled: price traded back down into yesterday's range.
                    self.Cp += 1
                    self.filledp.append((p.index[i+1],gap))
                    self.filledp_percent.append(float(gap/drange))
                else:
                    # Unfilled
                    self.unfilledp.append((p.index[i+1],gap))
                    self.unfilledp_percent.append(gap/drange)
            if p.at[p.index[i+1],'Open'] < p.at[p.index[i],'Low']:
                # Gap down: today's open below yesterday's low.
                gap = float(p.at[p.index[i],'Low'] - p.at[p.index[i+1],'Open'])
                self.Nm += 1
                if p.at[p.index[i+1],'High'] >= p.at[p.index[i],'Low']:
                    # Filled: price traded back up into yesterday's range.
                    self.Cm += 1
                    self.filledm.append((p.index[i+1],gap))
                    self.filledm_percent.append(float(gap/drange))
                else:
                    # Unfilled
                    self.unfilledm.append((p.index[i+1],gap))
                    self.unfilledm_percent.append(gap/drange)

    def generateHist(self):
        """Print bucketed frequency tables of gap-up sizes (filled/unfilled)."""
        temp = []
        for x in self.unfilledp_percent:
            temp.append(hist(x))
        up = np.array(temp)
        print "unfilled:"
        print itemfreq(up)
        ftemp = []
        for x in self.filledp_percent:
            ftemp.append(hist(x))
        fup = np.array(ftemp)
        print "filled:"
        print itemfreq(fup)

    def printStats(self):
        """Print gap counts, fill rates and extreme unfilled gap sizes."""
        print "# Gap + :", self.Np
        print "# Gap + filled :", self.Cp
        print "% Gap + filled :", float(self.Cp)/float(self.Np)
        print "# Gap - :", self.Nm
        print "# Gap - filled :", self.Cm
        print "% Gap - filled :", float(self.Cm)/float(self.Nm)
        print "Minimun range of unfilled gap up:",min(self.unfilledp),"(",min(self.unfilledp_percent),")"
        print "Minimun range of unfilled gap down:",min(self.unfilledm),"(",min(self.unfilledm_percent),")"
        print "Maximum range of unfilled gap up:",max(self.unfilledp),"(",max(self.unfilledp_percent),")"
        print "Mamimum range of unfilled gap down:",max(self.unfilledm),"(",max(self.unfilledm_percent),")"
|
import re
import unittest
import urllib
from flask.ext.testing import TestCase
from leash import app, db, mail, views
from leash.models import Puppy, Shelter, User
from leash.forms import (
AccountForm,
EmailForm,
EmailPasswordForm,
PasswordForm,
PuppyForm,
PuppyProfileForm,
ShelterForm,
UserForm
)
from StringIO import StringIO
test_email = app.config['TEST_EMAIL']
class LeashTestCase(TestCase):
    def create_app(self):
        """Flask-Testing hook: configure the app for testing and seed an
        in-memory SQLite database with one user, shelter and puppy."""
        app.testing = True
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
        app.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = False
        app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
        # CSRF disabled so tests can POST forms directly.
        app.config['WTF_CSRF_ENABLED'] = False
        with app.app_context():
            db.drop_all()
            db.create_all()
            self.db = db
            user = User(
                name='admin',
                email='<EMAIL>',
                password='password'
            )
            shelter = Shelter(
                name='Swell Friendly Place'
            )
            puppy = Puppy(
                name='Ralph',
                gender='female',
                picture='dog-187817_640.jpg',
                shelter=shelter
            )
            db.session.add(user)
            db.session.add(shelter)
            db.session.add(puppy)
            db.session.commit()
        return app
def login(self, email, password, _next=None):
url = '/login/'
if _next:
url += '?next=' + _next
return self.client.post(
url,
data={
'email': email,
'password': password
},
follow_redirects=True
)
    def test_get_user(self):
        """The seeded admin user exists and its password hash verifies."""
        with app.app_context():
            user = User.query.filter_by(email='<EMAIL>').one()
            self.assertTrue(user.is_correct_password('password'))
    def test_leash_home(self):
        """The home page renders index.html with a 200."""
        response = self.client.get('/')
        self.assert_200(response)
        self.assert_template_used('index.html')
    def test_home_show_puppies(self):
        """The shelter listing page exposes the seeded shelter and puppy."""
        shelter = Shelter.query.first()
        response = self.client.get('/shelter/0/')
        self.assert_200(response)
        self.assert_template_used('shelters.html')
        self.assertEqual(self.get_context_variable('shelter'), shelter.name)
        self.assertEqual(self.get_context_variable('shelter_id'), shelter.id)
        p_list = self.get_context_variable('puppies')
        self.assertEqual(len(p_list), 1)
        s_list = self.get_context_variable('shelters')
        self.assertEqual(len(s_list), 1)
    def test_home_shelter_info(self):
        """The shelter info endpoint returns the shelter as JSON."""
        shelter = Shelter.query.first()
        url = '/shelter/' + str(shelter.id) + '/info/'
        response = self.client.get(url)
        self.assert_200(response)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['Shelter']['name'], shelter.name)
    def test_home_puppy_info(self):
        """The puppy info endpoint returns the puppy's fields as JSON,
        with the picture served under /media/."""
        puppy = Puppy.query.first()
        url = '/puppy/' + str(puppy.id) + '/info/'
        response = self.client.get(url)
        self.assert_200(response)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['Puppy']['name'], puppy.name)
        self.assertEqual(response.json['Puppy']['gender'], puppy.gender)
        puppy_pic = '/media/' + puppy.picture
        self.assertEqual(response.json['Puppy']['picture'], puppy_pic)
    def test_home_edit_puppy_profile(self):
        """Editing a puppy profile requires login; a valid POST persists
        the description and special needs."""
        puppy = Puppy.query.first()
        url = '/puppy/' + str(puppy.id) + '/edit_profile/'
        response = self.client.get(url)
        # Anonymous access redirects to login with a next= back-reference.
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_200(response)
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('puppy'), puppy)
            self.assertEqual(type(form), PuppyProfileForm)
            desc = 'One helluva dog'
            needs = 'To be noted as one helluva dog'
            response = self.client.post(
                url,
                data={
                    'description': desc,
                    'special_needs': needs
                },
                follow_redirects=True
            )
            self.assertIn('Puppy info saved.', response.data)
            revised_puppy = Puppy.query.first()
            self.assertEqual(revised_puppy.profile.description, desc)
            self.assertEqual(revised_puppy.profile.special_needs, needs)
            self.client.get('/logout/')
    def test_home_adopt_puppy(self):
        """Full adoption flow: signup redirect, account activation via the
        emailed confirmation link, then adopting the puppy."""
        puppy = Puppy.query.first()
        user = User.query.first()
        url = '/puppy/' + str(puppy.id) + '/adopt/'
        response = self.client.get(url)
        self.assert_redirects(response, '/signup/?next=' + url)
        with app.app_context():
            response = self.login(user.email, 'password', url)
            self.assert_template_used('account_not_activated.html')
            # An invalid activation token 404s.
            url2 = '/activate/bad-token/'
            response = self.client.get(url2)
            self.assert_404(response)
            url2 = '/activate/' + self.get_context_variable('token') + '/'
            with mail.record_messages() as outbox:
                response = self.client.get(url2)
                self.assert_redirects(response, '/')
                response = self.client.get('/')
                self.assertIn('Account activation email sent.', response.data)
                self.assertEqual(len(outbox), 1)
                # Extract the confirmation link from the sent email.
                email = str(outbox[0])
                link = re.search(r'/confirm/(.+)/\?next', email)
                url2 = '/confirm/' + link.group(1) + '/'
                response = self.client.get(url2)
                self.assert_redirects(response, '/login/')
                response = self.client.get('/login/')
                self.assertIn(
                    'Thanks! Your account has been activated.', response.data)
                self.client.get(url)
                self.assert_template_used('adopt.html')
                revised_puppy = Puppy.query.first()
                self.assertEqual(revised_puppy.adopter[0], user)
                self.assertEqual(self.get_context_variable('puppy'), revised_puppy)
                self.client.get('/logout/')
    def test_home_site_admin_shelter(self):
        """Shelter admin listing: operators see only their own shelters,
        admins see all of them."""
        user = User.query.first()
        shelter = Shelter.query.first()
        url = '/site_admin/shelters/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            shelter2 = Shelter(name='<NAME>', operator=[user, ])
            db.session.add(shelter2)
            # Default role: no access, bounced to the home page.
            response = self.login(user.email, 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('site_admin.html')
            self.assertEqual(
                self.get_context_variable('admin_area'), 'shelters')
            self.assertEqual(len(self.get_context_variable('obj_list')), 1)
            self.assertEqual(
                self.get_context_variable('obj_list')[0], shelter2)
            self.assertEqual(len(self.get_context_variable('col_list')), 9)
            self.assertEqual(
                self.get_context_variable('add_object'), '/shelter/add/')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assertEqual(len(self.get_context_variable('obj_list')), 2)
            self.assertEqual(
                self.get_context_variable('obj_list')[0], shelter2)
            self.assertEqual(self.get_context_variable('obj_list')[1], shelter)
            self.client.get('/logout/')
    def test_home_site_admin_puppy(self):
        """Puppy admin listing: operators see only puppies in their own
        shelters, admins see all puppies."""
        user = User.query.first()
        puppy = Puppy.query.first()
        url = '/site_admin/puppies/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            shelter2 = Shelter(name='<NAME>', operator=[user, ])
            puppy2 = Puppy(
                name='Jake',
                gender='male',
                picture='chihuahua-621112_640.jpg',
                shelter=shelter2
            )
            db.session.add(shelter2)
            db.session.add(puppy2)
            # Default role: no access, bounced to the home page.
            response = self.login(user.email, 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('site_admin.html')
            self.assertEqual(
                self.get_context_variable('admin_area'), 'puppies')
            self.assertEqual(len(self.get_context_variable('obj_list')), 1)
            self.assertEqual(
                self.get_context_variable('obj_list')[0], puppy2)
            self.assertEqual(len(self.get_context_variable('col_list')), 6)
            self.assertEqual(
                self.get_context_variable('add_object'), '/puppy/add/')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assertEqual(len(self.get_context_variable('obj_list')), 2)
            self.assertEqual(self.get_context_variable('obj_list')[0], puppy)
            self.assertEqual(
                self.get_context_variable('obj_list')[1], puppy2)
            self.client.get('/logout/')
    def test_home_site_admin_user(self):
        """User admin listing is admin-only: default and operator roles are
        bounced to the home page."""
        user = User.query.first()
        url = '/site_admin/users/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login(user.email, 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('index.html')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('site_admin.html')
            self.assertEqual(
                self.get_context_variable('admin_area'), 'users')
            self.assertEqual(len(self.get_context_variable('obj_list')), 1)
            self.assertEqual(self.get_context_variable('obj_list')[0], user)
            self.assertEqual(
                self.get_context_variable('add_object'), '/user/add/')
            self.client.get('/logout/')
    def test_home_edit_shelter(self):
        """Editing a shelter requires the operator role; a valid POST
        persists city and state."""
        shelter = Shelter.query.first()
        user = User.query.first()
        url = '/shelter/' + str(shelter.id) + '/edit/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            shelter.operator = [user, ]
            db.session.add(shelter)
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('edit_shelter.html')
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('shelter'), shelter)
            self.assertEqual(self.get_context_variable('form_action'), url)
            self.assertEqual(
                self.get_context_variable('form_header'), 'Edit Shelter')
            self.assertEqual(type(form), ShelterForm)
            city = 'Xanadu'
            state = 'CA'
            response = self.client.post(
                url,
                data={
                    'name': shelter.name,
                    'city': city,
                    'state': state
                },
                follow_redirects=True
            )
            self.assertIn('Shelter info saved.', response.data)
            revised_shelter = Shelter.query.first()
            self.assertEqual(revised_shelter.city, city)
            self.assertEqual(revised_shelter.state, state)
            self.client.get('/logout/')
    def test_home_add_shelter(self):
        """Adding a shelter is admin-only; a valid POST creates the shelter."""
        user = User.query.first()
        url = '/shelter/add/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            # Operators cannot add shelters either.
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('index.html')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('edit_shelter.html')
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('form_action'), url)
            self.assertEqual(
                self.get_context_variable('form_header'), 'Add Shelter')
            self.assertEqual(type(form), ShelterForm)
            s_name = '<NAME>'
            city = 'Xanadu'
            state = 'CA'
            response = self.client.post(
                url,
                data={
                    'name': s_name,
                    'city': city,
                    'state': state
                },
                follow_redirects=True
            )
            self.assertIn('Shelter info saved.', response.data)
            revised_shelter = Shelter.query.first()
            self.assertEqual(revised_shelter.name, s_name)
            self.assertEqual(revised_shelter.city, city)
            self.assertEqual(revised_shelter.state, state)
            self.client.get('/logout/')
    def test_home_delete_shelter(self):
        """Deleting a shelter is admin-only and removes it from the DB."""
        user = User.query.first()
        shelter2 = Shelter(name='<NAME>')
        db.session.add(shelter2)
        db.session.commit()
        url = '/shelter/' + str(shelter2.id) + '/delete/'
        url2 = '/site_admin/shelters/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('index.html')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_redirects(response, url2)
            response = self.client.get(url2)
            self.assertIn('Shelter deleted.', response.data)
            shelters = Shelter.query.all()
            self.assertEqual(len(shelters), 1)
            self.assertNotEqual(shelters[0], shelter2)
            self.client.get('/logout/')
    def test_home_edit_puppy(self):
        """Editing a puppy requires the operator role of its shelter; a valid
        POST (including a picture upload) persists weight and profile fields."""
        puppy = Puppy.query.first()
        shelter = Shelter.query.first()
        user = User.query.first()
        url = '/puppy/' + str(puppy.id) + '/edit/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            shelter.operator = [user, ]
            db.session.add(shelter)
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('edit_puppy.html')
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('puppy'), puppy)
            self.assertEqual(self.get_context_variable('form_action'), url)
            self.assertEqual(
                self.get_context_variable('form_header'), 'Edit Puppy')
            self.assertEqual(
                self.get_context_variable('breed_choices'),
                app.config['DOG_BREEDS']
            )
            self.assertEqual(type(form), PuppyForm)
            weight = 7.2
            desc = 'A real peach'
            # File uploads are posted as (stream, filename) tuples.
            picture = (StringIO('Cute dog photo'), 'dog.png')
            needs = 'To ride in handbags'
            response = self.client.post(
                url,
                data={
                    'name': puppy.name,
                    'weight': weight,
                    'picture': picture,
                    'description': desc,
                    'special_needs': needs
                },
                follow_redirects=True
            )
            self.assertIn('Puppy info saved.', response.data)
            revised_puppy = Puppy.query.first()
            self.assertEqual(float(revised_puppy.weight), weight)
            self.assertEqual(revised_puppy.profile.description, desc)
            self.assertEqual(revised_puppy.profile.special_needs, needs)
            self.client.get('/logout/')
    def test_home_add_puppy(self):
        """Adding a puppy requires the operator role; a valid POST creates
        the puppy with a renamed uploaded picture."""
        user = User.query.first()
        shelter = Shelter.query.first()
        shelter.operator = [user, ]
        db.session.add(shelter)
        url = '/puppy/add/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('edit_puppy.html')
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('form_action'), url)
            self.assertEqual(
                self.get_context_variable('form_header'), 'Add Puppy')
            self.assertEqual(type(form), PuppyForm)
            p_name = 'Jake'
            gender = 'male'
            picture = (StringIO('Cute dog photo'), 'dog.png')
            response = self.client.post(
                url,
                data={
                    'name': p_name,
                    'gender': gender,
                    'picture': picture,
                    'shelter': shelter.id
                },
                follow_redirects=True
            )
            self.assertIn('Puppy info saved.', response.data)
            puppy = Puppy.query.filter_by(name='Jake').one()
            self.assertEqual(puppy.name, p_name)
            self.assertEqual(puppy.gender, gender)
            # Uploaded pictures are stored with a prefixed filename.
            self.assertIn('_dog.png', puppy.picture)
            self.client.get('/logout/')
    def test_home_delete_puppy(self):
        """Deleting a puppy is admin-only and removes it from the DB."""
        user = User.query.first()
        puppy2 = Puppy(name='Balthasar', gender='female', picture='dog.png')
        db.session.add(puppy2)
        db.session.commit()
        url = '/puppy/' + str(puppy2.id) + '/delete/'
        url2 = '/site_admin/puppies/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('index.html')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_redirects(response, url2)
            response = self.client.get(url2)
            self.assertIn('Puppy deleted.', response.data)
            puppies = Puppy.query.all()
            self.assertEqual(len(puppies), 1)
            self.assertNotEqual(puppies[0], puppy2)
            self.client.get('/logout/')
    def test_home_edit_user(self):
        """Editing a user is admin-only; a valid POST persists the new name."""
        user = User.query.first()
        url = '/user/' + str(user.id) + '/edit/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
        with app.app_context():
            response = self.login('<EMAIL>', 'password', url)
            self.assert_template_used('index.html')
            user.role = 'operator'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('index.html')
            user.role = 'admin'
            db.session.add(user)
            response = self.client.get(url)
            self.assert_template_used('edit_user.html')
            form = self.get_context_variable('form')
            self.assertEqual(self.get_context_variable('user'), user)
            self.assertEqual(self.get_context_variable('form_action'), url)
            self.assertEqual(
                self.get_context_variable('form_header'), 'Edit User')
            self.assertEqual(type(form), UserForm)
            u_name = '<NAME>'
            response = self.client.post(
                url,
                data={
                    'name': u_name,
                    'email': user.email,
                    'password': 'password'
                },
                follow_redirects=True
            )
            self.assertIn('User info saved.', response.data)
            revised_user = User.query.first()
            self.assertEqual(revised_user.name, u_name)
            self.client.get('/logout/')
def test_home_add_user(self):
    """Add-user page is admin-only; a valid POST creates the account."""
    user = User.query.first()
    url = '/user/add/'
    # Anonymous access redirects to login.
    response = self.client.get(url)
    self.assert_redirects(
        response, '/login/?next=' + urllib.quote_plus(url))
    with app.app_context():
        response = self.login('<EMAIL>', 'password', url)
        # Default role: denied (sent to home page).
        self.assert_template_used('index.html')
        user.role = 'operator'
        db.session.add(user)
        response = self.client.get(url)
        # 'operator' role: still denied.
        self.assert_template_used('index.html')
        user.role = 'admin'
        db.session.add(user)
        response = self.client.get(url)
        self.assert_template_used('edit_user.html')
        form = self.get_context_variable('form')
        self.assertEqual(self.get_context_variable('form_action'), url)
        self.assertEqual(
            self.get_context_variable('form_header'), 'Add User')
        self.assertEqual(type(form), UserForm)
        u_name = '<NAME>'
        email = '<EMAIL>'
        password = '<PASSWORD>'
        role = 'default'
        picture = None
        response = self.client.post(
            url,
            data={
                'name': u_name,
                'email': email,
                'password': password,
                'role': role,
                'picture': picture
            },
            follow_redirects=True
        )
        self.assertIn('User saved.', response.data)
        # The new account exists with the submitted data and password.
        user2 = User.query.filter_by(name='<NAME>').one()
        self.assertEqual(user2.name, u_name)
        self.assertEqual(user2.email, email)
        self.assertTrue(user2.is_correct_password('<PASSWORD>'))
        self.client.get('/logout/')
def test_home_delete_user(self):
    """Delete-user endpoint is admin-only; deleting removes exactly that user."""
    user = User.query.first()
    # A second, disposable user to be deleted.
    user2 = User(
        name='<NAME>', email='<EMAIL>', password='A')
    db.session.add(user2)
    db.session.commit()
    url = '/user/' + str(user2.id) + '/delete/'
    url2 = '/site_admin/users/'
    # Anonymous access redirects to login.
    response = self.client.get(url)
    self.assert_redirects(
        response, '/login/?next=' + urllib.quote_plus(url))
    with app.app_context():
        response = self.login('<EMAIL>', 'password', url)
        # Default role: denied.
        self.assert_template_used('index.html')
        user.role = 'operator'
        db.session.add(user)
        response = self.client.get(url)
        # 'operator' role: still denied.
        self.assert_template_used('index.html')
        user.role = 'admin'
        db.session.add(user)
        response = self.client.get(url)
        # Successful delete bounces back to the admin user list.
        self.assert_redirects(response, url2)
        response = self.client.get(url2)
        self.assertIn('User deleted.', response.data)
        # Only the original user remains.
        users = User.query.all()
        self.assertEqual(len(users), 1)
        self.assertEqual(users[0], user)
        self.assertNotEqual(users[0], user2)
        self.client.get('/logout/')
def test_home_distribute_puppies(self):
    """Distribute endpoint (admin-only) rebalances puppies across shelters:
    six puppies over three shelters ends with two per shelter."""
    user = User.query.first()
    shelter2 = Shelter(name='Shiny Happy Place')
    shelter3 = Shelter(name='Super Awesome Place')
    # Five extra puppies, all initially crammed into shelter2.
    puppy2 = Puppy(
        name='Alice',
        gender='female',
        picture='dog2.png',
        shelter=shelter2
    )
    puppy3 = Puppy(
        name='Benny',
        gender='male',
        picture='dog3.png',
        shelter=shelter2
    )
    puppy4 = Puppy(
        name='Cosmo',
        gender='male',
        picture='dog4.png',
        shelter=shelter2
    )
    puppy5 = Puppy(
        name='Diana',
        gender='female',
        picture='dog5.png',
        shelter=shelter2
    )
    puppy6 = Puppy(
        name='Elise',
        gender='female',
        picture='dog6.png',
        shelter=shelter2
    )
    db.session.add(shelter2)
    db.session.add(shelter3)
    db.session.add(puppy2)
    db.session.add(puppy3)
    db.session.add(puppy4)
    db.session.add(puppy5)
    db.session.add(puppy6)
    url = '/puppy/distribute/'
    url2 = '/site_admin/shelters/'
    # Anonymous access redirects to login.
    response = self.client.get(url)
    self.assert_redirects(
        response, '/login/?next=' + urllib.quote_plus(url))
    with app.app_context():
        response = self.login('<EMAIL>', 'password', url)
        # Default role: denied.
        self.assert_template_used('index.html')
        user.role = 'operator'
        db.session.add(user)
        response = self.client.get(url)
        # 'operator' role: still denied.
        self.assert_template_used('index.html')
        user.role = 'admin'
        db.session.add(user)
        response = self.client.get(url)
        self.assert_redirects(response, url2)
        msg = 'Puppies distributed. Consolatory chew toys '
        msg += 'have been provided.'
        response = self.client.get(url2)
        self.assertIn(msg, response.data)
        # Each of the three shelters ends with exactly two puppies.
        s = Shelter.query.filter_by(name='Swell Friendly Place').one()
        self.assertEqual(s.puppy_count, 2)
        s = Shelter.query.filter_by(name='Shiny Happy Place').one()
        self.assertEqual(s.puppy_count, 2)
        s = Shelter.query.filter_by(name='Super Awesome Place').one()
        self.assertEqual(s.puppy_count, 2)
        self.client.get('/logout/')
def test_auth_signup(self):
    """Signup form renders and a valid POST creates an unconfirmed account."""
    url = '/signup/'
    response = self.client.get(url)
    self.assert_template_used('signup.html')
    form = self.get_context_variable('form')
    self.assertEqual(type(form), EmailPasswordForm)
    response = self.client.post(
        url,
        data={
            'email': test_email,
            'password': 'password'
        },
        follow_redirects=True
    )
    self.assert_200(response)
    self.assert_template_used('index.html')
    # The account is stored with a working password hash ...
    u = User.query.filter_by(email=test_email).one()
    self.assertTrue(u.is_correct_password('password'))
    # ... but email confirmation is still pending right after signup.
    self.assertFalse(u.email_confirmed)
def test_auth_send_email_confirmation(self):
    """Confirmation email carries a working activation link; bad tokens 404."""
    user = User(email=test_email, password='password')
    db.session.add(user)
    # An invalid token must not activate anything.
    response = self.client.get('/confirm/bad-token/')
    self.assert_404(response)
    with mail.record_messages() as outbox:
        views.auth.send_email_confirmation(user)
        self.assertEqual(len(outbox), 1)
        email = str(outbox[0])
    # Extract the signed token from the mailed link and follow it.
    link = re.search(r'/confirm/(.+)/\?next', email)
    url = '/confirm/' + link.group(1) + '/'
    response = self.client.get(url)
    self.assert_redirects(response, '/login/')
    response = self.client.get('/login/')
    self.assertIn(
        'Thanks! Your account has been activated.', response.data)
def test_auth_login(self):
    """Login: wrong password re-prompts; success honors the ?next target."""
    user = User.query.first()
    url = '/login/'
    response = self.client.get(url)
    form = self.get_context_variable('form')
    self.assert_200(response)
    self.assertEqual(type(form), EmailPasswordForm)
    self.assert_template_used('login.html')
    # Bad credentials: back to the login page.
    response = self.client.post(
        url,
        data={
            'email': user.email,
            'password': '<PASSWORD>'
        }
    )
    self.assert_redirects(response, '/login/')
    # Good credentials: home page by default ...
    response = self.client.post(
        url,
        data={
            'email': user.email,
            'password': 'password'
        }
    )
    self.assert_redirects(response, '/')
    # ... or the ?next= URL when one was given.
    response = self.client.post(
        url + '?next=/shelter/0/',
        data={
            'email': user.email,
            'password': 'password'
        }
    )
    self.assert_redirects(response, '/shelter/0/')
def test_auth_logout(self):
    """Logging out drops the admin access gained during the session."""
    user = User.query.first()
    with app.app_context():
        self.login(user.email, 'password')
        self.assert_template_used('index.html')
        user.role = 'admin'
        db.session.add(user)
        # Admin page is reachable while logged in as admin.
        url = '/site_admin/shelters/'
        response = self.client.get(url)
        self.assert_200(response)
        self.assert_template_used('site_admin.html')
        response = self.client.get('/logout/')
        self.assert_redirects(response, '/')
        # After logout the same page requires login again.
        url = '/site_admin/shelters/'
        response = self.client.get(url)
        self.assert_redirects(
            response, '/login/?next=' + urllib.quote_plus(url))
def test_auth_change_password(self):
    """Change-password requires login; the new password works afterwards."""
    user = User.query.first()
    url = '/change_password/'
    # Anonymous access redirects to login.
    response = self.client.get(url)
    self.assert_redirects(
        response, '/login/?next=' + urllib.quote_plus(url))
    with app.app_context():
        self.login(user.email, 'password')
        self.assert_template_used('index.html')
        response = self.client.get(url)
        form = self.get_context_variable('form')
        self.assert_200(response)
        self.assert_template_used('change_password.html')
        self.assertEqual(type(form), PasswordForm)
        response = self.client.post(
            url,
            data={
                'password': '<PASSWORD>!'
            },
            follow_redirects=True
        )
        self.assert_template_used('account_profile.html')
        self.assertIn('Password changed successfully.', response.data)
        self.client.get('/logout/')
        # Re-login with the new password must succeed.
        self.login(user.email, '<PASSWORD>!')
        self.assert_template_used('index.html')
        self.client.get('/logout/')
def test_auth_reset_password(self):
    """Password reset: blocked for unconfirmed accounts; otherwise mails a
    reset link whose form sets a working new password."""
    user = User.query.first()
    url = '/reset_password/'
    response = self.client.get(url)
    form = self.get_context_variable('form')
    self.assert_200(response)
    self.assert_template_used('reset_password.html')
    self.assertEqual(type(form), EmailForm)
    # Unconfirmed account: reset is refused.
    response = self.client.post(
        url,
        data={
            'email': user.email
        },
        follow_redirects=True
    )
    self.assert_template_used('account_not_activated.html')
    user.email_confirmed = True
    db.session.add(user)
    with mail.record_messages() as outbox:
        response = self.client.post(
            url,
            data={
                'email': user.email
            },
            follow_redirects=True
        )
        self.assertEqual(len(outbox), 1)
        email = str(outbox[0])
    # Follow the mailed reset link and submit a new password.
    link = re.search(r'/reset_confirm/(.+)/">', email)
    url = '/reset_confirm/' + link.group(1) + '/'
    response = self.client.get(url)
    form = self.get_context_variable('form')
    self.assert_template_used('new_password.html')
    self.assertEqual(type(form), PasswordForm)
    response = self.client.post(
        url,
        data={
            'password': '<PASSWORD>'
        },
        follow_redirects=True
    )
    self.assert_template_used('login.html')
    msg = 'Password changed successfully. Please '
    msg += 'login with your new password.'
    self.assertIn(msg, response.data)
    # NOTE(review): the POST above sends a redacted placeholder while this
    # login uses the literal 'NewPassword' -- these two values matched in the
    # original fixture; confirm before editing either string.
    self.login(user.email, 'NewPassword')
    self.assert_template_used('index.html')
    self.client.get('/logout/')
def test_auth_confirm_password_reset(self):
    """A bogus reset token must 404 rather than render the reset form."""
    self.assert_404(self.client.get('/reset_confirm/bad-token/'))
def test_auth_view_account(self):
    """Account page shows the user's data and adopted puppies; POST edits it."""
    user = User.query.first()
    puppy = Puppy.query.first()
    # Make the user the adopter so the puppy shows up on the page.
    puppy.adopter = [user, ]
    db.session.add(puppy)
    with app.app_context():
        self.login(user.email, 'password')
        url = '/account/'
        response = self.client.get(url)
        form = self.get_context_variable('form')
        self.assert_200(response)
        self.assert_template_used('account_profile.html')
        self.assertEqual(type(form), AccountForm)
        # Form is pre-filled and the adopted puppy is listed.
        self.assertEqual(form.name.data, user.name)
        self.assertEqual(form.email.data, user.email)
        p_list = self.get_context_variable('puppies')
        self.assertEqual(p_list[0], puppy)
        u_name = 'Admin User'
        u_email = '<EMAIL>'
        response = self.client.post(
            url,
            data={
                'name': u_name,
                'email': u_email
            },
            follow_redirects=True
        )
        form = self.get_context_variable('form')
        self.assert_template_used('account_profile.html')
        self.assertIn('Account info changed successfully.', response.data)
        # Both the re-rendered form and the database reflect the edits.
        self.assertEqual(form.name.data, u_name)
        self.assertEqual(form.email.data, u_email)
        revised_user = User.query.first()
        self.assertEqual(revised_user.name, u_name)
        self.assertEqual(revised_user.email, u_email)
        self.client.get('/logout/')
# Allow running this test module directly with `python <module>.py`.
if __name__ == '__main__':
    unittest.main()
|
<filename>test/cli/package_command/pkg_info/test_pkg_info_s.py
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
test get single source package info
"""
from pathlib import Path
from requests import RequestException, Response
from packageship.application.cli.commands.singlepkg import SingleCommand
from packageship.application.common.exc import ElasticSearchQueryException
from test.cli import DATA_BASE_INFO
from test.cli.package_command import PackageTestBase
# Directory holding the mocked Elasticsearch responses used by these tests.
MOCK_DATA_FOLDER = Path(Path(__file__).parent, "mock_data")
# Directory holding the expected rendered CLI output for comparison.
EXPECTED_DATA_FOLDER = Path(Path(__file__).parent, "mock_data", "expected_data")
class TestSinglePackage(PackageTestBase):
    """
    Tests for querying a single *source* package (`-s`) via the pkgship CLI.

    Each test sets `command_params` and `excepted_str`, mocks the
    Elasticsearch search (and, for remote mode, `requests.get`), then lets
    `assert_result()` compare the rendered CLI output.
    """
    # CLI command class under test.
    cmd_class = SingleCommand

    def test_true_params(self):
        """test true params"""
        # Happy path: mocked ES data should render the expected text.
        self.excepted_str = self.read_file_content(
            "src_true_params.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False)
        self.command_params = ["Judy", "os-version", "-s"]
        self.mock_es_search(side_effect=self.read_file_content(
            "pkg_info_s.json",
            folder=MOCK_DATA_FOLDER))
        self.assert_result()

    def test_wrong_dbs(self):
        """test wrong dbs"""
        # An unknown database name yields a parameter error message.
        self.excepted_str = """
ERROR_CONTENT :Request parameter error
HINT :Please check the parameter is valid and query again"""
        self.command_params = ["Judy", "version123", "-s"]
        self.mock_es_search(side_effect=self.read_file_content(
            "pkg_info_s.json",
            folder=MOCK_DATA_FOLDER))
        self.assert_result()

    def test_not_exists_package(self):
        """test not exists package"""
        # Second mocked ES response is empty -> package-not-found error.
        self.excepted_str = """
ERROR_CONTENT :The querying package does not exist in the databases
HINT :Use the correct package name and try again"""
        self.command_params = ["Judy", "os-version", "-s"]
        single_package_not_exists_info = self.read_file_content("pkg_info_s.json",
                                                                folder=MOCK_DATA_FOLDER)[:1]
        single_package_not_exists_info.append({})
        self.mock_es_search(side_effect=single_package_not_exists_info)
        self.assert_result()

    def test_empty_subpack_for_src(self):
        """test empty subpack for src"""
        def generate_empty_subpack_data():
            """generate empty subpack data"""
            # Strip the subpackage list from the mocked source package.
            empty_requires_single_src = self.read_file_content(
                "pkg_info_s.json",
                folder=MOCK_DATA_FOLDER)
            empty_requires_single_src[1]["hits"]["hits"][0]["_source"]["subpacks"] = []
            return empty_requires_single_src
        self.excepted_str = self.read_file_content(
            "src_empty_subpack.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False)
        self.command_params = ["Judy", "os-version", "-s"]
        self.mock_es_search(side_effect=generate_empty_subpack_data())
        self.assert_result()

    def test_empty_provides_requires(self):
        """test empty provides requires"""
        # Fixture with no provides/requires must still render cleanly.
        self.excepted_str = self.read_file_content(
            "src_empty_provides_requires.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False)
        self.command_params = ["Judy", "os-version", "-s"]
        empty_requires_single_src = self.read_file_content("empty_provides_requires.json",
                                                           folder=MOCK_DATA_FOLDER)
        self.mock_es_search(side_effect=empty_requires_single_src)
        self.assert_result()

    def test_error_single_src_package(self):
        """test error single src package"""
        self.excepted_str = """
ERROR_CONTENT :The querying package does not exist in the databases
HINT :Use the correct package name and try again
"""
        self.command_params = ["Judy", "os-version", "-s"]
        # Corrupt the second mocked response to simulate malformed ES data.
        error_single_src_info = self.read_file_content("pkg_info_s.json",
                                                       folder=MOCK_DATA_FOLDER)
        error_single_src_info[1] = {None}
        self.mock_es_search(side_effect=error_single_src_info)
        self.assert_result()

    def test_raise_es_error(self):
        """test_raise_es_error"""
        self.command_params = ["Judy", "os-version", "-s"]
        # The ES query raising maps to a database-connection error message.
        self.mock_es_search(side_effect=[DATA_BASE_INFO, ElasticSearchQueryException])
        self.excepted_str = """
ERROR_CONTENT :Failed to Connect the database
HINT :Check the connection
"""
        self.assert_result()

    def test_request_raise_requestexception(self):
        """test_request_raise_requestexception"""
        self.command_params = ["Judy", "os-version", "-s"]
        self.mock_es_search(side_effect=self.read_file_content("pkg_info_s.json",
                                                               folder=MOCK_DATA_FOLDER))
        self.excepted_str = """
ERROR_CONTENT :
HINT :The remote connection is abnormal, please check the 'remote_host' parameter value to ensure the connectivity of the remote address
"""
        # Remote-mode HTTP call failing with a requests error.
        self.mock_requests_get(side_effect=[RequestException])
        self.assert_result()

    def test_request_text_raise_jsonerror(self):
        """test_request_text_raise_jsonerror"""
        self.command_params = ["Judy", "os-version", "-s"]

        class Resp:
            # Deliberately invalid JSON body (single quotes, trailing comma).
            text = """{"test":\'123\',}"""
            status_code = 200
        self.excepted_str = """
ERROR_CONTENT :{"test":'123',}
HINT :The content is not a legal json format,please check the parameters is valid
"""
        self.mock_requests_get(return_value=Resp())
        self.assert_result()

    def test_request_status_429(self):
        """test_request_status_429"""

        class Resp:
            # Rate-limit response from the remote service.
            status_code = 429
        self.command_params = ["Judy", "os-version", "-s"]
        self.excepted_str = """
Too many requests in a short time, please request again later
"""
        self.mock_requests_get(return_value=Resp())
        self.assert_result()

    def test_request_status_500(self):
        """test_request_status_500"""
        self.command_params = ["Judy", "os-version", "-s"]
        self.excepted_str = """
ERROR_CONTENT :500 Server Error: None for url: None
HINT :The remote connection is abnormal, please check the 'remote_host' parameter value to ensure the connectivity of the remote address
"""
        # A real requests.Response so the raise_for_status() text matches.
        r = Response()
        r.status_code = 500
        self.mock_requests_get(return_value=r)
        self.assert_result()
|
"""Script for plotting the results of the 'suite' benchmark.
Invoke without parameters for usage hints.
:Author: <NAME>
:Date: 2010-06-01
"""
from __future__ import print_function
import matplotlib as mpl
from pylab import *
KB_ = 1024
MB_ = 1024 * KB_
GB_ = 1024 * MB_

NCHUNKS = 128  # keep in sync with bench.c

# Plot styling shared by the __main__ section below.
linewidth = 2
#markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^']
#markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ]
markers = ['s', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',']
markersize = 8


def get_values(filename):
    """Parse one 'suite' benchmark output file.

    Returns ``(nthreads, values)`` where ``nthreads`` is the thread count of
    the last run header seen and ``values`` maps:

    * ``"memcpyw"`` / ``"memcpyr"`` -> lists of memcpy speeds (GB/s),
    * ``"size"`` (MB), ``"elsize"``, ``"sbits"``, ``"codec"``, ``"shuffle"``
      -> metadata of the last run header,
    * each thread count (int) -> ``(ratios, write_speeds, read_speeds)``.

    Speeds in the file are MB/s; they are converted to GB/s here.
    """
    values = {"memcpyw": [], "memcpyr": []}
    # Use a context manager so the handle is closed even when a malformed
    # line raises mid-parse (the previous version leaked it on exceptions).
    with open(filename) as f:
        for line in f:
            if line.startswith('-->'):
                # Run header: "--> nthreads, size, elsize, sbits, codec, shuffle"
                tmp = line.split('-->')[1]
                parts = tmp.split(', ')
                nthreads, size, elsize, sbits, codec, shuffle = parts[:6]
                nthreads, size, elsize, sbits = map(int, (nthreads, size, elsize, sbits))
                values["size"] = size / MB_
                values["elsize"] = elsize
                values["sbits"] = sbits
                values["codec"] = codec
                values["shuffle"] = shuffle
                # Start fresh accumulators for this thread count.
                (ratios, speedsw, speedsr) = ([], [], [])
                values[nthreads] = (ratios, speedsw, speedsr)
            elif line.startswith('memcpy(write):'):
                tmp = line.split(',')[1]
                memcpyw = float(tmp.split(' ')[1])
                values["memcpyw"].append(memcpyw / 1024)
            elif line.startswith('memcpy(read):'):
                tmp = line.split(',')[1]
                memcpyr = float(tmp.split(' ')[1])
                values["memcpyr"].append(memcpyr / 1024)
            elif line.startswith('comp(write):'):
                tmp = line.split(',')[1]
                speedw = float(tmp.split(' ')[1])
                # The compression ratio is the last ':'-separated field.
                ratio = float(line.split(':')[-1])
                speedsw.append(speedw / 1024)
                ratios.append(ratio)
            elif line.startswith('decomp(read):'):
                tmp = line.split(',')[1]
                speedr = float(tmp.split(' ')[1])
                speedsr.append(speedr / 1024)
                if "OK" not in line:
                    print("WARNING! OK not found in decomp line!")
    return nthreads, values
def show_plot(plots, yaxis, legends, gtitle, xmax=None, ymax=None):
    """Finish the current pylab figure: label axes, attach the legend, then
    either save it to the module-global ``outfile`` or display it.

    NOTE(review): ``yaxis`` is accepted but never used, and ``outfile`` is a
    global set in the __main__ block below -- confirm before reusing this
    helper outside this script.
    """
    xlabel('Compresssion ratio')
    ylabel('Speed (GB/s)')
    title(gtitle)
    xlim(0, xmax)
    ylim(0, ymax)
    grid(True)

    # legends = [f[f.find('-'):f.index('.out')] for f in filenames]
    # legends = [l.replace('-', ' ') for l in legends]
    #legend([p[0] for p in plots], legends, loc = "upper left")
    # Skip the bare memcpy reference line (a Line2D appended by __main__)
    # when building the legend entries.
    legend([p[0] for p in plots
            if not isinstance(p, mpl.lines.Line2D)],
           legends, loc = "best")
    #subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2)
    if outfile:
        print("Saving plot to:", outfile)
        savefig(outfile, dpi=64)
    else:
        show()
if __name__ == '__main__':
    from optparse import OptionParser

    usage = "usage: %prog [-r] [-o outfile] [-t title ] [-d|-c] filename"
    compress_title = 'Compression speed'
    decompress_title = 'Decompression speed'
    yaxis = 'No axis name'

    parser = OptionParser(usage=usage)
    parser.add_option('-o',
                      '--outfile',
                      dest='outfile',
                      help=('filename for output (many extensions '
                            'supported, e.g. .png, .jpg, .pdf)'))
    parser.add_option('-t',
                      '--title',
                      dest='title',
                      help='title of the plot',)
    parser.add_option('-l',
                      '--limit',
                      dest='limit',
                      help='expression to limit number of threads shown',)
    parser.add_option('-x',
                      '--xmax',
                      dest='xmax',
                      help='limit the x-axis',
                      default=None)
    parser.add_option('-y',
                      '--ymax',
                      dest='ymax',
                      help='limit the y-axis',
                      default=None)
    parser.add_option('-r', '--report', action='store_true',
                      dest='report',
                      help='generate file for reporting ',
                      default=False)
    parser.add_option('-d', '--decompress', action='store_true',
                      dest='dspeed',
                      help='plot decompression data',
                      default=False)
    parser.add_option('-c', '--compress', action='store_true',
                      dest='cspeed',
                      help='plot compression data',
                      default=False)
    (options, args) = parser.parse_args()

    # Exactly one input file is required.
    if len(args) == 0:
        parser.error("No input arguments")
    elif len(args) > 1:
        parser.error("Too many input arguments")
    else:
        pass

    # -r and -o are mutually exclusive, as are -d and -c.
    if options.report and options.outfile:
        parser.error("Can only select one of [-r, -o]")
    if options.dspeed and options.cspeed:
        parser.error("Can only select one of [-d, -c]")
    elif options.cspeed:
        options.dspeed = False
        plot_title = compress_title
    else:  # either neither or dspeed
        # Decompression is the default plot when neither flag is given.
        options.dspeed = True
        plot_title = decompress_title

    filename = args[0]
    cspeed = options.cspeed
    dspeed = options.dspeed
    # -o names the output explicitly; -r derives it from the input filename;
    # otherwise plot interactively. `outfile` is read by show_plot() above.
    if options.outfile:
        outfile = options.outfile
    elif options.report:
        if cspeed:
            outfile = filename[:filename.rindex('.')] + '-compr.png'
        else:
            outfile = filename[:filename.rindex('.')] + '-decompr.png'
    else:
        outfile = None

    plots = []
    legends = []
    nthreads, values = get_values(filename)
    #print("Values:", values)

    if options.limit:
        # NOTE(review): eval of a command-line expression -- acceptable for a
        # local plotting tool, unsafe with untrusted input.
        thread_range = eval(options.limit)
    else:
        thread_range = range(1, nthreads+1)

    if options.title:
        plot_title = options.title
    else:
        plot_title += " (%(size).1f MB, %(elsize)d bytes, %(sbits)d bits), %(codec)s %(shuffle)s" % values

    gtitle = plot_title
    # One curve (speed vs. compression ratio) per thread count.
    for nt in thread_range:
        #print("Values for %s threads --> %s" % (nt, values[nt]))
        (ratios, speedw, speedr) = values[nt]
        if cspeed:
            speed = speedw
        else:
            speed = speedr
        #plot_ = semilogx(ratios, speed, linewidth=2)
        plot_ = plot(ratios, speed, linewidth=2)
        plots.append(plot_)
        nmarker = nt
        if nt >= len(markers):
            # Wrap around when there are more threads than marker styles.
            nmarker = nt%len(markers)
        setp(plot_, marker=markers[nmarker], markersize=markersize,
             linewidth=linewidth)
        legends.append("%d threads" % nt)

    # Add a horizontal memcpy reference line (mean over the measured runs).
    if cspeed:
        mean = np.mean(values["memcpyw"])
        message = "memcpy (write to memory)"
    else:
        mean = np.mean(values["memcpyr"])
        message = "memcpy (read from memory)"
    plot_ = axhline(mean, linewidth=3, linestyle='-.', color='black')
    text(4.0, mean+.4, message)
    plots.append(plot_)
    show_plot(plots, yaxis, legends, gtitle,
              xmax=int(options.xmax) if options.xmax else None,
              ymax=int(options.ymax) if options.ymax else None)
|
import os
import sys
import math
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.multiprocessing import Queue, Process
sys.path.insert(0, '../lib')
sys.path.insert(0, '../model')
# from data.CrowdHuman import CrowdHuman
from data.CrowdHuman_json import CrowdHuman
from utils import misc_utils, nms_utils
from evaluate import compute_JI, compute_APMR
from evaluate import compute_MMR
from det_oprs.bbox_opr import Pointlist_dis, matcher
from scipy.optimize import linear_sum_assignment
# Sentinel distance marking impossible body/face pairings in the assignment
# matrix consumed by match_body_face_pos below.
MAX_VAL = 8e6
def eval_all(args, config, network):
    """Run inference over the CrowdHuman eval split (one worker per device),
    dump detection and body/face match JSON files, and print AP/MR metrics.
    """
    # model_path
    saveDir = os.path.join('../model', args.model_dir, config.model_dir)
    evalDir = os.path.join('../model', args.model_dir, config.eval_dir)
    misc_utils.ensure_dir(evalDir)
    # `resume_weights` is either an epoch id or a direct .pth path.
    if 'pth' not in args.resume_weights:
        model_file = os.path.join(saveDir,
                                  'dump-{}.pth'.format(args.resume_weights))
    else:
        model_file = args.resume_weights
    assert os.path.exists(model_file)
    # get devices
    str_devices = args.devices
    devices = misc_utils.device_parser(str_devices)
    # load data
    crowdhuman = CrowdHuman(config, if_train=False)
    #crowdhuman.records = crowdhuman.records[:10]
    # multiprocessing: split the dataset into contiguous per-device shards.
    num_devs = len(devices)
    len_dataset = len(crowdhuman)
    num_image = math.ceil(len_dataset / num_devs)
    result_queue = Queue(500)
    result_queue_match = Queue(500)
    procs = []
    all_results = []
    all_results_match = []
    for i in range(num_devs):
        start = i * num_image
        end = min(start + num_image, len_dataset)
        # 'pos' and 'bfj' heads produce different output layouts, hence the
        # two inference workers.
        if config.network == 'pos':
            proc = Process(target=inference_pos, args=(
                config, network, model_file, devices[i], crowdhuman, start, end, result_queue, result_queue_match))
        else:
            proc = Process(target=inference_bfj, args=(
                config, network, model_file, devices[i], crowdhuman, start, end, result_queue, result_queue_match))
        proc.start()
        procs.append(proc)
    pbar = tqdm(total=len_dataset, ncols=50)
    # Drain one per-image detection dict and its match list per iteration.
    for i in range(len_dataset):
        t = result_queue.get()
        all_results.append(t)
        t_match = result_queue_match.get()
        all_results_match.extend(t_match)
        pbar.update(1)
    pbar.close()
    for p in procs:
        p.join()
    # fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
    # NOTE(review): the dump name is hard-coded to 30 (the commented line
    # above used args.resume_weights) -- looks like a debug leftover, confirm.
    fpath = os.path.join(evalDir, 'dump-{}.json'.format(30))
    misc_utils.save_json_lines(all_results, fpath)
    fpath_match = os.path.join(evalDir, 'bf_match_bbox.json')
    misc_utils.save_json(all_results_match, fpath_match)
    # evaluation
    # res_line, JI = compute_JI.evaluation_all(fpath, 'box')
    print('processing body...')
    AP, MR = compute_APMR.compute_APMR(fpath, config.eval_source, 'box')
    line = 'BODY-->AP:{:.4f}, MR:{:.4f}.'.format(AP, MR)
    print(line)
    print('processing face...')
    AP, MR = compute_APMR.compute_APMR(fpath, config.eval_source, 'box', if_face=True)
    line = 'FACE-->AP:{:.4f}, MR:{:.4f}.'.format(AP, MR)
    print(line)
    # NOTE(review): the MMR return value is unused here -- presumably
    # compute_MMR reports its result itself; confirm.
    MMR = compute_MMR.compute_MMR(fpath_match, config.eval_source)
def inference_pos(config, network, model_file, device, dataset, start, end, result_queue, result_queue_match):
    """Worker process: run the 'pos' network over dataset[start:end] on one GPU.

    Prediction rows are [x1, y1, x2, y2, score, class] (set_nms appends an
    identity column).  Per-image detection dicts go to `result_queue`; the
    body/face match records go to `result_queue_match`.
    """
    torch.set_default_tensor_type('torch.FloatTensor')
    torch.multiprocessing.set_sharing_strategy('file_system')
    # init model
    net = network()
    net.cuda(device)
    net = net.eval()
    check_point = torch.load(model_file)
    net.load_state_dict(check_point['state_dict'])
    # init data: restrict this worker to its shard.
    dataset.records = dataset.records[start:end];
    data_iter = torch.utils.data.DataLoader(dataset=dataset, shuffle=False)
    # inference
    for (image, gt_boxes, im_info, ID, image_id) in data_iter:
        pred_boxes, class_num = net(image.cuda(device), im_info.cuda(device))
        scale = im_info[0, 2]
        if config.test_nms_method == 'set_nms':
            # Set-NMS: carry an identity column so boxes from the same
            # proposal set do not suppress each other.
            assert pred_boxes.shape[-1] > 6, "Not EMD Network! Using normal_nms instead."
            assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
            top_k = pred_boxes.shape[-1] // 6
            n = pred_boxes.shape[0]
            pred_boxes = pred_boxes.reshape(-1, 6)
            idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)
            pred_boxes = np.hstack((pred_boxes, idents))
            keep = pred_boxes[:, 4] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
            result = []
            for classid in range(class_num):
                keep = pred_boxes[:, 5] == (classid + 1)
                class_boxes = pred_boxes[keep]
                keep = nms_utils.set_cpu_nms(class_boxes, 0.5)
                class_boxes = class_boxes[keep]
                result.append(class_boxes)
            pred_boxes = np.vstack(result)
        elif config.test_nms_method == 'normal_nms':
            # Standard per-class NMS after score thresholding.
            assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
            pred_boxes = pred_boxes.reshape(-1, 6)
            keep = pred_boxes[:, 4] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
            result = []
            for classid in range(class_num):
                keep = pred_boxes[:, 5] == (classid + 1)
                class_boxes = pred_boxes[keep]
                keep = nms_utils.cpu_nms(class_boxes, config.test_nms)
                class_boxes = class_boxes[keep]
                result.append(class_boxes)
            pred_boxes = np.vstack(result)
        elif config.test_nms_method == 'none':
            # Score threshold only, no suppression.
            assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
            pred_boxes = pred_boxes.reshape(-1, 6)
            keep = pred_boxes[:, 4] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
        else:
            raise ValueError('Unknown NMS method.')
        #if pred_boxes.shape[0] > config.detection_per_image and \
        #    config.test_nms_method != 'none':
        #    order = np.argsort(-pred_boxes[:, 4])
        #    order = order[:config.detection_per_image]
        #    pred_boxes = pred_boxes[order]
        # recovery the scale: undo the input resize, then convert
        # [x1, y1, x2, y2] -> [x, y, w, h] for both predictions and GT.
        pred_boxes[:, :4] /= scale
        pred_boxes[:, 2:4] -= pred_boxes[:, :2]
        gt_boxes = gt_boxes[0].numpy()
        gt_boxes[:, 2:4] -= gt_boxes[:, :2]
        match_result = match_body_face_pos(pred_boxes, image_id)
        result_dict = dict(ID=ID[0], height=int(im_info[0, -3]), width=int(im_info[0, -2]),
                           dtboxes=boxes_dump(pred_boxes), gtboxes=boxes_dump(gt_boxes))
        result_queue.put_nowait(result_dict)
        result_queue_match.put_nowait(match_result)
def inference_bfj(config, network, model_file, device, dataset, start, end, result_queue, result_queue_match):
    """Worker process: run the 'bfj' network over dataset[start:end] on one GPU.

    Besides boxes the net returns a 32-d embedding per prediction.  With
    'normal_nms' each box row is 8 wide ([x1, y1, x2, y2, hook_x, hook_y,
    score, class] -- see match_body_face_bfj) and the embeddings are kept
    aligned with the surviving boxes through every filtering step.
    """
    torch.set_default_tensor_type('torch.FloatTensor')
    torch.multiprocessing.set_sharing_strategy('file_system')
    # init model
    net = network()
    net.cuda(device)
    net = net.eval()
    check_point = torch.load(model_file)
    net.load_state_dict(check_point['state_dict'])
    # init data: restrict this worker to its shard.
    dataset.records = dataset.records[start:end];
    data_iter = torch.utils.data.DataLoader(dataset=dataset, shuffle=False)
    # inference
    for (image, gt_boxes, im_info, ID, image_id) in data_iter:
        pred_boxes, pred_emb, class_num = net(image.cuda(device), im_info.cuda(device))
        scale = im_info[0, 2]
        if config.test_nms_method == 'set_nms':
            # NOTE(review): this branch still assumes 6-wide rows and ignores
            # pred_emb, unlike 'normal_nms' below -- confirm it is exercised.
            assert pred_boxes.shape[-1] > 6, "Not EMD Network! Using normal_nms instead."
            assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
            top_k = pred_boxes.shape[-1] // 6
            n = pred_boxes.shape[0]
            pred_boxes = pred_boxes.reshape(-1, 6)
            idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)
            pred_boxes = np.hstack((pred_boxes, idents))
            keep = pred_boxes[:, 4] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
            keep = nms_utils.set_cpu_nms(pred_boxes, 0.5)
            pred_boxes = pred_boxes[keep]
        elif config.test_nms_method == 'normal_nms':
            # Per-class NMS, filtering boxes and embeddings in lockstep.
            assert pred_boxes.shape[-1] % 8 == 0, "Prediction dim Error!"
            pred_boxes = pred_boxes.reshape(-1, 8)
            pred_emb = pred_emb.reshape(-1, 32)
            keep = pred_boxes[:, 6] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
            pred_emb = pred_emb[keep]
            result = []
            result_emb = []
            for classid in range(class_num):
                keep = pred_boxes[:, 7] == (classid + 1)
                class_boxes = pred_boxes[keep]
                class_emb = pred_emb[keep]
                keep = nms_utils.cpu_nms(class_boxes, config.test_nms)
                class_boxes = class_boxes[keep]
                class_emb = class_emb[keep]
                result.append(class_boxes)
                result_emb.append(class_emb)
            pred_boxes = np.vstack(result)
            pred_emb = np.vstack(result_emb)
        elif config.test_nms_method == 'none':
            # NOTE(review): also assumes 6-wide rows; see set_nms note above.
            assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
            pred_boxes = pred_boxes.reshape(-1, 6)
            keep = pred_boxes[:, 4] > config.pred_cls_threshold
            pred_boxes = pred_boxes[keep]
        else:
            raise ValueError('Unknown NMS method.')
        #if pred_boxes.shape[0] > config.detection_per_image and \
        #    config.test_nms_method != 'none':
        #    order = np.argsort(-pred_boxes[:, 4])
        #    order = order[:config.detection_per_image]
        #    pred_boxes = pred_boxes[order]
        # recovery the scale: undo the input resize (boxes and hook points),
        # then convert [x1, y1, x2, y2] -> [x, y, w, h].
        pred_boxes[:, :6] /= scale
        pred_boxes[:, 2:4] -= pred_boxes[:, :2]
        gt_boxes = gt_boxes[0].numpy()
        gt_boxes[:, 2:4] -= gt_boxes[:, :2]
        match_result = match_body_face_bfj(pred_boxes, pred_emb, image_id)
        # match_result = match_body_face_bfj(pred_boxes, image_id)
        result_dict = dict(ID=ID[0], height=int(im_info[0, -3]), width=int(im_info[0, -2]),
                           dtboxes=boxes_dump(pred_boxes, pred_emb), gtboxes=boxes_dump(gt_boxes))
        result_queue.put_nowait(result_dict)
        result_queue_match.put_nowait(match_result)
def match_body_face_bfj(pred_boxes, pred_emb, image_id):
    """Pair each confident body box (class 1) with its best face box (class 2).

    Rows of ``pred_boxes`` are [x1, y1, x2, y2, hook_x, hook_y, score, class]
    and ``pred_emb`` carries one embedding per row.  Candidates with
    score > 0.3 are kept on both sides; faces are assigned to bodies by a
    fused hook-distance/embedding affinity, and weakly matched bodies get a
    zeroed face.  With no usable face at all ("wof" mode), every body is
    emitted with an all-zero face.  Returns JSON-ready per-body dicts.
    """
    body_sel = pred_boxes[:, 7] == 1
    face_sel = pred_boxes[:, 7] == 2
    bodies, body_feats = pred_boxes[body_sel], pred_emb[body_sel]
    faces, face_feats = pred_boxes[face_sel], pred_emb[face_sel]
    wof_flag = len(faces) == 0  # "without face" fallback

    body_xyxy, body_hook, body_conf = bodies[:, :4], bodies[:, 4:6], bodies[:, 6]
    face_xyxy, face_hook, face_conf = faces[:, :4], faces[:, 4:6], faces[:, 6]

    body_keep = (body_conf > 0.3).nonzero()
    if not body_keep[0].size:
        # Nothing confident: fall back to the single best body, faceless.
        body_keep = np.argmax(body_conf)[None]
        wof_flag = True
    face_keep = (face_conf > 0.3).nonzero()
    if not face_keep[0].size and (not wof_flag):
        face_keep = np.argmax(face_conf)[None]
        wof_flag = True

    body_xyxy = body_xyxy[body_keep]
    body_hook = body_hook[body_keep]
    body_conf = body_conf[body_keep]
    body_feats = body_feats[body_keep]
    if not wof_flag:
        face_xyxy = face_xyxy[face_keep]
        face_conf = face_conf[face_keep]
        face_hook = face_hook[face_keep]
        face_feats = face_feats[face_keep]

    if wof_flag:
        paired_boxes = np.zeros_like(body_xyxy)
        paired_scores = np.zeros_like(body_conf)
    else:
        # Fused face->body cost: normalized hook distance weighted by the
        # squared joint score, embedding distance by its complement; exp(-x)
        # turns the cost into a similarity.
        score_mat = (face_conf[:, None] + body_conf) / 2
        dist_mat = Pointlist_dis(face_hook, body_hook, body_xyxy)
        emb_mat = np.sqrt(np.square(face_feats[:, None] - body_feats).sum(-1))
        dist_mat = dist_mat / np.max(dist_mat, axis=0)
        emb_mat = emb_mat / np.max(emb_mat, axis=0)
        affinity = dist_mat * score_mat * score_mat + emb_mat * (1 - score_mat * score_mat)
        affinity = np.exp(-affinity)
        best_aff = np.max(affinity, axis=0)
        best_face = np.argmax(affinity, axis=0)
        weak = (best_aff < 0.98).nonzero()
        paired_boxes = face_xyxy[best_face]
        paired_scores = face_conf[best_face]
        if weak[0].size:
            # Too dissimilar: blank out the face for these bodies.
            paired_boxes[weak] = np.array([0.0, 0.0, 0.0, 0.0])
            paired_scores[weak] = 0

    bodylist = np.hstack((body_xyxy, body_conf[:, None]))
    facelist = np.hstack((paired_boxes, paired_scores[:, None]))
    result = []
    for body, face in zip(bodylist, facelist):
        body, face = body.tolist(), face.tolist()
        result.append({
            'image_id': int(image_id),
            'category_id': 1,
            'bbox': [round(i, 1) for i in body[:4]],
            'score': round(float(body[4]), 5),
            'f_bbox': [round(i, 1) for i in face[:4]],
            'f_score': round(float(face[4]), 5)
        })
    return result
def match_body_face_pos(pred_boxes, image_id):
    """Pair body and face detections of one image via Hungarian matching
    on a 1/IoU cost matrix and serialize the pairs as COCO-style dicts.

    pred_boxes: array whose columns appear to be [x, y, w, h, score, class]
    with class 1 = body and class 2 = face (inferred from the masks below) —
    TODO confirm against the caller.
    Bodies left without a usable face partner get an all-zero face box and a
    zero face score (the `wof_flag` / "without face" path).
    """
    # Split detections by class label.
    keep_body = pred_boxes[:, 5] == 1
    keep_face = pred_boxes[:, 5] == 2
    body_boxes = pred_boxes[keep_body]
    face_boxes = pred_boxes[keep_face]
    wof_flag=False
    if len(face_boxes) == 0:
        wof_flag = True
    base_body_boxes = body_boxes[:, :4]
    base_body_scores = body_boxes[:, 4]
    base_face_boxes = face_boxes[:, :4]
    base_face_scores = face_boxes[:, 4]
    # Keep confident detections (score > 0.3); if none survive, fall back to
    # the single best-scoring box and disable face matching.
    inds_conf_base_body = (base_body_scores > 0.3).nonzero()
    if not inds_conf_base_body[0].size:
        # NOTE(review): np.argmax returns a scalar; the `[None]` indexing
        # relies on numpy scalars supporting newaxis — confirm on the numpy
        # version in use.
        inds_conf_base_body = np.argmax(base_body_scores)[None]
        wof_flag = True
    inds_conf_base_face = (base_face_scores > 0.3).nonzero()
    if not inds_conf_base_face[0].size and (not wof_flag):
        inds_conf_base_face = np.argmax(base_face_scores)[None]
        wof_flag = True
    base_body_boxes = base_body_boxes[inds_conf_base_body]
    base_body_scores = base_body_scores[inds_conf_base_body]
    if not wof_flag:
        base_face_boxes = base_face_boxes[inds_conf_base_face]
        base_face_scores = base_face_scores[inds_conf_base_face]
    if wof_flag:
        # No usable faces: emit zero face boxes/scores shaped like the bodies.
        face_boxes = np.zeros_like(base_body_boxes)
        face_scores = np.zeros_like(base_body_scores)
    else:
        # Optimal one-to-one body/face assignment on the 1/IoU cost matrix.
        body_face_distance_matrix = cal_body_face_distance_matrix(base_body_boxes, base_face_boxes)
        base_body_boxes_filter = []
        base_body_scores_filter = []
        base_face_boxes_filter = []
        base_face_scores_filter = []
        body_row_idxs, face_col_idxs = linear_sum_assignment(body_face_distance_matrix)
        # Keep only assignments whose cost is a real overlap (not MAX_VAL).
        for body_idx in body_row_idxs:
            # NOTE(review): this np.where lookup is redundant —
            # linear_sum_assignment returns rows in order, so enumerate()
            # over body_row_idxs would give f_idx directly.
            f_idx = np.where(body_row_idxs == body_idx)[0][0]
            col_face_idx = face_col_idxs[f_idx]
            if body_face_distance_matrix[body_idx, col_face_idx] != MAX_VAL:
                base_body_boxes_filter.append(base_body_boxes[body_idx])
                base_body_scores_filter.append(base_body_scores[body_idx])
                base_face_boxes_filter.append(base_face_boxes[col_face_idx])
                base_face_scores_filter.append(base_face_scores[col_face_idx])
        if base_body_boxes_filter == []:
            # Every assignment was a non-overlap: degrade to the no-face path.
            face_boxes = np.zeros_like(base_body_boxes)
            face_scores = np.zeros_like(base_body_scores)
            wof_flag = True
        else:
            base_body_boxes = np.vstack(base_body_boxes_filter)
            base_body_scores = np.hstack(base_body_scores_filter)
            face_boxes = np.vstack(base_face_boxes_filter)
            face_scores = np.hstack(base_face_scores_filter)
    # Serialize matched (body, face) rows; an all-zero face box means "no face".
    bodylist = np.hstack((base_body_boxes, base_body_scores[:, None]))
    facelist = np.hstack((face_boxes, face_scores[:, None]))
    result = []
    for body, face in zip(bodylist, facelist):
        body = body.tolist()
        face = face.tolist()
        content = {
            'image_id': int(image_id),
            'category_id': 1,
            'bbox':[round(i, 1) for i in body[:4]],
            'score':round(float(body[4]), 5),
            'f_bbox':[round(i, 1) for i in face[:4]],
            'f_score':round(float(face[4]), 5)
        }
        result.append(content)
    return result
def cal_body_face_distance_matrix(body_boxes, face_boxes):
    """Build the assignment-cost matrix between body and face boxes.

    The cost of pairing face j with body i is 1/IoU (with the face's own
    area as the reference, via one_side_iou) when the face overlaps the
    body by more than 0.2, and MAX_VAL ("effectively unmatchable") otherwise.
    """
    cost = np.zeros((len(body_boxes), len(face_boxes)))
    for i, body_box in enumerate(body_boxes):
        for j, face_box in enumerate(face_boxes):
            overlap = one_side_iou(face_box, body_box)
            # Small or zero overlap is not a plausible body/face pair.
            cost[i, j] = 1 / overlap if overlap > 0.2 else MAX_VAL
    return cost
def one_side_iou(box1, box2):
    """Return the intersection area of box1 and box2 divided by box1's area.

    Boxes are (x, y, w, h). This is a one-sided IoU: the denominator is the
    area of box1 only, so a face box fully inside a body box scores 1.0.
    Returns 0.0 for a degenerate (zero-area) box1 instead of raising
    ZeroDivisionError.
    """
    # Convert (x, y, w, h) to corner coordinates on the fly for the overlap.
    x1 = max(box1[0], box2[0])
    x2 = min(box1[2] + box1[0], box2[2] + box2[0])
    y1 = max(box1[1], box2[1])
    y2 = min(box1[3] + box1[1], box2[3] + box2[1])
    intersection = max(x2 - x1, 0) * max(y2 - y1, 0)
    a1 = box1[2] * box1[3]  # area of box1 (w * h)
    if a1 <= 0:
        # Degenerate reference box: no meaningful overlap ratio.
        return 0.0
    return intersection / a1  # intersection over box 1
def boxes_dump(boxes, embs=None):
    """Serialize an array of detection rows into JSON-ready dicts.

    The row layout is inferred from the trailing dimension:
      8 columns: box(6), score, tag (v2/v3; optionally paired with embeddings)
      7 columns: box(4), score, tag, proposal_num
      6 columns: box(4), score, tag (v1)
      5 columns: box(4), tag
    """
    width = boxes.shape[-1]
    if width == 8:  # v2 or v3
        if embs is not None:
            result = [
                {'box':[round(i, 1) for i in box[:6].tolist()],
                 'score':round(float(box[6]), 5),
                 'tag':int(box[7]),
                 'emb':emb.tolist()}
                for box, emb in zip(boxes, embs)
            ]
        else:
            # NOTE(review): only the first 4 of the 6 box values are kept in
            # this branch, unlike the embedding branch above — confirm the
            # asymmetry is intended.
            result = [
                {'box':[round(i, 1) for i in box[:4].tolist()],
                 'score':round(float(box[6]), 5),
                 'tag':int(box[7])}
                for box in boxes
            ]
    elif width == 7:
        result = [
            {'box':[round(i, 1) for i in box[:4]],
             'score':round(float(box[4]), 5),
             'tag':int(box[5]),
             'proposal_num':int(box[6])}
            for box in boxes
        ]
    elif width == 6:  # v1
        result = [
            {'box':[round(i, 1) for i in box[:4].tolist()],
             'score':round(float(box[4]), 5),
             'tag':int(box[5])}
            for box in boxes
        ]
    elif width == 5:
        result = [
            {'box':[round(i, 1) for i in box[:4]],
             'tag':int(box[4])}
            for box in boxes
        ]
    else:
        raise ValueError('Unknown box dim.')
    return result
def run_test():
    """Command-line entry point: parse arguments, make the chosen model
    directory importable, load its config and network modules, and run the
    full evaluation via eval_all.

    Raises:
        Exception: if --config (or the loaded config's `network` field) is
        neither 'pos' nor 'bfj'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', '-md', default=None, required=True, type=str)
    parser.add_argument('--config', '-c', default=None,required=True,type=str)
    parser.add_argument('--resume_weights', '-r', default=None, required=True, type=str)
    parser.add_argument('--devices', '-d', default='0', type=str)
    # Disable NCCL InfiniBand transport before any distributed setup happens.
    os.environ['NCCL_IB_DISABLE'] = '1'
    args = parser.parse_args()
    # import libs
    # The model directory must be on sys.path BEFORE the conditional imports
    # below — config_pos/config_bfj live inside it.
    model_root_dir = os.path.join('../model/', args.model_dir)
    sys.path.insert(0, model_root_dir)
    if args.config == 'pos':
        from config_pos import config
    elif args.config == 'bfj':
        from config_bfj import config
    else:
        raise Exception('Error - only support for bfj or pos.')
    # The network implementation is chosen by the config, which may differ
    # from the --config flag value.
    if config.network == 'pos':
        from network_pos import Network
    elif config.network == 'bfj':
        from network_bfj import Network
    else:
        raise Exception('Error - only support for bfj or pos.')
    eval_all(args, config, Network)
# Script entry point.
if __name__ == '__main__':
    run_test()
|
<filename>tests/test_population.py<gh_stars>100-1000
from time import sleep, time
import os
from copy import copy
from pytest import raises, mark
from random import random, choices, seed
from evol import Population, ContestPopulation
from evol.helpers.groups import group_duplicate, group_stratified
from evol.helpers.pickers import pick_random
from evol.population import Contest
class TestPopulationSimple:
    """Basic Population construction, filtering and evaluation-state checks."""
    def test_filter_works(self, simple_chromosomes, simple_evaluation_function):
        # Probabilistic: filter keeps each of the 200 individuals with p=0.5,
        # so a full-size result (failure) has probability 2**-200.
        pop = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        assert len(pop.filter(func=lambda i: random() > 0.5)) < 200
    def test_population_init(self, simple_chromosomes):
        pop = Population(simple_chromosomes, eval_function=lambda x: x)
        assert len(pop) == len(simple_chromosomes)
        assert pop.intended_size == len(pop)
    def test_population_generate(self, simple_evaluation_function):
        def init_func():
            return 1
        pop = Population.generate(init_function=init_func, eval_function=simple_evaluation_function, size=200)
        assert len(pop) == 200
        assert pop.intended_size == 200
        assert pop.individuals[0].chromosome == 1
    def test_is_evaluated(self, any_population):
        assert not any_population.is_evaluated
        assert any_population.evaluate().is_evaluated
class TestPopulationCopy:
    """copy() must preserve all state except identity ('id', 'individuals')."""
    def test_population_copy(self, any_population):
        copied_population = copy(any_population)
        for key in any_population.__dict__.keys():
            if key not in ('id', 'individuals'):
                assert copied_population.__dict__[key] == any_population.__dict__[key]
    def test_population_is_evaluated(self, any_population):
        evaluated_population = any_population.evaluate()
        copied_population = copy(evaluated_population)
        assert evaluated_population.is_evaluated
        assert copied_population.is_evaluated
class TestPopulationEvaluate:
    """Serial vs concurrent evaluation, plus lazy re-evaluation semantics.

    NOTE(review): the `multi_proc_time < single_proc_time` assertions compare
    wall-clock timings and can flake on loaded or single-core CI machines.
    """
    cpus = os.cpu_count()
    latency = 0.005
    def test_individuals_are_not_initially_evaluated(self, any_population):
        assert all([i.fitness is None for i in any_population])
    def test_evaluate_lambda(self, simple_chromosomes):
        # without concurrency (note that I'm abusing a boolean operator to introduce some latency)
        pop = Population(simple_chromosomes, eval_function=lambda x: (sleep(self.latency) or x))
        t0 = time()
        pop.evaluate()
        t1 = time()
        single_proc_time = t1 - t0
        for individual in pop:
            assert individual.chromosome == individual.fitness
        # with concurrency
        pop = Population(simple_chromosomes, eval_function=lambda x: (sleep(self.latency) or x),
                         concurrent_workers=self.cpus)
        t0 = time()
        pop.evaluate()
        t1 = time()
        multi_proc_time = t1 - t0
        for individual in pop:
            assert individual.chromosome == individual.fitness
        if self.cpus > 1:
            assert multi_proc_time < single_proc_time
    def test_evaluate_func(self, simple_chromosomes):
        def evaluation_function(x):
            sleep(self.latency)
            return x * x
        pop = Population(simple_chromosomes, eval_function=evaluation_function)
        t0 = time()
        pop.evaluate()
        t1 = time()
        single_proc_time = t1 - t0
        for individual in pop:
            assert evaluation_function(individual.chromosome) == individual.fitness
        # with concurrency
        pop = Population(simple_chromosomes, eval_function=evaluation_function, concurrent_workers=self.cpus)
        t0 = time()
        pop.evaluate()
        t1 = time()
        multi_proc_time = t1 - t0
        for individual in pop:
            assert evaluation_function(individual.chromosome) == individual.fitness
        if self.cpus > 1:
            assert multi_proc_time < single_proc_time
    def test_evaluate_lazy(self, any_population):
        # lazy=True must skip re-evaluation once fitnesses exist; lazy=False
        # must force it (and hence hit the raising eval_function).
        pop = any_population
        pop.evaluate(lazy=True) # should evaluate
        def raise_function(_):
            raise Exception
        pop.eval_function = raise_function
        pop.evaluate(lazy=True) # should not evaluate
        with raises(Exception):
            pop.evaluate(lazy=False)
class TestPopulationSurvive:
    """survive() sizing via n, fraction, their combination, and error cases."""
    def test_survive_n_works(self, simple_chromosomes, simple_evaluation_function):
        pop1 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop2 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop3 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        assert len(pop1) == len(simple_chromosomes)
        assert len(pop2.survive(n=50)) == 50
        assert len(pop3.survive(n=75, luck=True)) == 75
    def test_survive_p_works(self, simple_chromosomes, simple_evaluation_function):
        pop1 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop2 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop3 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        assert len(pop1) == len(simple_chromosomes)
        assert len(pop2.survive(fraction=0.5)) == len(simple_chromosomes) * 0.5
        assert len(pop3.survive(fraction=0.1, luck=True)) == len(simple_chromosomes) * 0.1
    def test_survive_n_and_p_works(self, simple_evaluation_function):
        # When both are given, the smaller of n and fraction*size wins.
        chromosomes = list(range(200))
        pop1 = Population(chromosomes=chromosomes, eval_function=simple_evaluation_function)
        pop2 = Population(chromosomes=chromosomes, eval_function=simple_evaluation_function)
        pop3 = Population(chromosomes=chromosomes, eval_function=simple_evaluation_function)
        assert len(pop1.survive(fraction=0.5, n=200)) == 100
        assert len(pop2.survive(fraction=0.9, n=10)) == 10
        assert len(pop3.survive(fraction=0.5, n=190, luck=True)) == 100
    def test_breed_increases_generation(self, any_population):
        assert any_population.breed(parent_picker=pick_random, combiner=lambda mom, dad: mom).generation == 1
    def test_survive_throws_correct_errors(self, any_population):
        """If the resulting population is zero or larger than initial we need to see errors."""
        with raises(RuntimeError):
            any_population.survive(n=0)
        with raises(ValueError):
            any_population.survive(n=250)
        with raises(ValueError):
            any_population.survive()
class TestPopulationBreed:
    """breed() restores/overrides population size and forwards kwargs to the combiner."""
    def test_breed_amount_works(self, simple_chromosomes, simple_evaluation_function):
        pop1 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop1.survive(n=50).breed(parent_picker=lambda population: choices(population, k=2),
                                 combiner=lambda mom, dad: (mom + dad) / 2)
        assert len(pop1) == len(simple_chromosomes)
        pop2 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop2.survive(n=50).breed(parent_picker=lambda population: choices(population, k=2),
                                 combiner=lambda mom, dad: (mom + dad) / 2, population_size=400)
        assert len(pop2) == 400
        assert pop2.intended_size == 400
        assert pop1.generation == 1
        assert pop2.generation == 1
    def test_breed_works_with_kwargs(self, simple_chromosomes, simple_evaluation_function):
        pop1 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop1.survive(n=50).breed(parent_picker=pick_random,
                                 combiner=lambda mom, dad: (mom + dad) / 2,
                                 n_parents=2)
        assert len(pop1) == len(simple_chromosomes)
        pop2 = Population(chromosomes=simple_chromosomes, eval_function=simple_evaluation_function)
        pop2.survive(n=50).breed(parent_picker=pick_random,
                                 combiner=lambda *parents: sum(parents)/len(parents),
                                 population_size=400, n_parents=3)
        assert len(pop2) == 400
        assert pop2.intended_size == 400
    def test_breed_raises_with_multiple_values_for_kwarg(self, simple_population):
        # Passing y=2 collides with the combiner's own positional y.
        (simple_population
         .survive(fraction=0.5)
         .breed(parent_picker=pick_random,
                combiner=lambda x, y: x + y))
        with raises(TypeError):
            (simple_population
             .survive(fraction=0.5)
             .breed(parent_picker=pick_random,
                    combiner=lambda x, y: x + y, y=2))
class TestPopulationMutate:
    """mutate() semantics: in-place, probabilistic, kwargs forwarding, elitism."""
    def test_mutate_lambda(self):
        pop = Population([1]*100, eval_function=lambda x: x).mutate(lambda x: x+1)
        for chromosome in pop.chromosomes:
            assert chromosome == 2
        assert len(pop) == 100
    def test_mutate_inplace(self):
        pop = Population([1]*100, eval_function=lambda x: x)
        pop.mutate(lambda x: x+1)
        for chromosome in pop.chromosomes:
            assert chromosome == 2
    def test_mutate_func(self):
        def mutate_func(x):
            return -x
        population = Population([1]*100, eval_function=lambda x: x)
        population.mutate(mutate_func)
        for chromosome in population.chromosomes:
            assert chromosome == -1
        assert len(population) == 100
    def test_mutate_probability(self):
        # Seeded so that with p=0.5 over 100 individuals both outcomes occur.
        seed(0)
        pop = Population([1]*100, eval_function=lambda x: x).mutate(lambda x: x+1, probability=0.5).evaluate()
        assert min(individual.chromosome for individual in pop.individuals) == 1
        assert max(individual.chromosome for individual in pop.individuals) == 2
        assert pop.current_best.fitness == 2
        assert pop.documented_best.fitness == 2
        assert len(pop) == 100
    def test_mutate_zero_probability(self):
        pop = Population([1]*100, eval_function=lambda x: x).mutate(lambda x: x+1, probability=0)
        for chromosome in pop.chromosomes:
            assert chromosome == 1
    def test_mutate_func_kwargs(self):
        def mutate_func(x, y=0):
            return x+y
        pop = Population([1]*100, eval_function=lambda x: x).mutate(mutate_func, y=16)
        for chromosome in pop.chromosomes:
            assert chromosome == 17
    def test_mutate_elitist(self):
        pop = Population([1, 1, 3], eval_function=lambda x: x).evaluate().mutate(lambda x: x + 1, elitist=True)
        for chromosome in pop.chromosomes:
            assert chromosome > 1
        assert len(pop) == 3
class TestPopulationWeights:
    """_individual_weights must be normalized to [0, 1] after evaluation."""
    def test_weights(self, simple_chromosomes, simple_evaluation_function):
        for maximize in (False, True):
            pop = Population(chromosomes=simple_chromosomes,
                             eval_function=simple_evaluation_function, maximize=maximize)
            # Accessing weights before evaluation is an error.
            with raises(RuntimeError):
                assert min(pop._individual_weights) >= 0
            pop.evaluate()
            assert max(pop._individual_weights) == 1
            assert min(pop._individual_weights) == 0
            if maximize:
                assert pop._individual_weights[0] == 0
            else:
                assert pop._individual_weights[0] == 1
class TestPopulationBest:
    """current_best/current_worst/documented_best tracking across evaluate and mutate."""
    def test_current_best(self, simple_chromosomes):
        for maximize, best in ((True, max(simple_chromosomes)), (False, min(simple_chromosomes))):
            pop = Population(chromosomes=simple_chromosomes, eval_function=float, maximize=maximize)
            assert pop.current_best is None
            pop.evaluate()
            assert pop.current_best.chromosome == best
    def test_current_worst(self, simple_chromosomes):
        for maximize, worst in ((False, max(simple_chromosomes)), (True, min(simple_chromosomes))):
            pop = Population(chromosomes=simple_chromosomes, eval_function=float, maximize=maximize)
            assert pop.current_worst is None
            pop.evaluate()
            assert pop.current_worst.chromosome == worst
    def test_mutate_resets(self):
        # Mutation invalidates the cached current best/worst.
        pop = Population(chromosomes=[1, 1, 1], eval_function=float, maximize=True)
        assert pop.current_best is None and pop.current_worst is None
        pop.evaluate()
        assert pop.current_best.fitness == 1 and pop.current_worst.fitness == 1
        pop.mutate(lambda x: x)
        assert pop.current_best is None and pop.current_worst is None
    def test_documented_best(self):
        # documented_best keeps the historical optimum even after fitness drops.
        pop = Population(chromosomes=[100, 100, 100], eval_function=lambda x: x*2, maximize=True)
        assert pop.documented_best is None
        pop.evaluate()
        assert pop.documented_best.fitness == pop.current_best.fitness
        pop.mutate(mutate_function=lambda x: x - 10, probability=1).evaluate()
        assert pop.documented_best.fitness - 20 == pop.current_best.fitness
class TestPopulationIslands:
    """group()/combine() island-model behavior and validation of grouping output."""
    @mark.parametrize('n_groups', [1, 2, 3, 4])
    def test_groups(self, simple_population, n_groups):
        groups = simple_population.group(group_duplicate, n_groups=n_groups)
        assert len(groups) == n_groups
        assert type(groups) == list
        assert all(type(group) is Population for group in groups)
    def test_no_groups(self, simple_population):
        with raises(ValueError):
            simple_population.group(group_duplicate, n_groups=0)
    def test_empty_group(self, simple_population):
        def rogue_grouping_function(*args):
            return [[1, 2, 3], []]
        with raises(ValueError):
            simple_population.group(rogue_grouping_function)
    @mark.parametrize('result, error', [
        (['a', 'b', 'c'], TypeError),
        ([None, None], TypeError),
        ([10, 100, 1000], IndexError)
    ])
    def test_invalid_group(self, simple_population, result, error):
        # Grouping functions must return lists of valid individual indices.
        def rogue_grouping_function(*args):
            return [result]
        with raises(error):
            simple_population.group(rogue_grouping_function)
    def test_not_evaluated(self, simple_population):
        # Stratified grouping needs fitnesses, so an unevaluated population fails.
        with raises(RuntimeError):
            simple_population.group(group_stratified, n_groups=3)
    def test_combine(self, simple_population):
        groups = simple_population.evaluate().group(group_stratified, n_groups=3)
        combined = Population.combine(*groups)
        assert combined.intended_size == simple_population.intended_size
    def test_combine_nothing(self):
        with raises(ValueError):
            Population.combine()
class TestContest:
    """Contest score assignment and round generation guarantees."""
    def test_assign_score(self, simple_individuals):
        contest = Contest(simple_individuals)
        contest.assign_scores(range(len(simple_individuals)))
        for score, individual in zip(range(len(simple_individuals)), simple_individuals):
            assert individual.fitness == score
    @mark.parametrize('individuals_per_contest,contests_per_round', [(2, 1), (5, 1), (7, 1), (2, 5), (5, 4), (3, 3)])
    def test_generate_n_contests(self, simple_individuals, individuals_per_contest, contests_per_round):
        contests = Contest.generate(simple_individuals, contests_per_round=contests_per_round,
                                    individuals_per_contest=individuals_per_contest)
        for contest in contests:
            contest.assign_scores([1]*individuals_per_contest) # Now the fitness equals the number of contests played
        # All individuals competed in the same number of contests
        assert len({individual.fitness for individual in simple_individuals}) == 1
        # The number of contests is _at least_ contests_per_round
        assert all([individual.fitness >= contests_per_round for individual in simple_individuals])
        # The number of contests is smaller than contests_per_round + individuals_per_contest
        assert all([individual.fitness < contests_per_round + individuals_per_contest
                    for individual in simple_individuals])
class TestContestPopulation:
    """Constructor must store the contest-round parameters."""
    def test_init(self):
        cp = ContestPopulation([0, 1, 2], lambda x: x, contests_per_round=15, individuals_per_contest=15)
        assert cp.contests_per_round == 15
        assert cp.individuals_per_contest == 15
class TestContestPopulationBest:
    """documented_best stays None for contest populations (no absolute fitness),
    both serially and with concurrent workers."""
    def test_no_documented(self):
        pop = ContestPopulation([0, 1, 2], lambda x, y: [0, 0], contests_per_round=100, individuals_per_contest=2)
        pop.evaluate()
        assert pop.documented_best is None
        # with concurrency
        pop = ContestPopulation([0, 1, 2], lambda x, y: [0, 0], contests_per_round=100, individuals_per_contest=2,
                                concurrent_workers=3)
        pop.evaluate()
        assert pop.documented_best is None
        pop = ContestPopulation([0, 1, 2],
                                lambda x, y: [x, y],
                                contests_per_round=100, individuals_per_contest=2)
        pop.evaluate()
        assert pop.documented_best is None
        # with concurrency
        pop = ContestPopulation([0, 1, 2],
                                lambda x, y: [x, y],
                                contests_per_round=100, individuals_per_contest=2,
                                concurrent_workers=3)
        pop.evaluate()
        assert pop.documented_best is None
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# <https://apache.org/licenses/LICENSE-2.0>.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a heavily modified version of V8's tool at:
# https://chromium.googlesource.com/v8/v8/+/master/tools/bigint-tester.py
# Original license:
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file [1].
#
# [1] https://chromium.googlesource.com/v8/v8/+/master/LICENSE
import argparse
import math
import multiprocessing
import os
import random
import subprocess
import sys
import tempfile
# Configuration.
kChars = "0123456789ABCDEF"  # digit alphabet used to emit hex literals
kBase = 16  # radix matching kChars
kLineLengthBig = 70 # A bit less than 80.
kLineLengthSmall = 16 # Generating 64-bit values.
kNumInputsGenerate = 100  # default case count for 'generate' mode
kNumInputsStress = 1000  # default case count for 'stress' mode
# Internally used sentinels.
kNo = 0
kYes = 1
kRandom = 2
TEST_HEADER = """
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// <https://apache.org/licenses/LICENSE-2.0>.
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import JSB from '../jsbigint.mjs';
const JSBigInt = JSB.BigInt;
let errorCount = 0;
const dataBigNative = [];
const dataBigJsb = [];
const dataSmallNative = [];
const dataSmallJsb = [];
function hexParseN(s) {
if (s.charCodeAt(0) === 0x2D) return -BigInt(s.slice(1));
return BigInt(s);
}
function parseNative(d) {
if (d.b) {
return {a: hexParseN(d.a), b: hexParseN(d.b), r: hexParseN(d.r)};
}
return {a: hexParseN(d.a), r: hexParseN(d.r)};
}
function hexParse(s) {
if (s.charCodeAt(0) === 0x2D) {
const result = JSBigInt(s.slice(1));
result.sign = true;
return result;
}
return JSBigInt(s);
}
function parseJSB(d) {
if (d.b) {
return {a: hexParse(d.a), b: hexParse(d.b), r: hexParse(d.r)};
}
return {a: hexParse(d.a), r: hexParse(d.r)};
}
function prepareData(src, dstNative, dstJsb) {
for (let i = 0; i < src.length; i++) {
const d = src[i];
dstNative.push(parseNative(d));
dstJsb.push(parseJSB(d));
}
}
prepareData(dataBig, dataBigNative, dataBigJsb);
prepareData(dataSmall, dataSmallNative, dataSmallJsb);
"""
TEST_BODY = """
function test%(variant)s(data, reps) {
for (let i = 0; i < data.length; i++) {
const d = data[i];
let r = %(testop)s;
if (%(comparison)s) {%(inputs)s
console.log('Result: ' + r.toString(16));
console.log('Expected: ' + d.r.toString(16));
console.log('Op: %(opname)s');
errorCount++;
}
}
let r;
const t1 = Date.now();
for (let j = 0; j < reps; j++) {
for (let i = 0; i < data.length; i++) {
const d = data[i];
r = %(testop)s;
}
}
return Date.now() - t1;
}
"""
INPUT_PRINTER_BINARY = """
console.log('Input A: ' + d.a.toString(16));
console.log('Input B: ' + d.b.toString(16));"""
INPUT_PRINTER_UNARY = """
console.log('Input: ' + d.a.toString(16));"""
# JS epilogue: runs the timing loops and exits non-zero on any mismatch.
# Fix: `typeof quit === undefined` compared a string against the undefined
# value, which is always false — under Node.js the `quit` branch was then
# taken and threw a ReferenceError. `typeof` results must be compared
# against the string 'undefined'.
TEST_FOOTER = """
const kRepsBig = %d;
const kRepsSmall = %d;
console.log('Native/big: ' + testNative(dataBigNative, kRepsBig));
console.log('JSB/big: ' + testJsb(dataBigJsb, kRepsBig));
console.log('Native/small: ' + testNative(dataSmallNative, kRepsSmall));
console.log('JSB/small: ' + testJsb(dataSmallJsb, kRepsSmall));
if (errorCount !== 0) {
  console.error('Finished with ' + errorCount + ' errors.');
  // Support both d8 (`quit`) and Node.js (`process.exit`).
  (typeof quit === 'undefined' ? process.exit : quit)(1);
}"""
def GenRandom(length, negative=kRandom):
  """Return a quoted hex literal with `length` digits, e.g. "'-0x1F'".

  negative: kYes -> always negative, kNo -> never negative,
  kRandom -> negative with 50% probability. The leading digit is never zero.
  """
  if length == 0: return "'0x0'"
  want_minus = negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0))
  digits = [kChars[random.randint(1, kBase - 1)]]  # no leading zero
  digits.extend(kChars[random.randint(0, kBase - 1)] for _ in range(1, length))
  return "'" + ("-" if want_minus else "") + "0x" + "".join(digits) + "'"
def Parse(x):
  """Inverse of Format: convert a quoted hex literal back into a Python int."""
  if x.startswith("'-0x"):
    return -int(x[4:-1], kBase)
  if x.startswith("'0x"):
    return int(x[3:-1], kBase)
  # Fallback: bare (unquoted) hex digits.
  return int(x, kBase)
def Format(x):
  """Format the int x as a quoted hex literal understood by Parse/hexParse.

  Fix: the digit loop used `x = x / kBase`, which is float division under
  Python 3 and breaks the loop (kChars[x % kBase] then indexes with a
  float). Floor division `//` is identical on Python 2 and correct on 3.
  """
  original = x
  negative = False
  if x == 0: return "'0x0'"
  if x < 0:
    negative = True
    x = -x
  s = ""
  while x > 0:
    s = kChars[x % kBase] + s
    x = x // kBase
  s = "0x" + s
  if negative:
    s = "-" + s
  # Self-check: Format and Parse must round-trip.
  assert Parse(s) == original
  return "'" + s + "'"
class TestGenerator(object):
  """Base class for per-operation test emitters.

  Subclasses (via UnaryOp/BinaryOp) supply the operation and expected
  results; this class assembles a complete .mjs test file from the
  TEST_HEADER/TEST_BODY/TEST_FOOTER templates and can run it through d8.
  """
  # Subclasses must implement these.
  # Returns a JSON snippet defining inputs and expected output for one test.
  def EmitOne(self, line_length): raise NotImplementedError
  def GetTestOpNative(self): raise NotImplementedError
  def GetTestOpJSB(self): raise NotImplementedError
  def GetOpString(self): raise NotImplementedError
  def GetInputPrinter(self): raise NotImplementedError
  def EmitHeader(self):
    return TEST_HEADER
  def EmitFooter(self):
    # Small inputs run 3x the repetitions of big inputs.
    return TEST_FOOTER % (self.GetReps(), 3 * self.GetReps())
  def EmitData(self, count):
    """Emit the dataBig/dataSmall JS array literals with `count` cases each."""
    big = []
    small = []
    for i in range(count):
      big.append(self.EmitOne(kLineLengthBig))
      small.append(self.EmitOne(kLineLengthSmall))
    return ("const dataBig = [" + ", ".join(big) + "];\n\nconst dataSmall = [" +
            ", ".join(small) + "];")
  def EmitTestBodyNative(self):
    return TEST_BODY % {"variant": "Native",
                        "testop": self.GetTestOpNative(),
                        "comparison": "d.r !== r",
                        "inputs": self.GetInputPrinter(),
                        "opname": self.GetOpString()}
  def EmitTestBodyJSB(self):
    return TEST_BODY % {"variant": "Jsb",
                        "testop": self.GetTestOpJSB(),
                        "comparison": "!r.equal(d.r)",
                        "inputs": self.GetInputPrinter(),
                        "opname": self.GetOpMethod()}
  def PrintTest(self, count):
    """'generate' mode: write the complete test file to stdout."""
    print(self.EmitData(count))
    print(self.EmitHeader())
    print(self.EmitTestBodyNative())
    print(self.EmitTestBodyJSB())
    print(self.EmitFooter())
  def RunTest(self, count, binary):
    """'stress' mode: write a temp .mjs file and run it with `binary`.

    Returns the subprocess exit status (non-zero on mismatches).
    NOTE(review): if mkstemp itself raises, the finally block references
    `fd`/`path` before assignment — confirm whether that path matters.
    """
    try:
      fd, path = tempfile.mkstemp(suffix=".mjs", prefix="bigint-test-")
      with open(path, "w") as f:
        f.write(self.EmitData(count))
        f.write(self.EmitHeader())
        f.write(self.EmitTestBodyJSB())
        f.write(self.EmitFooter())
      return subprocess.call("%s %s" % (binary, path), shell=True)
    finally:
      os.close(fd)
      os.remove(path)
class UnaryOp(TestGenerator):
  """Base for unary-operation test generators (OP x).

  Fix: `line_length / 2` produced a float under Python 3 and made
  random.randint raise TypeError; `//` keeps the same value on Python 2
  and an int on Python 3.
  """
  # Subclasses must implement these two.
  def GetOpString(self): raise NotImplementedError
  def GenerateResult(self, x): raise NotImplementedError
  # Subclasses may override this:
  def GenerateInput(self, line_length):
    # Big inputs get at least half the target line length.
    min_length = 0 if line_length != kLineLengthBig else line_length // 2
    return GenRandom(random.randint(min_length, line_length))
  def GetReps(self): return 40000
  # Subclasses should not override anything below.
  def EmitOne(self, line_length):
    """Emit one JS object literal {a, r} with the expected result."""
    x_str = self.GenerateInput(line_length)
    x_num = Parse(x_str)
    result_num = self.GenerateResult(x_num)
    result_str = Format(result_num)
    return "{\n a: %s,\n r: %s\n}" % (x_str, result_str)
  def GetInputPrinter(self):
    return INPUT_PRINTER_UNARY
  def GetTestOpNative(self):
    return "d.a; r = %sr" % self.GetOpString()
  def GetTestOpJSB(self):
    return "d.a.%s()" % self.GetOpMethod()
class BinaryOp(TestGenerator):
  """Base for binary-operation test generators (a OP b).

  Fix: `line_length / 2` produced a float under Python 3 and made
  random.randint raise TypeError; `//` keeps the same value on Python 2
  and an int on Python 3.
  """
  # Subclasses must implement these.
  def GetOpString(self): raise NotImplementedError
  def GetOpMethod(self): raise NotImplementedError
  def GenerateResult(self, left, right): raise NotImplementedError
  # Subclasses may override these:
  def GenerateInputLengths(self, line_length):
    # Big inputs get at least half the target line length.
    min_length = 0 if line_length != kLineLengthBig else line_length // 2
    return (random.randint(min_length, line_length),
            random.randint(min_length, line_length))
  def GenerateInputs(self, line_length):
    left_length, right_length = self.GenerateInputLengths(line_length)
    return GenRandom(left_length), GenRandom(right_length)
  def GetReps(self): return 40000
  # Subclasses should not override anything below.
  def EmitOne(self, line_length):
    """Emit one JS object literal {a, b, r} with the expected result."""
    left_str, right_str = self.GenerateInputs(line_length)
    left_num = Parse(left_str)
    right_num = Parse(right_str)
    result_num = self.GenerateResult(left_num, right_num)
    result_str = Format(result_num)
    return ("{\n a: %s,\n b: %s,\n r: %s\n}" %
            (left_str, right_str, result_str))
  def GetInputPrinter(self):
    return INPUT_PRINTER_BINARY
  def GetTestOpNative(self):
    return "d.a %s d.b" % self.GetOpString()
  def GetTestOpJSB(self):
    return "d.a.%s(d.b)" % self.GetOpMethod()
class Neg(UnaryOp):
  """Test generator for unary minus."""

  def GetOpString(self):
    return "-"

  def GetOpMethod(self):
    return "unaryMinus"

  def GenerateResult(self, x):
    return -x
class BitNot(UnaryOp):
  """Test generator for bitwise NOT."""

  def GetOpString(self):
    return "~"

  def GetOpMethod(self):
    return "bitwiseNot"

  def GenerateResult(self, x):
    return ~x
class Inc(UnaryOp):
  """Test generator for increment."""

  def GetOpString(self):
    return "++"

  def GetOpMethod(self):
    return "increment"

  def GenerateResult(self, x):
    return x + 1
class Dec(UnaryOp):
  """Test generator for decrement."""

  def GetOpString(self):
    return "--"

  def GetOpMethod(self):
    return "decrement"

  def GenerateResult(self, x):
    return x - 1
class Add(BinaryOp):
  """Test generator for addition."""

  def GetOpString(self):
    return "+"

  def GetOpMethod(self):
    return "add"

  def GenerateResult(self, a, b):
    return a + b
class Sub(BinaryOp):
  """Test generator for subtraction."""

  def GetOpString(self):
    return "-"

  def GetOpMethod(self):
    return "subtract"

  def GenerateResult(self, a, b):
    return a - b
class Mul(BinaryOp):
  """Test generator for multiplication."""

  def GetOpString(self):
    return "*"

  def GetOpMethod(self):
    return "multiply"

  def GenerateResult(self, a, b):
    return a * b

  def GenerateInputLengths(self, line_length):
    # Split the digit budget between the factors so the product's length
    # stays near line_length.
    lhs = random.randint(1, line_length)
    return lhs, line_length - lhs
class Div(BinaryOp):
  """Test generator for division (BigInt `/` truncates toward zero).

  Fix: both `/` uses relied on Python 2's integer division; under Python 3
  they produce floats (breaking GenerateResult's integer math and
  random.randint's bounds). `//` is value-identical on Python 2.
  """
  def GetOpString(self): return "/"
  def GetOpMethod(self): return "divide"
  def GetReps(self): return 10000
  def GenerateResult(self, a, b):
    # Truncated division: divide magnitudes, then restore the sign.
    result = abs(a) // abs(b)
    if (a < 0) != (b < 0): result = -result
    return result
  def GenerateInputLengths(self, line_length):
    # Let the left side be longer than the right side with high probability,
    # because that case is more interesting.
    min_left = line_length * 6 // 10
    max_right = line_length * 7 // 10
    return random.randint(min_left, line_length), random.randint(1, max_right)
class Mod(Div):  # Sharing GenerateInputLengths.
  """Test generator for remainder with JS truncated-division sign rules."""

  def GetOpString(self):
    return "%"

  def GetOpMethod(self):
    return "remainder"

  def GenerateResult(self, a, b):
    # Python's % takes the divisor's sign; JS BigInt remainder takes the
    # dividend's sign. Shift by |b| whenever the signs disagree.
    rem = a % b
    if a < 0 and rem > 0:
      rem -= abs(b)
    elif a > 0 and rem < 0:
      rem += abs(b)
    return rem
class Shl(BinaryOp):
  """Test generator for left shift; a negative shift count shifts right."""

  def GetOpString(self):
    return "<<"

  def GetOpMethod(self):
    return "leftShift"

  def GenerateInputsInternal(self, line_length, small_shift_positive):
    # Value operand is strictly shorter than the line so a small shift
    # cannot blow past the target length.
    value_len = random.randint(0, line_length - 1)
    value = GenRandom(value_len)
    if random.randint(0, 1) == 0:
      # Small shift: size chosen to keep the result within the line budget.
      shift_len = 1 + int(math.log((line_length - value_len), kBase))
      sign = kNo if small_shift_positive else kYes
    else:
      # Large shift: sign chosen so the result shrinks instead of exploding.
      shift_len = random.randint(0, 3)
      sign = kYes if small_shift_positive else kNo
    return value, GenRandom(shift_len, negative=sign)

  def GenerateInputs(self, line_length):
    return self.GenerateInputsInternal(line_length, True)

  def GenerateResult(self, a, b):
    return a >> -b if b < 0 else a << b
class Sar(Shl):  # Sharing GenerateInputsInternal.
  """Test generator for arithmetic right shift; negative counts shift left."""

  def GetOpString(self):
    return ">>"

  def GetOpMethod(self):
    return "signedRightShift"

  def GenerateInputs(self, line_length):
    return self.GenerateInputsInternal(line_length, False)

  def GenerateResult(self, a, b):
    return a << -b if b < 0 else a >> b
class BitAnd(BinaryOp):
  """Test generator for bitwise AND."""

  def GetOpString(self):
    return "&"

  def GetOpMethod(self):
    return "bitwiseAnd"

  def GenerateResult(self, a, b):
    return a & b
class BitOr(BinaryOp):
  """Test generator for bitwise OR."""

  def GetOpString(self):
    return "|"

  def GetOpMethod(self):
    return "bitwiseOr"

  def GenerateResult(self, a, b):
    return a | b
class BitXor(BinaryOp):
    """Test generator for the bitwise-XOR operation."""
    def GetOpString(self):
        return "^"
    def GetOpMethod(self):
        return "bitwiseXor"
    def GenerateResult(self, a, b):
        return a ^ b
# Maps command-line op names to their test-generator classes.
OPS = {
    "add": Add,
    "sub": Sub,
    "mul": Mul,
    "div": Div,
    "mod": Mod,
    "inc": Inc,
    "dec": Dec,
    "neg": Neg,
    "not": BitNot,
    "shl": Shl,
    "sar": Sar,
    "and": BitAnd,
    "or": BitOr,
    "xor": BitXor
}
# Sorted, human-readable list of supported op names (used in --help text).
OPS_NAMES = ", ".join(sorted(OPS.keys()))
def RunOne(op, num_inputs, binary):
    # Instantiate the generator for 'op' and run one stress test against
    # the given d8 binary.
    return OPS[op]().RunTest(num_inputs, binary)
def WrapRunOne(args):
    """Unpack an (op, num_inputs, binary) tuple for multiprocessing's imap."""
    op, num_inputs, binary = args
    return RunOne(op, num_inputs, binary)
def RunAll(args):
    """Yield one (op, num_inputs, binary) work item per requested run of each op.

    Uses range() instead of the Python-2-only xrange(); when merely iterated
    the two behave identically, and this keeps the helper Python-3 ready.
    The loop index itself is unused, hence the '_' name.
    """
    for op in args.op:
        for _ in range(args.runs):
            yield (op, args.num_inputs, args.binary)
def Main():
    # Parse the command line, validate the requested ops, then dispatch to
    # 'generate' (print tests to stdout) or 'stress' (generate and run them).
    parser = argparse.ArgumentParser(
        description="Helper for generating or running BigInt tests.")
    parser.add_argument(
        "action", help="Action to perform: 'generate' or 'stress'")
    parser.add_argument(
        "op", nargs="+",
        help="Operation(s) to test, one or more of: %s. In 'stress' mode, "
        "special op 'all' tests all ops." % OPS_NAMES)
    parser.add_argument(
        "-n", "--num-inputs", type=int, default=-1,
        help="Number of input/output sets in each generated test. Defaults to "
        "%d for 'generate' and '%d' for 'stress' mode." %
        (kNumInputsGenerate, kNumInputsStress))
    stressopts = parser.add_argument_group("'stress' mode arguments")
    stressopts.add_argument(
        "-r", "--runs", type=int, default=1000,
        help="Number of tests (with NUM_INPUTS each) to generate and run. "
        "Default: %(default)s.")
    stressopts.add_argument(
        "-b", "--binary", default="out/x64.debug/d8",
        help="The 'd8' binary to use. Default: %(default)s.")
    args = parser.parse_args()
    # Reject unknown ops up front with a friendly message.
    for op in args.op:
        if op not in OPS.keys() and op != "all":
            print("Invalid op '%s'. See --help." % op)
            return 1
    if len(args.op) == 1 and args.op[0] == "all":
        args.op = OPS.keys()
    if args.action == "generate":
        if args.num_inputs < 0: args.num_inputs = kNumInputsGenerate
        for op in args.op:
            OPS[op]().PrintTest(args.num_inputs)
    elif args.action == "stress":
        if args.num_inputs < 0: args.num_inputs = kNumInputsStress
        result = 0
        # Fan the runs out over all cores; the overall result is truthy
        # (failure) if any single run failed.
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        for r in pool.imap_unordered(WrapRunOne, RunAll(args)):
            result = result or r
        return result
    else:
        print("Invalid action '%s'. See --help." % args.action)
        return 1
if __name__ == "__main__":
    # Exit with Main()'s status code (non-zero on failure).
    sys.exit(Main())
|
<gh_stars>10-100
"""
Command line reference manager with a single source of truth: the .bib file.
Inspired by beets.
"""
import pkg_resources
import click
import click_constraints
import click_plugins # type: ignore
import pybibs
import pyperclip # type: ignore
import requests
from . import cite
from . import internals
from . import query
# Reusable group of click options for attaching a file to an entry, shared by
# the 'add' and 'edit' commands. Constraints: '--destination' requires
# '--file'; '--no-copy' requires '--file' and conflicts with '--destination'.
FILE_OPTIONS = internals.combine_decorators(
    [
        click.option(
            "--file",
            help="Path to file to link to this entry.",
            type=click.Path(exists=True, readable=True, dir_okay=False),
        ),
        click.option(
            "--destination",
            help="A folder to put the file in.",
            type=click.Path(exists=True, readable=True, dir_okay=True, file_okay=False),
        ),
        click.option(
            "--no-copy",
            help="Add the specified file in its current location without copying.",
            is_flag=True,
        ),
        click_constraints.constrain("destination", depends=["file"]),
        click_constraints.constrain(
            "no_copy",
            depends=["file"],
            conflicts=["destination"],
        ),
    ]
)
# Reusable variadic SEARCH_TERM argument with shell completion on entry keys.
SEARCH_TERMS_OPTION = click.argument(
    "search_term",
    nargs=-1,
    shell_complete=internals.complete_key,
)
@click_plugins.with_plugins(pkg_resources.iter_entry_points("bibo.plugins"))
@click.group(help=__doc__)
@click.version_option()
@click.option(
    "--database",
    envvar=internals.BIBO_DATABASE_ENV_VAR,
    help="""
A path to a .bib file. Overrides the BIBO_DATABASE environment variable.
""",
    required=True,
    type=click.Path(
        file_okay=True,
        dir_okay=False,
        writable=True,
        readable=True,
        resolve_path=True,
    ),
)
@click.pass_context
def cli(ctx, database):
    """Root command group: load the .bib database and share it via ctx.obj."""
    ctx.ensure_object(dict)
    ctx.obj["database"] = database  # path, used when writing changes back
    ctx.obj["data"] = internals.load_database(database)  # parsed entries
@cli.command("list", short_help="List entries.")
@click.option("--raw", is_flag=True, help="Format as raw .bib entries.")
@click.option(
    "--bibstyle",
    default="plain",
    help="""
Bibtex bibliography style to use for citation formatting.
For more information check https://www.overleaf.com/learn/latex/Bibtex_bibliography_styles.
""",
)
@click.option(
    "--format",
    help="""
Custom format pattern.
Use ``$`` in front of a key, type, or field to create custom formatter.
For example: ``--format '$author ($year) - $title'``.
""",
)
@click.option("--verbose", is_flag=True, help="Show verbose information.")
@SEARCH_TERMS_OPTION
@click.pass_context
def list_(ctx, search_term, raw, bibstyle, verbose, **kwargs):
    """
    List entries in the database.
    A SEARCH_TERM matches an entry if it appears in the type, key, or any
    of the fields of the entry.
    If multiple search terms are provided an entry should match all of them.
    It is possible to match against a specific key, type, or field as
    follows: ``author:einstein``, ``year:2018`` or ``type:book``.
    Note that search terms are case insensitive.
    """
    # '--format' would shadow the builtin as a parameter name, so it arrives
    # via **kwargs and is popped here; nothing else should remain.
    format_pattern = kwargs.pop("format")
    assert not kwargs
    results = query.search(ctx.obj["data"], search_term)
    if raw:
        _list_raw((r.entry for r in results))
    elif format_pattern:
        _list_format_pattern((r.entry for r in results), format_pattern)
    else:
        _list_citations(results, ctx.obj["database"], bibstyle, verbose)
def _list_raw(entries):
    """Echo each entry in raw BibTeX form."""
    for item in entries:
        rendered = pybibs.write_string([item])
        click.echo(rendered)
def _list_format_pattern(entries, format_pattern):
    """Echo each entry rendered through the user-supplied format pattern."""
    for item in entries:
        rendered = internals.format_entry(item, format_pattern)
        click.echo(rendered)
def _list_citations(results, database, bibstyle, verbose):
    """Print a formatted citation (or a fallback) for every search result."""
    results = list(results)
    keys = [r.entry["key"] for r in results]
    exception = None
    try:
        # Render all citations in one bibtex run; on failure remember the
        # error and fall back to per-entry formatting below.
        citations = cite.cite(keys, database, bibstyle, verbose)
    except cite.BibtexException as e:
        exception = e
    for result in results:
        header = internals.header(result.entry)
        if exception:
            citation = cite.fallback(result.entry)
        else:
            citation = citations[result.entry["key"]]
        text = "\n".join([header, citation])
        text, extra_match_info = internals.highlight_match(text, result)
        click.echo(text)
        if extra_match_info:
            click.secho("Search matched by", underline=True)
            for key, val in extra_match_info.items():
                click.echo("{}: {}".format(key, val))
    if exception is not None:
        # Explain once, after the listing, why fallback formatting was used.
        parts = [str(exception), "Using a fallback citation method"]
        if exception.use_verbose:
            parts.append("Use --verbose for more information")
        click.secho(". ".join(parts), fg="red")
@cli.command("open", short_help="Open the file, URL, or doi associated with an entry.")
@SEARCH_TERMS_OPTION
@click.pass_context
def open_(ctx, search_term):
    """
    Open an entry in the database if a ``file``, ``url``, or ``doi`` field
    exists (with precedence in this order).
    A file will be open by the application defined by your system according
    to the file extension.
    For example, a PDF should be opened by a PDF reader and a folder should
    be opened by a file browser.
    URLs and DOIs should be opened in the web browser.
    A SEARCH_TERM matches an entry if it appears in the type, key, or any
    of the fields of the entry.
    If multiple search terms are provided an entry should match all of them.
    It is possible to match against a specific key, type, or field as
    follows: ``author:einstein``, ``year:2018`` or ``type:book``.
    Note that search terms are case insensitive.
    This command fails if the number of entries that match the search is
    different than one.
    """
    entry = query.get(ctx.obj["data"], search_term).entry
    # Try the fields in precedence order; the for/else raises only when no
    # field produced a value.
    for field_name in ["file", "url", "doi"]:
        value = entry.get("fields", {}).get(field_name)
        if value:
            if field_name == "doi":
                value = "https://doi.org/" + value
            click.launch(value)
            break
    else:
        raise click.ClickException("No file, url, or doi is associated with this entry")
@cli.command(short_help="Add a new entry.")
@FILE_OPTIONS
@click.option("--doi", help="Add entry by DOI.")
@click.pass_context
def add(ctx, destination, doi, no_copy, **kwargs):
    """
    Add a new entry to the database.
    Find a bib entry you would like to add.
    Copy it to the clipboard, and run the command.
    It will be opened in your editor for validation or manual editing.
    Upon saving, the entry is added to the database.
    Don't forget to set the EDITOR environment variable for this command
    to work properly.
    """
    file_ = kwargs.pop("file")  # injected by FILE_OPTIONS
    data = ctx.obj["data"]
    if doi is not None:
        # Fetch a BibTeX record for the DOI via content negotiation.
        url = "http://dx.doi.org/{}".format(doi)
        headers = {"Accept": "application/x-bibtex"}
        resp = requests.get(url, headers=headers)
        # Explicit error instead of the previous 'assert': asserts are
        # stripped under 'python -O', and a failed lookup is a user-facing
        # error that click should report cleanly.
        if resp.status_code != 200:
            raise click.ClickException(
                "Failed to fetch BibTeX for DOI {} (HTTP {})".format(
                    doi, resp.status_code
                )
            )
        raw_bib = resp.text
    else:
        raw_bib = pyperclip.paste()
    bib = internals.editor(text=raw_bib)
    entry = pybibs.read_entry_string(bib)
    internals.unique_key_validation(entry["key"], data)
    data.append(entry)
    if file_:
        internals.set_file(data, entry, file_, destination, no_copy)
    pybibs.write_file(data, ctx.obj["database"])
@cli.command(short_help="Remove an entry or a field.")
@click.argument("key", shell_complete=internals.complete_key)
@click.argument("field", nargs=-1)
@click.pass_context
def remove(ctx, key, field):
    """
    Remove an entry from the database or remove a field from an entry.
    To remove an entry specify its key.
    To remove fields specify the key and list all fields for removal.
    """
    data = ctx.obj["data"]
    entry = query.get_by_key(data, key)
    if not field:
        # No fields listed: remove the whole entry.
        data.remove(entry)
    elif "fields" in entry:
        for f in field:
            if f in entry["fields"]:
                del entry["fields"][f]
            else:
                click.echo('"{}" has no field "{}"'.format(key, f))
    else:
        click.echo('"{}" has no fields'.format(key))
    # Persist the (possibly unchanged) database back to disk.
    pybibs.write_file(data, ctx.obj["database"])
@cli.command(short_help="Edit an entry.")
@click.argument("key", shell_complete=internals.complete_key)
@click.argument("field_value", nargs=-1)
@FILE_OPTIONS
@click.pass_context
def edit(ctx, key, field_value, destination, no_copy, **kwargs):
    """
    Edit an entry.
    Use FIELD_VALUE to set fields as follows: ``author=Einstein``, or
    ``tags=interesting``.
    Leave the value empty to open in editor.
    Set the key or type in the same way.
    Don't forget to set the EDITOR environment variable for this command
    to work properly.
    """
    file_ = kwargs.pop("file")  # injected by FILE_OPTIONS
    data = ctx.obj["data"]
    entry = query.get_by_key(data, key)
    if file_:
        internals.set_file(data, entry, file_, destination, no_copy)
    for fv in field_value:
        if "=" in fv:
            # Split on the FIRST '=' only, so values may themselves contain
            # '=' (e.g. ``url=http://example.com?a=b``); the unbounded split
            # previously raised ValueError on such input.
            field, value = fv.split("=", 1)
        else:
            field = fv
            # .get chain: the entry may not have a fields dict at all.
            current_value = entry.get("fields", {}).get(field, "")
            value = internals.editor(text=current_value).strip()
        if field == "key":
            internals.unique_key_validation(value, data)
            entry["key"] = value
        elif field == "type":
            entry["type"] = value
        else:
            # setdefault: create the fields dict on first use.
            entry.setdefault("fields", {})[field] = value
    pybibs.write_file(data, ctx.obj["database"])
if __name__ == "__main__":
    # Run the click command group when the module is executed directly.
    cli()
|
<reponame>shreyasnagare/Brick
import csv
import logging
from collections import defaultdict
from rdflib import Graph, Literal, BNode, URIRef
from rdflib.namespace import XSD
from rdflib.collection import Collection
from bricksrc.ontology import define_ontology
from bricksrc.namespaces import BRICK, RDF, OWL, RDFS, TAG, SOSA, SKOS, QUDT, QUDTQK
from bricksrc.namespaces import bind_prefixes
from bricksrc.setpoint import setpoint_definitions
from bricksrc.sensor import sensor_definitions
from bricksrc.alarm import alarm_definitions
from bricksrc.status import status_definitions
from bricksrc.command import command_definitions
from bricksrc.parameter import parameter_definitions
from bricksrc.system import system_subclasses
from bricksrc.location import location_subclasses
from bricksrc.equipment import (
equipment_subclasses,
hvac_subclasses,
valve_subclasses,
security_subclasses,
safety_subclasses,
)
from bricksrc.substances import substances
from bricksrc.quantities import quantity_definitions, get_units
from bricksrc.properties import properties
from bricksrc.tags import tags
# Log with timestamps and source location for traceability during compilation.
logging.basicConfig(
    format="%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d:%H:%M:%S",
    level=logging.INFO,
)
# The single RDF graph into which the whole Brick ontology is accumulated.
G = Graph()
bind_prefixes(G)
A = RDF.type  # shorthand: 'A' reads like "is a" in the triples below
# tagset (sorted tuple of tag names) -> set of classes carrying those tags
tag_lookup = defaultdict(set)
# class -> sorted tuple of its tags; guards against mapping a class twice
intersection_classes = {}
def add_restriction(klass, definition):
    """
    Defines OWL.Restrictions linked to Brick classes
    through OWL.equivalentClass.
    This populates the property-object pairs (OWL.onProperty, 'property'),
    (OWL.hasValue, 'value'). The intersection of these properties is made to be
    equivalent to the given class.
    Args:
        klass: the URI of the Brick class to be modeled
        definition: a list of (property, value) pairs
    """
    if len(definition) == 0:
        return
    elements = []
    equivalent_class = BNode()
    list_name = BNode()
    # One anonymous OWL.Restriction per (property, value) pair.
    for prop, value in definition:
        restriction = BNode()
        elements.append(restriction)
        G.add((restriction, A, OWL.Restriction))
        G.add((restriction, OWL.onProperty, prop))
        G.add((restriction, OWL.hasValue, value))
    # These two triples are loop-invariant; they were previously re-added on
    # every iteration (a no-op only because RDF graphs have set semantics).
    G.add((klass, OWL.equivalentClass, equivalent_class))
    G.add((equivalent_class, OWL.intersectionOf, list_name))
    Collection(G, list_name, elements)
def add_tags(klass, definition):
    """
    Adds the definition of tags to the given class. This method adds two
    group of triples. The first group models the class as a subclass
    of entities that have all of the given tags (the 'OWL.intersectionOf'
    the OWL.Restriction classes modeled as entities that have a given tag).
    The second group of triples uses the BRICK.hasAssociatedTag property
    to associate the tags with this class. While this is duplicate information,
    it is much easier to query for.
    Args:
        klass: the URI of the Brick class to be modeled
        definition: a list of BRICK.Tag instances (e.g. TAG.Air)
    """
    if len(definition) == 0:
        return
    all_restrictions = []
    equivalent_class = BNode()
    list_name = BNode()
    for tag in definition:
        G.add((klass, BRICK.hasAssociatedTag, tag))
    for idnum, item in enumerate(definition):
        # Named BNode: the same tag always yields the same node label, so
        # the "has tag X" restriction is shared by all classes that use it.
        restriction = BNode(f"has_{item.split('#')[-1]}")
        all_restrictions.append(restriction)
        G.add((restriction, A, OWL.Restriction))
        G.add((restriction, OWL.onProperty, BRICK.hasTag))
        G.add((restriction, OWL.hasValue, item))
        G.add((item, A, BRICK.Tag))  # make sure the tag is declared as such
        G.add(
            (item, RDFS.label, Literal(item.split("#")[-1]))
        )  # make sure the tag is declared as such
    # tag index
    tagset = tuple(sorted([item.split("#")[-1] for item in definition]))
    tag_lookup[tagset].add(klass)
    # if we've already mapped this class, don't map it again
    if klass in intersection_classes:
        return
    # A single tag needs no intersection; several tags become an
    # intersection of the per-tag restrictions.
    if len(all_restrictions) == 1:
        G.add((klass, RDFS.subClassOf, all_restrictions[0]))
    if len(all_restrictions) > 1:
        G.add((klass, RDFS.subClassOf, equivalent_class))
        G.add((equivalent_class, OWL.intersectionOf, list_name))
        Collection(G, list_name, all_restrictions)
    intersection_classes[klass] = tuple(sorted(definition))
def define_concept_hierarchy(definitions, typeclasses, broader=None, related=None):
    """
    Generates triples to define the SKOS hierarchy of concepts given by
    'definitions', which are all instances of the class given by 'typeclass'.
    'broader', if provided, is the skos:broader concept
    'related', if provided, is the skos:related concept
    Currently this is used for Brick Quantities.
    Recurses through SKOS.narrower / SKOS.related sub-dictionaries.
    """
    for concept, defn in definitions.items():
        concept = BRICK[concept]
        for typeclass in typeclasses:
            G.add((concept, A, typeclass))
        # mark broader concept if one exists
        if broader is not None:
            G.add((concept, SKOS.broader, broader))
        # mark related concept if one exists
        if related is not None:
            G.add((concept, SKOS.related, related))
        # add label (URI fragment with underscores replaced by spaces)
        class_label = concept.split("#")[-1].replace("_", " ")
        G.add((concept, RDFS.label, Literal(class_label)))
        # define mapping to tags if it exists
        # "tags" property is a list of URIs naming Tags
        taglist = defn.get("tags", [])
        assert isinstance(taglist, list)
        if len(taglist) == 0:
            logging.warning(f"Property 'tags' not defined for {concept}")
        add_tags(concept, taglist)
        # define mapping to substances + quantities if it exists
        # "substances" property is a list of (predicate, object) pairs
        substancedef = defn.get("substances", [])
        assert isinstance(substancedef, list)
        add_restriction(concept, substancedef)
        # define concept hierarchy
        # this is a nested dictionary
        narrower_defs = defn.get(SKOS.narrower, {})
        if narrower_defs is not None and isinstance(narrower_defs, dict):
            define_concept_hierarchy(
                narrower_defs, [BRICK.Quantity, QUDT.QuantityKind], broader=concept
            )
        related_defs = defn.get(SKOS.related, {})
        if related_defs is not None and isinstance(related_defs, dict):
            define_concept_hierarchy(
                related_defs, [BRICK.Quantity, QUDT.QuantityKind], related=concept
            )
        # handle 'parents' subconcepts (links outside of tree-based hierarchy)
        parents = defn.get("parents", [])
        assert isinstance(parents, list)
        for _parent in parents:
            G.add((concept, SKOS.broader, _parent))
        # all other key-value pairs in the definition are
        # property-object pairs
        expected_properties = ["parents", "tags", "substances"]
        other_properties = [
            prop for prop in defn.keys() if prop not in expected_properties
        ]
        for propname in other_properties:
            propval = defn[propname]
            if isinstance(propval, list):
                for pv in propval:
                    G.add((concept, propname, pv))
            elif not isinstance(propval, dict):
                G.add((concept, propname, propval))
def define_classes(definitions, parent, pun_classes=False):
    """
    Generates triples for the hierarchy given by 'definitions', rooted
    at the class given by 'parent'
    - class hierarchy ('subclasses')
    - tag mappings
    - substance + quantity modeling
    If pun_classes is True, then create punned instances of the classes.
    Recurses through each definition's "subclasses" dictionary.
    """
    for classname, defn in definitions.items():
        classname = BRICK[classname]
        # class is a owl:Class
        G.add((classname, A, OWL.Class))
        # subclass of parent
        G.add((classname, RDFS.subClassOf, parent))
        # add label (URI fragment with underscores replaced by spaces)
        class_label = classname.split("#")[-1].replace("_", " ")
        G.add((classname, RDFS.label, Literal(class_label)))
        if pun_classes:
            # Punning: declare the class as an instance of itself.
            G.add((classname, A, classname))
        # define mapping to tags if it exists
        # "tags" property is a list of URIs naming Tags
        taglist = defn.get("tags", [])
        assert isinstance(taglist, list)
        if len(taglist) == 0:
            logging.warning(f"Property 'tags' not defined for {classname}")
        add_tags(classname, taglist)
        # define mapping to substances + quantities if it exists
        # "substances" property is a list of (predicate, object) pairs
        substancedef = defn.get("substances", [])
        assert isinstance(substancedef, list)
        add_restriction(classname, substancedef)
        # define class structure
        # this is a nested dictionary
        subclassdef = defn.get("subclasses", {})
        assert isinstance(subclassdef, dict)
        define_classes(subclassdef, classname, pun_classes=pun_classes)
        # handle 'parents' subclasses (links outside of tree-based hierarchy)
        parents = defn.get("parents", [])
        assert isinstance(parents, list)
        for _parent in parents:
            G.add((classname, RDFS.subClassOf, _parent))
        # all other key-value pairs in the definition are
        # property-object pairs
        expected_properties = ["parents", "tags", "substances", "subclasses"]
        other_properties = [
            prop for prop in defn.keys() if prop not in expected_properties
        ]
        for propname in other_properties:
            propval = defn[propname]
            if isinstance(propval, list):
                for pv in propval:
                    G.add((classname, propname, pv))
            else:
                G.add((classname, propname, propval))
def define_properties(definitions, superprop=None):
    """
    Define Brick properties from a nested definition dict.

    Args:
        definitions: mapping of property name -> definition dict. A definition
            may contain RDF.type entries (under the key A), a "subproperties"
            dict, and arbitrary property-object pairs.
        superprop: optional URI; when given, each property is declared an
            rdfs:subPropertyOf it (used on the recursive call).
    """
    if len(definitions) == 0:
        return
    for prop, propdefn in definitions.items():
        G.add((BRICK[prop], A, OWL.ObjectProperty))
        if superprop is not None:
            G.add((BRICK[prop], RDFS.subPropertyOf, superprop))
        # define property types
        prop_types = propdefn.get(A, [])
        assert isinstance(prop_types, list)
        for prop_type in prop_types:
            G.add((BRICK[prop], A, prop_type))
        # define any subproperties
        subproperties_def = propdefn.get("subproperties", {})
        assert isinstance(subproperties_def, dict)
        define_properties(subproperties_def, BRICK[prop])
        # All remaining key-value pairs are property-object pairs. (The
        # original wrapped this in a redundant outer loop over
        # propdefn.items() whose variables were immediately shadowed, so
        # every triple was emitted len(propdefn) times; only the RDF graph's
        # set semantics hid the duplication. Emit each triple once.)
        expected_properties = ["subproperties", A]
        other_properties = [
            p for p in propdefn.keys() if p not in expected_properties
        ]
        for propname in other_properties:
            G.add((BRICK[prop], propname, propdefn[propname]))
def add_definitions():
    """
    Adds definitions for Brick subclasses through SKOS.definitions.
    This parses the definitions from ./bricksrc/definitions.csv and
    adds it to the graph. If available, adds the source information of
    through RDFS.seeAlso.
    Also auto-generates definitions for Limit parameter classes from
    their class names.
    """
    with open("./bricksrc/definitions.csv") as dictionary_file:
        dictionary = csv.reader(dictionary_file)
        # skip the header
        next(dictionary)
        # add definitions, citations to the graph
        # CSV columns: term URI, definition text, see-also URI
        for definition in dictionary:
            term = URIRef(definition[0])
            if len(definition[1]):
                G.add((term, SKOS.definition, Literal(definition[1], lang="en")))
            if len(definition[2]):
                G.add((term, RDFS.seeAlso, URIRef(definition[2])))
    # Generate definitions for all subclasses of brick:Limit from their names.
    qstr = """
    select ?param where {
      ?param rdfs:subClassOf* brick:Limit.
    }
    """
    limit_def_template = "A parameter that places {direction} bound on the range of permitted values of a {setpoint}."
    params = [row["param"] for row in G.query(qstr)]
    for param in params:
        words = param.split("#")[-1].split("_")
        prefix = words[0]
        # define "direction" component of Limit definition
        if prefix == "Min":
            direction = "a lower"
        elif prefix == "Max":
            direction = "an upper"
        else:
            prefix = None
            direction = "a lower or upper"
        # define the "setpoint" component of a Limit definition
        if param.split("#")[-1] in ["Max_Limit", "Min_Limit", "Limit"]:
            setpoint = "Setpoint"
        else:
            if prefix:
                setpoint = "_".join(words[1:-1])
            else:
                setpoint = "_".join(words[:-1])
        if setpoint.split("_")[-1] != "Setpoint":
            # While Limits are a boundary of a Setpoint, the associated
            # Setpoint names are not explicit in class's names. Thus needs
            # to be explicitly added for the definition text.
            setpoint = setpoint + "_Setpoint"
            logging.info(f"Inferred setpoint: {setpoint}")
        limit_def = limit_def_template.format(direction=direction, setpoint=setpoint)
        G.add((param, SKOS.definition, Literal(limit_def, lang="en")))
        # Sanity-check that the inferred Setpoint class actually exists.
        class_exists = G.query(
            f"""select ?class where {{
            BIND(brick:{setpoint} as ?class)
            ?class rdfs:subClassOf* brick:Class.
            }}
            """
        ).bindings
        if not class_exists:
            logging.warning(f"WARNING: {setpoint} does not exist in Brick for {param}.")
# ---- Script body: build the full Brick graph, then serialize it. ----
logging.info("Beginning BRICK Ontology compilation")
# handle ontology definition
define_ontology(G)
# Declare root classes
G.add((BRICK.Class, A, OWL.Class))
G.add((BRICK.Tag, A, OWL.Class))
roots = {
    "Equipment": {"tags": [TAG.Equipment]},
    "Location": {"tags": [TAG.Location]},
    "Point": {"tags": [TAG.Point]},
    "Measurable": {},
    "System": {
        SKOS.definition: Literal(
            "A System is a combination of equipment and auxiliary devices (e.g., controls, accessories, interconnecting means, and terminal elements) by which energy is transformed so it performs a specific function such as HVAC, service water heating, or lighting. (ASHRAE Dictionary)."
        ),
        "tags": [TAG.System],
    },
}
define_classes(roots, BRICK.Class)
logging.info("Defining properties")
# define BRICK properties
define_properties(properties)
logging.info("Defining Point subclasses")
# define Point subclasses
define_classes(setpoint_definitions, BRICK.Point)
define_classes(sensor_definitions, BRICK.Point)
define_classes(alarm_definitions, BRICK.Point)
define_classes(status_definitions, BRICK.Point)
define_classes(command_definitions, BRICK.Point)
define_classes(parameter_definitions, BRICK.Point)
# make points disjoint: no individual can be two kinds of Point at once
pointclasses = ["Alarm", "Status", "Command", "Setpoint", "Sensor", "Parameter"]
for pc in pointclasses:
    for o in filter(lambda x: x != pc, pointclasses):
        G.add((BRICK[pc], OWL.disjointWith, BRICK[o]))
logging.info("Defining Equipment, System and Location subclasses")
# define other root class structures
define_classes(location_subclasses, BRICK.Location)
define_classes(equipment_subclasses, BRICK.Equipment)
define_classes(system_subclasses, BRICK.System)
define_classes(hvac_subclasses, BRICK.HVAC)
define_classes(valve_subclasses, BRICK.Valve)
define_classes(security_subclasses, BRICK.Security_Equipment)
define_classes(safety_subclasses, BRICK.Safety_Equipment)
logging.info("Defining Measurable hierarchy")
# define measurable hierarchy
G.add((BRICK.Measurable, RDFS.subClassOf, BRICK.Class))
# set up Quantity definition
G.add((BRICK.Quantity, RDFS.subClassOf, SOSA.ObservableProperty))
G.add((BRICK.Quantity, RDFS.subClassOf, BRICK.Measurable))
G.add((BRICK.Quantity, A, OWL.Class))
G.add((BRICK.Quantity, RDFS.subClassOf, SKOS.Concept))
# set up Substance definition
G.add((BRICK.Substance, RDFS.subClassOf, SOSA.FeatureOfInterest))
G.add((BRICK.Substance, RDFS.subClassOf, BRICK.Measurable))
G.add((BRICK.Substance, A, OWL.Class))
# We make the punning explicit here. Any subclass of brick:Substance
# is itself a substance or quantity. There is one canonical instance of
# each class, which is indicated by referencing the class itself.
#
# bldg:tmp1 a brick:Air_Temperature_Sensor;
#     brick:measures brick:Air ,
#                    brick:Temperature .
#
# This makes Substance metaclasses.
define_classes(substances, BRICK.Substance, pun_classes=True)
# this defines the SKOS-based concept hierarchy for BRICK Quantities
define_concept_hierarchy(quantity_definitions, [BRICK.Quantity, QUDT.QuantityKind])
# for all Quantities, copy part of the QUDT unit definitions over
res = G.query(
    """SELECT ?quantity ?qudtquant WHERE {
    ?quantity rdf:type brick:Quantity .
    ?quantity owl:sameAs ?qudtquant
    }"""
)
for r in res:
    for unit, symb in get_units(r[1]):
        G.add((r[0], QUDT.applicableUnit, unit))
        G.add((unit, QUDT.symbol, symb))
logging.info("Finishing Tag definitions")
# declares that all tags are pairwise different; i.e. no two tags refer
# to the same tag
different_tag_list = []
for tag, definition in tags.items():
    different_tag_list.append(TAG[tag])
    G.add((TAG[tag], A, BRICK.Tag))
different_tag = BNode("tags_are_different")
G.add((BRICK.Tag, A, OWL.AllDifferent))
G.add((BRICK.Tag, OWL.distinctMembers, different_tag))
Collection(G, different_tag, different_tag_list)
logging.info("Adding class definitions")
add_definitions()
logging.info(f"Brick ontology compilation finished! Generated {len(G)} triples")
# serialize to output (strip trailing whitespace, keep exactly one newline)
with open("Brick.ttl", "wb") as fp:
    fp.write(G.serialize(format="turtle").rstrip())
    fp.write(b"\n")
|
#! /usr/bin/env python
# coding=utf-8
# Authors: Hanxiaoyang <<EMAIL>>
# simple naive bayes classifier to classify sohu news topic
# data can be downloaded in http://www.sogou.com/labs/dl/cs.html
# 代码功能:简易朴素贝叶斯分类器,用于对搜狐新闻主题分类,数据可在http://www.sogou.com/labs/dl/cs.html下载(精简版)
# 详细说明参见博客http://blog.csdn.net/han_xiaoyang/article/details/50629608
# 作者:寒小阳<<EMAIL>>
import os,sys, math, random, collections
def shuffle(inFile):
    '''
    Simple shuffling pass used to split the corpus into train and test sets.
    Returns (trainText, testText): a random 60%/40% split of the input lines.
    '''
    textLines = [line.strip() for line in open(inFile)]
    print "正在准备训练和测试数据,请稍后..."
    random.shuffle(textLines)
    num = len(textLines)
    # Python 2 integer division: 3 * num / 5 is an int index (the 60% cut).
    trainText = textLines[:3 * num / 5]
    testText = textLines[3 * num / 5:]
    print "准备训练和测试数据准备完毕,下一步..."
    return trainText, testText
# There are 9 news categories in total; each category gets a single-letter
# id below, mapped to its index by label2id().
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
def label2id(label):
    """Map a category letter to its index in `labels`; raise on unknown input.

    Uses list.index instead of the previous manual xrange scan (same result,
    works on both Python 2 and 3); also fixes the 'lable' typo in the
    error message.
    """
    try:
        return labels.index(label)
    except ValueError:
        raise Exception('Error label %s' % (label))
def doc_dict():
    '''Return a fresh zero vector with one slot per category.'''
    return [0 for _ in labels]
def mutual_info(N, Nij, Ni_, N_j):
    '''
    One pointwise mutual-information term, log base 2, with +1 smoothing on
    Nij inside the logarithm to avoid log(0).
    '''
    p_joint = Nij * 1.0 / N
    ratio = N * (Nij + 1) * 1.0 / (Ni_ * N_j)
    return p_joint * math.log(ratio) / math.log(2)
def count_for_cates(trainText, featureFile):
    '''
    Scan the training lines, counting how often each word occurs in each
    category and how many documents each category has; then pick the
    highest-mutual-information words per category and write them, together
    with the per-category document counts, to the feature file.
    '''
    docCount = [0] * len(labels)
    wordCount = dict() #collections.defaultdict(list)
    # Scan the corpus and count. Each line is "<label> <space-separated words>".
    for line in trainText:
        label, text = line.strip().split(' ', 1)
        index = label2id(label[0])
        words = text.strip().split(' ')
        for word in words:
            word = word.strip()
            if word == '':continue
            if word not in wordCount:
                wordCount[word] = doc_dict()
            wordCount[word][index] += 1
        docCount[index] += 1
    # Compute the mutual information of every (word, category) pair.
    print "计算互信息,提取关键/特征词中,请稍后..."
    miDict = dict() #collections.defaultdict(list)
    N = sum(docCount)
    for k, vs in wordCount.items():
        miDict[k] = doc_dict()
        for i in xrange(len(vs)):
            # 2x2 contingency table for word k vs. category i.
            N11 = vs[i]
            N10 = sum(vs) - N11
            N01 = docCount[i] - N11
            N00 = N - N11 - N10 - N01
            mi = mutual_info(N, N11, N10 + N11, N01 + N11) + \
                 mutual_info(N, N10, N10 + N11, N00 + N10) + \
                 mutual_info(N, N01, N01 + N11, N01 + N00) + \
                 mutual_info(N, N00, N00 + N10, N00 + N01)
            miDict[k][i] = mi
    fWords = set()
    # Keep the 250 highest-MI words for each category (union over categories).
    for i in xrange(len(docCount)):
        keyf = lambda x: x[1][i]
        sortedDict = sorted(miDict.items(), key=keyf, reverse=True)
        for j in xrange(250):
            fWords.add(sortedDict[j][0])
    out = open(featureFile, 'w')
    # First line: the per-category document counts (repr of the list).
    out.write(str(docCount) + "\n")
    # Then one selected feature word per line.
    for fword in fWords:
        out.write(fword + "\n")
    print "特征词写入完毕..."
    out.close()
def load_feature_words(featureFile):
    '''
    Load the per-category document counts and the feature-word set from the
    feature file written by count_for_cates().

    Returns:
        (docCounts, features): the list of per-category document counts, and
        the set of feature words (one per remaining line).
    '''
    import ast  # local import keeps the module's original import line intact
    # 'with' guarantees the file handle is closed (the original leaked it on
    # an exception); ast.literal_eval parses the list literal safely where
    # eval() would execute arbitrary code from the file.
    with open(featureFile) as f:
        docCounts = ast.literal_eval(f.readline())
        features = set()
        for line in f:
            features.add(line.strip())
    return docCounts, features
def train_bayes(featureFile, trainText, modelFile):
    '''
    Train the naive Bayes model: count, per category, how often each feature
    word occurs, then write Laplace-smoothed P(word|category) scores to the
    model file (one "word<TAB>[scores]" line per word).
    '''
    print "使用朴素贝叶斯训练中..."
    docCounts, features = load_feature_words(featureFile)
    wordCount = dict() #collections.defaultdict(list)
    for word in features:
        wordCount[word] = doc_dict()
    # Total feature-word occurrences per category.
    tCount = [0] * len(docCounts)
    # NOTE(review): 'trainText' here is a file PATH (the caller passes
    # inFile), not the list returned by shuffle() — the name is misleading.
    for line in open(trainText):
        lable, text = line.strip().split(' ', 1)
        index = label2id(lable[0])
        words = text.split(' ')
        for word in words:
            if word in features:
                tCount[index] += 1
                wordCount[word][index] += 1
    outModel = open(modelFile, 'w')
    # Laplace smoothing: +1 in the numerator, +|vocabulary| in the denominator.
    print "训练完毕,写入模型..."
    for k, v in wordCount.items():
        scores = [(v[i] + 1) * 1.0 / (tCount[i] + len(wordCount)) for i in xrange(len(v))]
        outModel.write(k + "\t" + str(scores) + "\n")
    outModel.close()
def load_model(modelFile):
    '''
    Load the trained Bayes model: a dict mapping word -> list of
    per-category scores.
    '''
    print "加载模型中..."
    f = open(modelFile)
    scores = {}
    for line in f:
        word,counts = line.strip().rsplit('\t',1)
        # eval() of our own model file's list repr; acceptable only because
        # the model file is produced locally by train_bayes().
        scores[word] = eval(counts)
    f.close()
    return scores
def predict(featureFile, modelFile, testText):
    '''
    Classify each test line as the argmax of (log prior + sum of log
    likelihoods of its feature words) and print the resulting accuracy.
    '''
    docCounts, features = load_feature_words(featureFile)
    # Log prior of each category, estimated from document frequencies.
    docScores = [math.log(count * 1.0 / sum(docCounts)) for count in docCounts]
    scores = load_model(modelFile)
    rCount = 0
    docCount = 0
    print "正在使用测试数据验证模型效果..."
    for line in testText:
        lable, text = line.strip().split(' ', 1)
        index = label2id(lable[0])
        words = text.split(' ')
        preValues = list(docScores)
        for word in words:
            if word in features:
                for i in xrange(len(preValues)):
                    preValues[i] += math.log(scores[word][i])
        m = max(preValues)
        pIndex = preValues.index(m)
        if pIndex == index:
            rCount += 1
        # print lable,lables[pIndex],text
        docCount += 1
    # NOTE(review): the first two format arguments look swapped — rCount
    # (correct predictions) is printed as the total and docCount as the
    # correct count; the accuracy ratio is also inverted. Confirm and fix.
    print("总共测试文本量: %d , 预测正确的类别量: %d, 朴素贝叶斯分类器准确度:%f" % (rCount, docCount, rCount * 1.0 / docCount))
if __name__=="__main__":
    """
    if len(sys.argv) != 4:
        print "Usage: python naive_bayes_text_classifier.py sougou_news.txt feature_file.out model_file.out"
        sys.exit()
    inFile = sys.argv[1]
    featureFile = sys.argv[2]
    modelFile = sys.argv[3]
    """
    # NOTE(review): os.path.dirname("__file__") takes the dirname of the
    # LITERAL string '__file__' (always ''), not this module's path;
    # __file__ without quotes was probably intended.
    abs_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir))
    root_path = os.path.join(abs_path, "data\\SogouC.reduced\\")
    inFile = os.path.join(root_path, 'splited_text.txt')
    featureFile = os.path.join(root_path, 'feature.txt')
    modelFile = os.path.join(abs_path, 'model/nb_model')
    trainText, testText = shuffle(inFile)
    count_for_cates(trainText, featureFile)
    # NOTE(review): train_bayes() reads the FULL corpus file (inFile), so the
    # held-out testText is also part of training — accuracy will be
    # optimistic. Training on trainText would require writing it to a file
    # first, since train_bayes() expects a path.
    train_bayes(featureFile, inFile, modelFile)
    predict(featureFile, modelFile, testText)
<filename>hwilib/coldcardi.py
# Trezor interaction script
from .hwwclient import HardwareWalletClient
from ckcc.client import ColdcardDevice
from ckcc.protocol import CCProtocolPacker
from ckcc.constants import MAX_BLK_LEN
from .base58 import xpub_main_2_test
from hashlib import sha256
import base64
import json
import io
import time
# This class extends the HardwareWalletClient for ColdCard specific things
class ColdCardClient(HardwareWalletClient):
# device is an HID device that has already been opened.
def __init__(self, device):
    """Wrap an already-opened HID device in the Coldcard protocol client."""
    super(ColdCardClient, self).__init__(device)
    self.device = ColdcardDevice(dev=device)
# Must return a dict with the xpub
# Retrieves the public key at the specified BIP 32 derivation path
def get_pubkey_at_path(self, path):
    """Return {'xpub': ...} for the given BIP 32 derivation path.

    'h'/'H' hardened markers are normalized to the apostrophe form the
    Coldcard expects; on testnet the mainnet xpub is converted to a tpub.
    """
    normalized = path.replace('h', '\'').replace('H', '\'')
    xpub = self.device.send_recv(CCProtocolPacker.get_xpub(normalized), timeout=None)
    if self.is_testnet:
        xpub = xpub_main_2_test(xpub)
    return {'xpub': xpub}
# Must return a hex string with the signed transaction
# The tx must be in the combined unsigned transaction format
def sign_tx(self, tx):
self.device.check_mitm()
# Get psbt in hex and then make binary
fd = io.BytesIO(base64.b64decode(tx.serialize()))
# learn size (portable way)
offset = 0
sz = fd.seek(0, 2)
fd.seek(0)
left = sz
chk = sha256()
for pos in range(0, sz, MAX_BLK_LEN):
here = fd.read(min(MAX_BLK_LEN, left))
if not here: break
left -= len(here)
result = self.device.send_recv(CCProtocolPacker.upload(pos, sz, here))
assert result == pos
chk.update(here)
# do a verify
expect = chk.digest()
result = self.device.send_recv(CCProtocolPacker.sha256())
assert len(result) == 32
if result != expect:
raise ValueError("Wrong checksum:\nexpect: %s\n got: %s" % (b2a_hex(expect).decode('ascii'), b2a_hex(result).decode('ascii')))
# start the signing process
ok = self.device.send_recv(CCProtocolPacker.sign_transaction(sz, expect), timeout=None)
assert ok == None
print("Waiting for OK on the Coldcard...")
while 1:
time.sleep(0.250)
done = self.device.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
if done == None:
continue
break
if len(done) != 2:
raise ValueError('Failed: %r' % done)
result_len, result_sha = done
result = self.device.download_file(result_len, result_sha, file_number=1)
return {'psbt':base64.b64encode(result).decode()}
# Must return a base64 encoded string with the signed message
# The message can be any string. keypath is the bip 32 derivation path for the key to sign with
def sign_message(self, message, keypath):
raise NotImplementedError('The HardwareWalletClient base class does not '
'implement this method')
# Display address of specified type on the device. Only supports single-key based addresses.
def display_address(self, keypath, p2sh_p2wpkh, bech32):
raise NotImplementedError('The HardwareWalletClient base class does not '
'implement this method')
# Setup a new device
def setup_device(self):
raise NotImplementedError('The HardwareWalletClient base class does not '
'implement this method')
# Wipe this device
def wipe_device(self):
raise NotImplementedError('The HardwareWalletClient base class does not '
'implement this method')
# Close the device
def close(self):
self.device.close()
|
<reponame>jiahuei/sparse-image-captioning<filename>tests/test_train.py
# -*- coding: utf-8 -*-
"""
Created on 08 Jan 2021 17:39:15
@author: jiahuei
"""
import unittest
import os
from sparse_caption.opts import parse_opt
from sparse_caption.utils.config import Config
from .paths import TEST_DIRPATH, TEST_DATA_DIRPATH
class TestTrain(unittest.TestCase):
    """Smoke tests that run each captioning model (and pruning variant)
    end-to-end for a single tiny epoch."""

    def setUp(self) -> None:
        # Flags shared by every model / pruning configuration.
        self.common_args = (
            "--dataset mscoco_testing "
            f"--dataset_dir {TEST_DATA_DIRPATH} "
            f"--log_dir {os.path.join(TEST_DIRPATH, 'experiments')} "
            "--learning_rate 0.01 "
            "--optim_epsilon 0.01 "
            "--batch_size 2 "
            "--batch_size_eval 2 "
            "--save_checkpoint_every 2 "
            "--cache_min_free_ram 1.0 "
            "--max_epochs 1 "
            "--vocab_size 10 "
        )
        # Per-architecture flags, keyed by caption_model name.
        self.model_args = {
            "up_down_lstm": (
                "--caption_model up_down_lstm "
                "--id TESTING_UpDownLSTM "
                "--lr_scheduler cosine "
                "--rnn_size 8 "
                "--input_encoding_size 8 "
                "--att_hid_size 8 "
            ),
            "transformer": (
                "--caption_model transformer "
                "--id TESTING_Trans "
                "--lr_scheduler noam "
                "--d_model 8 "
                "--dim_feedforward 8 "
                "--num_layers 2 "
            ),
            "relation_transformer": (
                "--caption_model relation_transformer "
                "--id TESTING_RTrans "
                "--lr_scheduler noam "
                "--d_model 8 "
                "--dim_feedforward 8 "
                "--num_layers 2 "
            ),
        }
        # Per-pruning-method flags, keyed by prune_type name.
        self.prune_args = {
            "supermask": (
                "--prune_type supermask "
                "--prune_sparsity_target 0.9 "
                "--prune_supermask_sparsity_weight 120 "
            ),
            # "mag_grad_uniform": (
            #     "--prune_type mag_grad_uniform --prune_sparsity_target 0.9 "
            # ),
            "snip": "--prune_type snip --prune_sparsity_target 0.9 ",
            "mag_blind": "--prune_type mag_blind --prune_sparsity_target 0.9 ",
            "mag_uniform": "--prune_type mag_uniform --prune_sparsity_target 0.9 ",
            "mag_dist": "--prune_type mag_dist --prune_sparsity_target 0.9 ",
        }

    def _test_model(self, config, main_fn):
        # Run one training config; a missing "model_best*.pth" at the end of a
        # truncated run is tolerated, any other exception fails the subtest.
        name = f"{config.caption_model} with prune type: {config.get('prune_type', None)}"
        with self.subTest(f"Training model: {name}"):
            try:
                main_fn(config)
            except FileNotFoundError as err:
                tolerated = ("model_best.pth" in str(err)
                             or "model_best_pruned_sparse.pth" in str(err))
                if not tolerated:
                    self.fail(f"Training failed: {name}")
            except Exception:
                self.fail(f"Training failed: {name}")

    # noinspection PyTypeChecker
    def test_train_regular(self):
        from scripts.train_transformer import main

        for model_flags in self.model_args.values():
            cli = self.common_args + model_flags
            print(cli)
            parsed = parse_opt(cli.split())
            self._test_model(Config(**vars(parsed)), main)

    # noinspection PyTypeChecker
    def test_train_prune(self):
        from scripts.train_n_prune_transformer import main

        for model_name, model_flags in self.model_args.items():
            # The plain transformer has no pruning variant in this suite.
            if model_name == "transformer":
                continue
            model_flags = model_flags.replace(model_name, f"{model_name}_prune")
            for prune_name, prune_flags in self.prune_args.items():
                cli = self.common_args + model_flags + prune_flags
                print(cli)
                parsed = parse_opt(cli.split())
                parsed.log_dir += f"_{prune_name}"
                self._test_model(Config(**vars(parsed)), main)
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/unittest runner).
    unittest.main()
|
"""
Implementation of binary search trees.
"""
from __future__ import annotations
class Node:
    """A single node of a binary search tree."""

    def __init__(self, val: int) -> None:
        self.val = val
        # Children; values <= val live in the left subtree, > val in the right.
        self.left: Node | None = None
        self.right: Node | None = None


class BinarySearchTree:
    """Binary search tree storing integers; duplicates go to the left subtree."""

    def __init__(self) -> None:
        self.root: Node | None = None

    def insert(self, val: int) -> None:
        """Insert *val* into the tree."""
        if self.root is None:
            self.root = Node(val)
        else:
            self.__insert(self.root, val)

    def __insert(self, node: Node, val: int) -> None:
        """
        Recursive function for the above implementation of insert
        :param node: current node being checked
        :param val: value of node to be inserted
        :return: None
        """
        if val <= node.val:
            # Go into left subtree: insert here if the slot is free.
            if node.left is None:
                node.left = Node(val)
            else:
                self.__insert(node.left, val)
        else:
            # Go into right subtree.
            if node.right is None:
                node.right = Node(val)
            else:
                self.__insert(node.right, val)

    def inorder_traversal(self) -> None:
        """
        Print content of BST using inorder traversal
        (smallest to largest values), followed by a newline.
        :return: None
        """
        if self.root is None:
            return
        self.__inorder_traversal(self.root)
        print("")

    def __inorder_traversal(self, node: Node) -> None:
        """Recursive helper: left subtree, node, right subtree."""
        if node is None:
            return
        self.__inorder_traversal(node.left)
        print(str(node.val), end=" ")
        self.__inorder_traversal(node.right)

    def get_height(self) -> int:
        """
        Calculate the height of the BST's root
        (largest number of edges to the most distant leaf;
        -1 for an empty tree).
        :return: Height as integer
        """
        return self.__get_height(self.root)

    def __get_height(self, node) -> int:
        """Recursive helper; a missing child contributes -1."""
        if node is None:
            return -1
        left_height = self.__get_height(node.left)
        right_height = self.__get_height(node.right)
        return max(left_height, right_height) + 1

    def get_min(self) -> int:
        """Smallest value in the tree (AttributeError if the tree is empty)."""
        return BinarySearchTree.__get_min(self.root)

    @staticmethod
    def __get_min(node: Node) -> int:
        """
        Finds minimum value of a subtree: follow left children to the end.
        :param node: root of the tree or subtree
        :return: smallest value in subtree
        """
        curr = node
        while curr.left is not None:
            curr = curr.left
        return curr.val

    def get_max(self) -> int:
        """Largest value in the tree (AttributeError if the tree is empty)."""
        return BinarySearchTree.__get_max(self.root)

    @staticmethod
    def __get_max(node: Node) -> int:
        """
        Finds maximum value of a subtree.

        BUG FIX: the original looped ``while curr is not None: curr =
        curr.right`` and then dereferenced ``curr.val`` — curr is always None
        at that point, so get_max() always raised AttributeError. Walk while a
        right child exists instead (mirroring __get_min).
        :param node: root of the tree or subtree
        :return: largest value in subtree
        """
        curr = node
        while curr.right is not None:
            curr = curr.right
        return curr.val

    def find_node(self, val: int) -> bool:
        """
        Find node with value inside BST
        :param val: value to search for
        :return: boolean
        """
        return self.__find(self.root, val)

    def __find(self, node, val: int) -> bool:
        """Recursive helper: standard BST search."""
        if node is None:
            return False
        if node.val == val:
            return True
        if node.val > val:
            return self.__find(node.left, val)
        return self.__find(node.right, val)

    def is_valid(self) -> bool:
        """
        Check if a BST is valid by [definition](https://en.wikipedia.org/wiki/Binary_search_tree#Definition)
        :return: validity status
        """
        return self.__is_valid(self.root)

    def __is_valid(self, node) -> bool:
        """
        Recursive helper: every node's left subtree must be <= its value and
        its right subtree > its value, for the whole tree.
        """
        if node is None:
            return True
        return \
            self.is_subtree_lesser(node.left, node.val) and self.is_subtree_greater(node.right, node.val) \
            and self.__is_valid(node.left) and self.__is_valid(node.right)

    def is_subtree_greater(self, node, val: int) -> bool:
        """True if every value in the subtree rooted at *node* is > val."""
        if node is None:
            return True
        return \
            node.val > val \
            and self.is_subtree_greater(node.left, val) and self.is_subtree_greater(node.right, val)

    def is_subtree_lesser(self, node, val: int) -> bool:
        """True if every value in the subtree rooted at *node* is <= val."""
        if node is None:
            return True
        return \
            node.val <= val \
            and self.is_subtree_lesser(node.left, val) and self.is_subtree_lesser(node.right, val)

    def delete(self, val: int) -> Node:
        """
        Remove a node from the tree
        :param val: value of node to be deleted
        :return: root of updated tree
        """
        return self.__delete_node(self.root, val)

    def __delete_node(self, node, val: int):
        """
        Recursive helper for delete.
        :param node: current node being checked
        :param val: value of node to be deleted
        :return: root of updated (sub)tree
        """
        if node is None:
            return node
        if node.val > val:
            # Search the left subtree
            node.left = self.__delete_node(node.left, val)
        elif node.val < val:
            # Search the right subtree
            node.right = self.__delete_node(node.right, val)
        else:
            # Case 1 and 2: node has one or no children — splice the child in.
            if node.left is None:
                return node.right
            if node.right is None:
                return node.left
            # Case 3: two children — copy the in-order successor's value here
            # and delete that duplicate from the right subtree.
            min_val = self.__get_min(node.right)
            node.val = min_val
            node.right = self.__delete_node(node.right, min_val)
        return node

    def get_successor(self, val: int) -> int:
        """
        Find the next highest number in the tree
        :param val: value of node to get successor of
        :return: value of successor node, or -1 if val is absent or maximal
        """
        # Locate the node holding val.
        curr = self.root
        while curr is not None:
            if val < curr.val:
                curr = curr.left
            elif val > curr.val:
                curr = curr.right
            else:
                break
        if curr is None:
            return -1
        # Case 1: node has a right subtree — successor is its minimum.
        if curr.right is not None:
            temp = curr.right
            while temp.left is not None:
                temp = temp.left
            return temp.val
        # Case 2: no right subtree — successor is the deepest ancestor from
        # which we turned left while descending to curr.
        ancestor = self.root
        successor = None
        while ancestor is not curr:
            if curr.val < ancestor.val:
                successor = ancestor
                ancestor = ancestor.left
            else:
                ancestor = ancestor.right
        if successor is None:
            # We asked for the successor of the largest value in the BST.
            return -1
        return successor.val

    def get_predecessor(self, val: int) -> int:
        """
        Find the next lowest number in the tree
        :param val: value of node to get predecessor of
        :return: value of predecessor node, or -1 if val is absent or minimal
        """
        curr = self.root
        while curr is not None:
            if val < curr.val:
                curr = curr.left
            elif val > curr.val:
                curr = curr.right
            else:
                break
        if curr is None:
            return -1
        # Case 1: node has a left subtree — predecessor is its maximum.
        if curr.left is not None:
            temp = curr.left
            while temp.right is not None:
                temp = temp.right
            return temp.val
        # Case 2: no left subtree — predecessor is the deepest ancestor from
        # which we turned right while descending to curr.
        ancestor = self.root
        predecessor = None
        while ancestor is not curr:
            if val > ancestor.val:
                predecessor = ancestor
                ancestor = ancestor.right
            else:
                ancestor = ancestor.left
        if predecessor is None:
            # We asked for the predecessor of the smallest value in the BST.
            return -1
        return predecessor.val
def main():
    """Build a small sample BST and demonstrate the public API."""
    # Tree under construction:
    #
    #            50
    #           /  \
    #         40    60
    #              /  \
    #            55    80
    tree = BinarySearchTree()
    for value in (50, 40, 60, 80, 55):
        tree.insert(value)
    tree.inorder_traversal()

    # A deliberately broken tree: 30 forced into the left subtree of 20.
    broken = BinarySearchTree()
    broken.insert(20)
    broken.root.left = Node(30)
    print(broken.is_valid())

    print(tree.is_valid())
    tree.delete(80)
    tree.inorder_traversal()
    print(tree.get_successor(55))
    print(tree.get_predecessor(60))
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    main()
|
#!/usr/bin/env python3.7
'''
Flask dashboard that reports per-instance EC2 CPU utilization via CloudWatch.

Run with:
    FLASK_APP=hello.py flask run
'''
import argparse
from datetime import datetime, timedelta
import json
import time
import boto3
from botocore.exceptions import ClientError
#from flask_bootstrap import Bootstrap
from flask import Flask, render_template, request
app = Flask(__name__)
#bootstrap = Bootstrap(app)
#app.config['BOOTSTRAP_BOOTSWATCH_THEME'] = 'lumen'
# Shared boto3 session, reused by every request handler below.
SESSION = boto3.Session()
def get_cpu_utilization(mqueries, region, days):
    """
    Fetch CPU utilization metric data for instances.

    :param mqueries: list of CloudWatch MetricDataQuery dicts
    :param region: AWS region to query
    :param days: size of the look-back window, in days
    :return: the 'MetricDataResults' list from get_metric_data
    """
    client = SESSION.client('cloudwatch', region_name=region)
    # FIX: take a single clock reading so StartTime and EndTime describe a
    # consistent window (the original called datetime.now() twice, leaving
    # the two endpoints read from slightly different instants).
    now = datetime.now()
    time_from = now - timedelta(days=days)
    response = client.get_metric_data(
        MetricDataQueries=mqueries,
        StartTime=time_from,
        EndTime=now
    )
    return response['MetricDataResults']
def scan_region(region):
    """Return [{'id': ..., 'type': ...}] for every EC2 instance in *region*."""
    ec2 = SESSION.client('ec2', region_name=region)
    found = []
    # describe_instances is paginated; walk every page and reservation.
    for page in ec2.get_paginator('describe_instances').paginate():
        for reservation in page['Reservations']:
            found.extend(
                {"id": inst['InstanceId'], "type": inst['InstanceType']}
                for inst in reservation['Instances']
            )
    print(f'Instances found: {len(found)}')
    return found
@app.route('/', methods=['GET'])
def main():
    """Render the landing page with the list of selectable EC2 regions."""
    return render_template('index.html', regions=SESSION.get_available_regions('ec2'))
@app.route('/main_scan', methods=['GET'])
def main_scan():
    """Scan one region (query parameter 'region') and render a CPU table.

    Builds two CloudWatch queries per instance (Average and Maximum
    CPUUtilization over a 30-day window), fetches them in one
    get_metric_data call, and renders the results as HTML table rows plus
    a region-wide average percentage.
    """
    start_time = time.time()
    execution_output = ''
    days = 30
    percent = 0
    # Single-element list keeps the loop shape reusable for multi-region scans.
    regions = [request.args.get('region', '')]
    for region in regions:
        try:
            print(f'proceeding {region}')
            region_average = []
            mqueries = []
            cpu_utilization_map = {}
            instances = scan_region(region)
            for instance in instances:
                # Placeholder entry; filled in once CloudWatch results arrive.
                cpu_utilization_map[instance["id"]] = {"type": instance["type"], "average": "", "maximum": ""}
                for stat in ['Average', 'Maximum']:
                    mqueries.append(
                        {
                            # NOTE(review): 'i-' is stripped from the query Id,
                            # presumably to satisfy CloudWatch query-id naming
                            # rules — confirm against the get_metric_data API.
                            'Id': f'{stat.lower()}_{instance["id"].replace("i-", "")}',
                            'Label': instance["id"],
                            'MetricStat': {
                                'Metric': {
                                    'Namespace': 'AWS/EC2',
                                    'MetricName': 'CPUUtilization',
                                    'Dimensions': [
                                        {
                                            'Name': 'InstanceId',
                                            'Value': instance["id"]
                                        },
                                    ]
                                },
                                # One datapoint covering the whole window.
                                'Period': (days * 86400),
                                'Stat': stat,
                                'Unit': 'Percent'
                            }
                        },
                    )
            if mqueries:
                cpu_utilization_request = get_cpu_utilization(mqueries, region, days)
                # Route each result back to its instance via the 'Label';
                # the 'Id' prefix tells average and maximum apart.
                for cpu_utilization in cpu_utilization_request:
                    # calculating average only for instances with load > 0
                    if "average" in cpu_utilization['Id']:
                        if cpu_utilization['Values']:
                            cpu_utilization_map[cpu_utilization['Label']]["average"] = cpu_utilization['Values'][0]
                            region_average.append(cpu_utilization['Values'][0])
                        else:
                            cpu_utilization_map[cpu_utilization['Label']]["average"] = 0
                    else:
                        if cpu_utilization['Values']:
                            cpu_utilization_map[cpu_utilization['Label']]["maximum"] = cpu_utilization['Values'][0]
                        else:
                            cpu_utilization_map[cpu_utilization['Label']]["maximum"] = 0
                # Emit one HTML table row per instance.
                for ec2_instance in cpu_utilization_map:
                    execution_output += f'<tr class="item"><td>{region}</td><td>{ec2_instance}</td><td>{cpu_utilization_map[ec2_instance]["type"]}</td><td>{round(cpu_utilization_map[ec2_instance]["average"], 2)}</td><td>{round(cpu_utilization_map[ec2_instance]["maximum"], 2)}</td></tr>'
                if len(region_average):
                    percent = round(sum(region_average)/len(region_average), 2)
        except ClientError as exc:
            # Disabled regions reject API calls with AuthFailure; skip them.
            if exc.response['Error']['Code'] == "AuthFailure":
                print(f"looks like {region} is disabled, skipping")
                continue
            else:
                raise
    return render_template('main_scan.html', rseconds=round((time.time() - start_time), 2), execution_output=execution_output, percent=percent)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Cisco Systems, Inc.
# @author: <NAME>, Cisco Systems, Inc.
from quantum.plugins.cisco.common import cisco_constants as const
class CiscoUCSMFakeDriver():
    """UCSM Fake Driver — stands in for a real UCS Manager connection."""

    def __init__(self):
        pass

    def _get_blade_interfaces(self, chassis_number, blade_number, ucsm_ip,
                              ucsm_username, ucsm_password):
        """Return a dict of 20 fake dynamic blade interfaces keyed by DN."""
        blade_interfaces = {}
        for element in range(20):
            dist_name = "dn" + str(element)
            # (The original guarded on `if dist_name:` — always true for a
            # non-empty literal, so the guard was dropped.)
            order = str(element)
            rhel_name = "eth" + str(element)
            blade_interface = {
                const.BLADE_INTF_DN: dist_name,
                const.BLADE_INTF_ORDER: order,
                const.BLADE_INTF_LINK_STATE: None,
                const.BLADE_INTF_OPER_STATE: None,
                const.BLADE_INTF_INST_TYPE: const.BLADE_INTF_DYNAMIC,
                const.BLADE_INTF_RHEL_DEVICE_NAME: rhel_name,
            }
            blade_interfaces[dist_name] = blade_interface
        return blade_interfaces

    def _get_blade_interface_state(self, blade_intf, ucsm_ip,
                                   ucsm_username, ucsm_password):
        """Populate fake state fields on a single blade interface dict."""
        blade_intf[const.BLADE_INTF_LINK_STATE] = \
            const.BLADE_INTF_STATE_UNKNOWN
        blade_intf[const.BLADE_INTF_OPER_STATE] = \
            const.BLADE_INTF_STATE_UNKNOWN
        blade_intf[const.BLADE_INTF_INST_TYPE] = \
            const.BLADE_INTF_DYNAMIC

    def create_vlan(self, vlan_name, vlan_id, ucsm_ip, ucsm_username,
                    ucsm_password):
        pass

    def create_profile(self, profile_name, vlan_name, ucsm_ip, ucsm_username,
                       ucsm_password):
        pass

    def change_vlan_in_profile(self, profile_name, old_vlan_name,
                               new_vlan_name, ucsm_ip, ucsm_username,
                               ucsm_password):
        pass

    def get_blade_data(self, chassis_number, blade_number, ucsm_ip,
                       ucsm_username, ucsm_password):
        """
        Returns only the dynamic interfaces on the blade
        """
        blade_interfaces = self._get_blade_interfaces(chassis_number,
                                                      blade_number,
                                                      ucsm_ip,
                                                      ucsm_username,
                                                      ucsm_password)
        # BUG FIX: iterate over a snapshot of the keys. The original iterated
        # blade_interfaces.keys() while pop()-ing entries, which raises
        # "RuntimeError: dictionary changed size during iteration" on Python 3.
        for blade_intf in list(blade_interfaces):
            self._get_blade_interface_state(blade_interfaces[blade_intf],
                                            ucsm_ip, ucsm_username,
                                            ucsm_password)
            if ((blade_interfaces[blade_intf][const.BLADE_INTF_INST_TYPE] !=
                 const.BLADE_INTF_DYNAMIC)):
                blade_interfaces.pop(blade_intf)
        return blade_interfaces

    def delete_vlan(self, vlan_name, ucsm_ip, ucsm_username, ucsm_password):
        pass

    def delete_profile(self, profile_name, ucsm_ip, ucsm_username,
                       ucsm_password):
        pass
|
"""
the :mod:`linear` module includes linear features-based algorithms.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
import numpy as np
from sklearn import linear_model
from .predictions import PredictionImpossible
from .algo_base import AlgoBase
class Lasso(AlgoBase):
    r"""A basic lasso algorithm with user-item interaction terms.

    The prediction :math:`\hat{r}_{ui}` is set as:

    .. math::
        \hat{r}_{ui} = \alpha_1 + \alpha_2^\top y_u + \alpha_3^\top z_i +
        \alpha_4^\top \text{vec}(y_u \otimes z_i)

    where :math:`\alpha_1 \in \mathbb{R}, \alpha_2 \in \mathbb{R}^o, \alpha_3
    \in \mathbb{R}^p` and :math:`\alpha_4 \in \mathbb{R}^{op}` are coefficient
    vectors, and :math:`\otimes` represent the Kronecker product of two vectors
    (i.e., all possible cross-product combinations).

    (Docstring made a raw string: in the original non-raw string the LaTeX
    sequences ``\alpha`` and ``\top`` contained the real escape characters
    ``\a`` (BEL) and ``\t`` (TAB), corrupting the rendered text.)

    Args:
        add_interactions(bool): Whether to add user-item interaction terms.
            Optional, default is True.
        other args: See ``sklearn`` documentation for ``linear_model.Lasso``.
    """
    def __init__(self, add_interactions=True, alpha=1.0, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000, tol=0.0001,
                 positive=False, random_state=None, selection='cyclic',
                 **kwargs):
        AlgoBase.__init__(self, **kwargs)
        self.add_interactions = add_interactions
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
        # sample_weight option is currently not supported in scikit-learn
        # Lasso().fit()
        warnings.warn('Lasso() currently does not use sample_weight',
                      UserWarning)
    def fit(self, trainset):
        """Fit the lasso model on *trainset* and return self."""
        AlgoBase.fit(self, trainset)
        self.lasso(trainset)
        return self
    def lasso(self, trainset):
        """Build the design matrix from user/item features and fit sklearn's Lasso."""
        if (self.trainset.n_user_features == 0 or
                self.trainset.n_item_features == 0):
            raise ValueError('trainset does not contain user and/or item '
                             'features.')
        n_ratings = self.trainset.n_ratings
        n_uf = self.trainset.n_user_features
        n_if = self.trainset.n_item_features
        u_features = self.trainset.u_features
        i_features = self.trainset.i_features
        uf_labels = self.trainset.user_features_labels
        if_labels = self.trainset.item_features_labels
        # One design-matrix row per rating: user features then item features.
        X = np.empty((n_ratings, n_uf + n_if))
        y = np.empty((n_ratings,))
        # Weights are collected but not used (see warning in __init__).
        w = np.empty((n_ratings,))
        for k, (uid, iid, rating, weight) in enumerate(
                self.trainset.all_ratings(sample_weight=True)):
            y[k] = rating
            X[k, :n_uf] = u_features[uid]
            X[k, n_uf:] = i_features[iid]
            w[k] = weight
        coef_labels = uf_labels + if_labels
        if self.add_interactions:
            # Append all user-feature x item-feature cross products.
            temp = np.array([X[:, v] * X[:, j] for v in range(n_uf)
                             for j in range(n_uf, n_uf + n_if)]).T
            X = np.concatenate([X, temp], axis=1)
            temp = [coef_labels[v] + '*' + coef_labels[j] for v in range(n_uf)
                    for j in range(n_uf, n_uf + n_if)]
            coef_labels += temp
        reg = linear_model.Lasso(
            alpha=self.alpha, fit_intercept=self.fit_intercept,
            normalize=self.normalize, precompute=self.precompute,
            max_iter=self.max_iter, tol=self.tol, positive=self.positive,
            random_state=self.random_state, selection=self.selection)
        reg.fit(X, y)
        self.X = X
        self.y = y
        self.coef = reg.coef_
        self.coef_labels = coef_labels
        self.intercept = reg.intercept_
    def estimate(self, u, i, u_features, i_features):
        """Predict a rating from raw user/item feature vectors.

        Raises PredictionImpossible when feature lengths don't match the
        trainset's feature dimensions.
        """
        n_uf = self.trainset.n_user_features
        n_if = self.trainset.n_item_features
        if (len(u_features) != n_uf or
                len(i_features) != n_if):
            raise PredictionImpossible(
                'User and/or item features are missing.')
        # Rebuild one design-matrix row exactly as in lasso().
        X = np.concatenate([u_features, i_features])
        if self.add_interactions:
            temp = np.array([X[v] * X[j] for v in range(n_uf)
                             for j in range(n_uf, n_uf + n_if)])
            X = np.concatenate([X, temp])
        est = self.intercept + np.dot(X, self.coef)
        return est
|
<gh_stars>0
"""Test pydeCONZ utilities.
pytest --cov-report term-missing --cov=pydeconz.utils tests/test_utils.py
"""
import asyncio
from unittest.mock import Mock, patch
from asynctest import CoroutineMock
import pytest
import aiohttp
from pydeconz import errors, utils
API_KEY = "1234567890"
IP = "127.0.0.1"
PORT = "80"
@pytest.mark.asyncio
async def test_get_api_key() -> None:
    """async_get_api_key returns the key from a successful API response."""
    mock_session = Mock()
    fake_reply = CoroutineMock(return_value=[{"success": {"username": API_KEY}}])
    with patch("pydeconz.utils.async_request", new=fake_reply):
        key = await utils.async_get_api_key(mock_session, IP, PORT)
    assert key == API_KEY
@pytest.mark.asyncio
async def test_get_api_key_with_credentials() -> None:
    """async_get_api_key forwards user credentials and returns the key."""
    mock_session = Mock()
    fake_reply = CoroutineMock(return_value=[{"success": {"username": API_KEY}}])
    with patch("pydeconz.utils.async_request", new=fake_reply):
        key = await utils.async_get_api_key(
            mock_session, IP, PORT, username="user", password="<PASSWORD>"
        )
    assert key == API_KEY
@pytest.mark.asyncio
async def test_delete_api_key() -> None:
    """async_delete_api_key completes without raising."""
    mock_session = Mock()
    fake_reply = CoroutineMock(return_value=True)
    with patch("pydeconz.utils.async_request", new=fake_reply):
        await utils.async_delete_api_key(mock_session, IP, PORT, API_KEY)
@pytest.mark.asyncio
async def test_delete_all_keys() -> None:
    """async_delete_all_keys iterates the whitelist without raising.

    It does not care what happens with each underlying delete_api_key call.
    """
    mock_session = Mock()
    fake_reply = CoroutineMock(return_value={"whitelist": {1: "123", 2: "456"}})
    with patch("pydeconz.utils.async_request", new=fake_reply):
        await utils.async_delete_all_keys(mock_session, IP, PORT, API_KEY)
@pytest.mark.asyncio
async def test_get_bridge_id() -> None:
    """async_get_bridge_id returns the bridge id from the config payload."""
    mock_session = Mock()
    fake_reply = CoroutineMock(return_value={"bridgeid": "12345"})
    with patch("pydeconz.utils.async_request", new=fake_reply):
        bridge_id = await utils.async_get_bridge_id(mock_session, IP, PORT, API_KEY)
    assert bridge_id == "12345"
@pytest.mark.asyncio
async def test_discovery() -> None:
    """Test a successful call to discovery normalizes each entry."""
    session = Mock()
    with patch(
        "pydeconz.utils.async_request",
        new=CoroutineMock(
            return_value=[
                {
                    "id": "123456FFFFABCDEF",
                    "internalipaddress": "host1",
                    "internalport": "port1",
                },
                {
                    "id": "234567BCDEFG",
                    "internalipaddress": "host2",
                    "internalport": "port2",
                },
            ]
        ),
    ):
        response = await utils.async_discovery(session)
    # NOTE(review): the first expected bridgeid "123456ABCDEF" implies
    # async_discovery strips the embedded "FFFF" from "123456FFFFABCDEF" —
    # confirm against pydeconz.utils.async_discovery.
    assert [
        {"bridgeid": "123456ABCDEF", "host": "host1", "port": "port1"},
        {"bridgeid": "234567BCDEFG", "host": "host2", "port": "port2"},
    ] == response
@pytest.mark.asyncio
async def test_discovery_response_empty() -> None:
    """An empty discovery payload yields an empty (falsy) result."""
    mock_session = Mock()
    with patch("pydeconz.utils.async_request", new=CoroutineMock(return_value={})):
        discovered = await utils.async_discovery(mock_session)
    assert not discovered
@pytest.mark.asyncio
async def test_request() -> None:
    """A JSON response is decoded and returned as-is."""
    fake_response = Mock()
    fake_response.content_type = "application/json"
    fake_response.json = CoroutineMock(return_value={"json": "response"})
    fake_session = CoroutineMock(return_value=fake_response)
    assert await utils.async_request(fake_session, "url") == {"json": "response"}
@pytest.mark.asyncio
async def test_request_fails_client_error() -> None:
    """Test that an aiohttp ClientError is re-raised as RequestError."""
    session = CoroutineMock(side_effect=aiohttp.ClientError)
    with pytest.raises(errors.RequestError) as e_info:
        await utils.async_request(session, "url")
    assert str(e_info.value) == "Error requesting data from url: "
@pytest.mark.asyncio
async def test_request_fails_invalid_content() -> None:
    """Test that a non-JSON content type raises ResponseError."""
    response = Mock()
    response.content_type = "application/binary"
    session = CoroutineMock(return_value=response)
    with pytest.raises(errors.ResponseError) as e_info:
        await utils.async_request(session, "url")
    assert str(e_info.value) == "Invalid content type: application/binary"
@pytest.mark.asyncio
async def test_request_fails_raise_error() -> None:
    """Test that an error payload is raised as its mapped exception.

    Error type 1 maps to Unauthorized.
    """
    response = Mock()
    response.content_type = "application/json"
    response.json = CoroutineMock(
        return_value=[
            {"error": {"type": 1, "address": "address", "description": "description"}}
        ]
    )
    session = CoroutineMock(return_value=response)
    with pytest.raises(errors.Unauthorized) as e_info:
        await utils.async_request(session, "url")
    assert str(e_info.value) == "address description"
|
import pandas as pd
'''Takes all of the merged block, block group, and tract files, renames the headings, adds state
abbreviations, and reorders the columns. Creates 2 new csvs, big_table and big_table_pro.
The first does not have prorated values, the second does. '''
# leading in and changing column names of all 8 files (4 zooms, 2 units)
unit_name = ["tract", "bg"]
files = ["tigerline", "cb500k", "cb5m", "cb20m"]
for shape in files:
    for unit in unit_name:
        # create (g) and (t) labels for column names
        if(unit == "tract"):
            u = " (t) "
        if(unit == "bg"):
            u = " (g) "
        # read in the tigerline shapefiles (separate for each unit)
        df = pd.read_csv("./tables_merged/" + shape + "_" + unit + ".csv", dtype={"geoid": str})
        # change the names of the columns to be dependnet on the unit and the percent
        percent_list = ["0.5", "0.1"]
        for percent in percent_list:
            # using richard's column labels
            df = df.rename(columns={"dpolsby_" + percent: "a/p^2" + u + percent,
                                    "dpolsby_pro_" + percent: "pro_a/p^2" + u + percent,
                                    "dpop_polsby_" + percent: "w_a/p^2" + u + percent,
                                    "dpop_polsby_pro_" + percent: "pro_w_a/p^2" + u + percent,
                                    "rank_dpolsby_" + percent: "rank_a/p^2" + u + percent,
                                    "rank_dpolsby_pro_" + percent: "rank_pro_a/p^2" + u + percent,
                                    "rank_dpop_polsby_" + percent: "rank_w_a/p^2" + u + percent,
                                    "rank_dpop_polsby_pro_" + percent: "rank_pro_w_a/p^2" + u + percent,
                                    'dperim_' + percent: 'perim' + u + percent,
                                    'dpperim_' + percent: 'w_perim' + u + percent,
                                    'dperim_pro_' + percent: 'pro_perim' + u + percent,
                                    'dpperim_pro_' + percent: 'pro_w_perim' + u + percent,
                                    'darea_' + percent: 'area' + u + percent,
                                    'dparea_' + percent: 'w_area' + u + percent,
                                    'darea_pro_' + percent: 'pro_area' + u + percent,
                                    'dparea_pro_' + percent: 'pro_w_area' + u + percent})
        # change continuous names, which aren't dependent on percent or unit
        df = df.rename(columns={'carea': 'c_area', 'cperim': 'c_perim',
                                'cpolsby': 'c_a/p^2', 'rank_cpolsby': 'rank_c_a/p^2'})
        # save the relabelled files in their respective new csvs
        df.to_csv("./tables_merged/" + shape + "_" + unit + ".csv")
# read in the tract and block group cvs that were just created (this could be avoided but it's ok)
tract = pd.read_csv("./tables_merged/tigerline_tract.csv", dtype={"geoid": str})
bg = pd.read_csv("./tables_merged/tigerline_bg.csv", dtype={"geoid": str})
# merge tracts to block groups by geoid
tract_bg = tract.merge(bg, left_on = "geoid", right_on = "geoid")
# read in the blocks csv
block = pd.read_csv("./tables_merged/merged_blocks.csv", dtype={"geoid": str})
# create duplicate columns for blocks for the percent, that is perim (b) 0.5 = perim (b) 0.1
# because blocks nest (more or less) in districts
for perc in ["0.1", "0.5"]:
    block['perim (b) ' + perc] = block['perim (b)']
    block['w_perim (b) ' + perc] = block['w_perim (b)']
    block['area (b) ' + perc] = block['area (b)']
    block['w_area (b) ' + perc] = block['w_area (b)']
block = block.drop(['perim (b)', 'w_perim (b)', 'area (b)', 'w_area (b)'], axis=1)
# merge tract_bg combo with blocks and rename some duplicate columns after merge
result = tract_bg.merge(block, left_on = "geoid", right_on = "geoid")
result = result.drop(['Unnamed: 0_x', 'Unnamed: 0.1_x', 'Unnamed: 0_y', 'Unnamed: 0.1_y',
                      'c_area_y', 'c_perim_y', 'c_a/p^2_y', 'rank_c_a/p^2_y'], axis=1)
result = result.rename(columns={'c_area_x': 'c_area', 'c_perim_x': 'c_perim',
                                'c_a/p^2_x': 'c_a/p^2', 'rank_c_a/p^2_x': 'rank_c_a/p^2'})
# the following section reorders the columns and does not compute anything
rank1, rank2, score1, score2, perim1, perim2, area1, area2 = ([] for i in range(8))
rank1p, rank2p, score1p, score2p, perim1p, perim2p, area1p, area2p = ([] for i in range(8))
for perc in ["0.1", "0.5"]:
    for u in [" (b) ", " (g) ", " (t) "]:
        rank1 = rank1 + ['rank_w_a/p^2' + u + perc]
        rank2 = rank2 + ['rank_a/p^2' + u + perc]
        score1 = score1 + ['w_a/p^2' + u + perc]
        score2 = score2 + ['a/p^2' + u + perc]
        # blocks have no prorated columns, so skip them for the *_p lists
        if u != " (b) ":
            rank1p = rank1p + ['rank_pro_w_a/p^2' + u + perc]
            rank2p = rank2p + ['rank_pro_a/p^2' + u + perc]
            score1p = score1p + ['pro_w_a/p^2' + u + perc]
            score2p = score2p + ['pro_a/p^2' + u + perc]
for u in [" (b) ", " (g) ", " (t) "]:
    for perc in ["0.1", "0.5"]:
        perim1 = perim1 + ['perim' + u + perc]
        perim2 = perim2 + ['w_perim' + u + perc]
        area1 = area1 + ['area' + u + perc]
        area2 = area2 + ['w_area' + u + perc]
        if u != " (b) ":
            perim1p = perim1p + ['pro_perim' + u + perc]
            perim2p = perim2p + ['pro_w_perim' + u + perc]
            area1p = area1p + ['pro_area' + u + perc]
            area2p = area2p + ['pro_w_area' + u + perc]
# Adding state abbreviations
fips = pd.read_csv('../state_fips.txt', sep='\t', lineterminator='\n', dtype={"STATE": str, "FIP": str})
fips_dict = {}
for i, row in fips.iterrows():
    fips_dict.update({row["FIP"]:row["ABBREVIATION"]})
abbrev = []
# first two characters of a geoid are the state FIPS code
for i in result['geoid']:
    abbrev.append(fips_dict[i[:2]])
result['state'] = abbrev
result.sort_values('geoid', inplace=True)
result = result.reset_index(drop=True)
# create two csvs: one with prorated values, one without
result = result.rename(columns={'c_a/p^2': 'c_4pi*a/p^2', 'rank_c_a/p^2': 'rank_c_4pi*a/p^2'})
contin = ['geoid', 'state', 'c_perim', 'c_area', 'c_4pi*a/p^2', 'rank_c_4pi*a/p^2']
result_nopro = result[contin + rank1 + rank2 + score1 + score2 + perim1 + perim2 + area1 + area2]
result_pro = result[contin + rank1 + rank1p + rank2 + rank2p +
                    score1 + score1p + score2 + score2p +
                    perim1 + perim1p + perim2 + perim2p +
                    area1 + area1p + area2 + area2p]
# Write to CSV here to keep old header names
#result_nopro.to_csv("./big_table.csv") # length 54
#result_pro.to_csv("./big_table_pro.csv") # length 86 because 54+32 for prorated
# Set new header names
# NOTE(review): the 'c' -> 'cont' lambda replaces EVERY 'c' in a column name;
# it works only because, at this point, 'c' appears solely in the continuous
# columns ('c_perim', 'c_area', ...). Verify before adding new columns.
# NOTE(review): renaming a column-sliced frame in place may trigger pandas'
# SettingWithCopyWarning — confirm behavior on the pandas version in use.
result_nopro.rename(columns=lambda x: x.replace(' (b) ', '_b_'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace(' (g) ', '_g_'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace(' (t) ', '_t_'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('c', 'cont'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('4pi*a/p^2', 'pp'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('w_a/p^2', 'disc_w_pp'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('a/p^2', 'disc_pp'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('area', 'disc_area'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('w_disc_area', 'disc_w_area'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('perim', 'disc_perim'), inplace=True)
result_nopro.rename(columns=lambda x: x.replace('w_disc_perim', 'disc_w_perim'), inplace=True)
result_nopro = result_nopro.rename(columns={'cont_disc_perim': 'cont_perim', 'cont_disc_area': 'cont_area'})
result_pro.rename(columns=lambda x: x.replace(' (b) ', '_b_'), inplace=True)
result_pro.rename(columns=lambda x: x.replace(' (g) ', '_g_'), inplace=True)
result_pro.rename(columns=lambda x: x.replace(' (t) ', '_t_'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('c', 'cont'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('4pi*a/p^2', 'pp'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('w_a/p^2', 'disc_w_pp'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('a/p^2', 'disc_pp'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('area', 'disc_area'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('w_disc_area', 'disc_w_area'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('perim', 'disc_perim'), inplace=True)
result_pro.rename(columns=lambda x: x.replace('w_disc_perim', 'disc_w_perim'), inplace=True)
result_pro = result_pro.rename(columns={'cont_disc_perim': 'cont_perim', 'cont_disc_area': 'cont_area'})
result_nopro.to_csv("./big_table.csv") # length 54
result_pro.to_csv("./big_table_pro.csv") # length 86 because 54+32 for prorated
header_nopro = ['', '', 'CONTINUOUS:', 'CONTINUOUS:', 'CONTINUOUS SCORE:', 'CONTINUOUS RANK:',
'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:',
'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:',
'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:',
'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:',
'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:',
'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:',
'AREA:', 'AREA:', 'AREA:', 'AREA:', 'AREA:', 'AREA:',
'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:']
header_pro = ['', '', 'CONTINUOUS:', 'CONTINUOUS:', 'CONTINUOUS SCORE:', 'CONTINUOUS RANK:',
'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:', 'POP RANK:',
'PRO POP RANK:', 'PRO POP RANK:', 'PRO POP RANK:', 'PRO POP RANK:',
'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:', 'RANKING:',
'PRO RANKING:', 'PRO RANKING:', 'PRO RANKING:', 'PRO RANKING:',
'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:', 'POP SCORE:',
'PRO POP SCORE:', 'PRO POP SCORE:', 'PRO POP SCORE:','PRO POP SCORE:',
'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:', 'SCORE:',
'PRO SCORE:', 'PRO SCORE:', 'PRO SCORE:', 'PRO SCORE:',
'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:', 'PERIM:',
'PRO PERIM:', 'PRO PERIM:', 'PRO PERIM:', 'PRO PERIM:',
'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:', 'POP PERIM:',
'PRO POP PERIM:', 'PRO POP PERIM:', 'PRO POP PERIM:', 'PRO POP PERIM:',
'AREA:', 'AREA:', 'AREA:', 'AREA:', 'AREA:', 'AREA:',
'PRO AREA:', 'PRO AREA:', 'PRO AREA:', 'PRO AREA:',
'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:', 'POP AREA:',
'PRO POP AREA:', 'PRO POP AREA:', 'PRO POP AREA:', 'PRO POP AREA:']
result_nopro.columns = pd.MultiIndex.from_tuples(list(zip(result_nopro.columns, header_nopro)))
result_pro.columns = pd.MultiIndex.from_tuples(list(zip(result_pro.columns, header_pro)))
result_nopro.to_csv("./stylized/style_big_table.csv") # length 54
result_pro.to_csv("./stylized/style_big_table_pro.csv") # length 86 because 54+32 for prorated
# richard's table has 48 columns
# ours (without prorated) has 54 because 48 + 4 for contin + 1 for geoid + 1 for state
|
"""
Cartesian fields
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from .coordinate_handler import \
CoordinateHandler, \
_unknown_coord, \
_get_coord_fields
import yt.visualization._MPL as _MPL
class CartesianCoordinateHandler(CoordinateHandler):
    """Coordinate handler for plain cartesian (x, y, z) geometries.

    All conversions to/from cartesian are identity operations; pixelization
    is delegated to the C pixelizers in ``yt.visualization._MPL``.
    """

    def __init__(self, ds, ordering = ('x','y','z')):
        # `ordering` fixes which dataset axis maps to x/y/z.
        super(CartesianCoordinateHandler, self).__init__(ds, ordering)

    def setup_fields(self, registry):
        """Register the index fields (widths, path elements, centers, volume).

        For each axis this adds "d<ax>" (cell width), "path_element_<ax>"
        and "<ax>" (cell-center coordinate). In cartesian geometry the path
        element along an axis equals the cell width, so both use ``f1``.
        """
        for axi, ax in enumerate(self.axis_order):
            f1, f2 = _get_coord_fields(axi)
            registry.add_field(("index", "d%s" % ax), function = f1,
                               display_field = False,
                               units = "code_length")
            registry.add_field(("index", "path_element_%s" % ax), function = f1,
                               display_field = False,
                               units = "code_length")
            registry.add_field(("index", "%s" % ax), function = f2,
                               display_field = False,
                               units = "code_length")

        def _cell_volume(field, data):
            # dx * dy * dz; copy first so the in-place multiplies do not
            # mutate the cached "dx" field data
            rv = data["index", "dx"].copy(order='K')
            rv *= data["index", "dy"]
            rv *= data["index", "dz"]
            return rv
        registry.add_field(("index", "cell_volume"), function=_cell_volume,
                           display_field=False, units = "code_length**3")
        registry.check_derived_fields(
            [("index", "dx"), ("index", "dy"), ("index", "dz"),
             ("index", "x"), ("index", "y"), ("index", "z"),
             ("index", "cell_volume")])

    def pixelize(self, dimension, data_source, field, bounds, size,
                 antialias = True, periodic = True):
        # dimension < 3 selects an axis-aligned pixelization along that axis;
        # anything else is treated as an oblique (cutting-plane) pixelization.
        if dimension < 3:
            return self._ortho_pixelize(data_source, field, bounds, size,
                                        antialias, dimension, periodic)
        else:
            return self._oblique_pixelize(data_source, field, bounds, size,
                                          antialias)

    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
                        dim, periodic):
        # We should be using fcoords
        # Build the 2-element period for the image plane axes of `dim`.
        period = self.period[:2].copy() # dummy here
        period[0] = self.period[self.x_axis[dim]]
        period[1] = self.period[self.y_axis[dim]]
        if hasattr(period, 'in_units'):
            # strip unit metadata down to raw code_length values for the C code
            period = period.in_units("code_length").d
        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                             data_source['pdx'], data_source['pdy'],
                             data_source[field], size[0], size[1],
                             bounds, int(antialias),
                             period, int(periodic)).transpose()
        return buff

    def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
        # sort cells by dx, largest first — presumably so finer cells are
        # deposited last and win; confirm against _MPL.CPixelize semantics
        indices = np.argsort(data_source['dx'])[::-1]
        buff = _MPL.CPixelize(data_source['x'], data_source['y'],
                              data_source['z'], data_source['px'],
                              data_source['py'], data_source['pdx'],
                              data_source['pdy'], data_source['pdz'],
                              data_source.center, data_source._inv_mat, indices,
                              data_source[field], size[0], size[1], bounds).transpose()
        return buff

    def convert_from_cartesian(self, coord):
        # identity: the native coordinate system is already cartesian
        return coord

    def convert_to_cartesian(self, coord):
        # identity: the native coordinate system is already cartesian
        return coord

    def convert_to_cylindrical(self, coord):
        # NOTE(review): cartesian_to_cylindrical is not imported in this
        # chunk — verify the module-level imports before relying on this.
        center = self.ds.domain_center
        return cartesian_to_cylindrical(coord, center)

    def convert_from_cylindrical(self, coord):
        # NOTE(review): cylindrical_to_cartesian is not imported in this
        # chunk — verify the module-level imports before relying on this.
        center = self.ds.domain_center
        return cylindrical_to_cartesian(coord, center)

    def convert_to_spherical(self, coord):
        raise NotImplementedError

    def convert_from_spherical(self, coord):
        raise NotImplementedError

    # For an image of axis X, _x_pairs/_y_pairs give the in-plane axes.
    _x_pairs = (('x', 'y'), ('y', 'z'), ('z', 'x'))
    _y_pairs = (('x', 'z'), ('y', 'x'), ('z', 'y'))

    @property
    def period(self):
        # the periodicity length along each axis is the full domain width
        return self.ds.domain_width
|
<reponame>PerFuchs/edge-frames<filename>src/sharesCalculator.py
import csv
import itertools
from functools import reduce
from typing import List, Tuple, Dict
from string import ascii_lowercase
import operator as op
# from poibin.poibin import PoiBin
from collections import deque
from math import sqrt, ceil, isclose
def workload_per_worker(pattern, config: Dict[str, int]):
    """Expected fraction of edge tuples a single worker receives.

    For each pattern edge (a, b) a tuple is replicated to a worker with
    probability 1 / (config[a] * config[b]); this accumulates the probability
    that at least one edge assigns the tuple to the worker.
    """
    _, edges = pattern
    p_not_yet_assigned = 1  # probability no earlier edge hit this worker
    expected_fraction = 0
    for src, dst in edges:
        p_hit = 1 / (config[src] * config[dst])
        expected_fraction += p_hit * p_not_yet_assigned
        p_not_yet_assigned = p_not_yet_assigned * (1 - p_hit)
    return expected_fraction
def best_configuration(workers: int, edges: List[Tuple[str, str]], vertices: List[str]):
    """
    best configuration as defined in "From Theory to Practice":
    most even configuration with lowest workload

    Explores hypercuboid worker grids (one dimension per vertex) whose total
    size does not exceed ``workers``, via depth-first search from the
    all-ones grid, growing one dimension at a time.
    """
    min_workload = 1.0
    best_conf = dict(zip(vertices, [0 for v in vertices]))
    visited = set()
    to_visit = deque()
    start = tuple([1 for _ in vertices])
    to_visit.append(start)
    visited.add(start)
    while (len(to_visit) > 0):
        c_tuple = to_visit.pop()
        c = dict(zip(vertices, c_tuple))
        w = workload_per_worker((vertices, edges), c)
        if w < min_workload:
            min_workload = w
            best_conf = c
        elif w == min_workload and (best_conf is None or max(c.values()) < max(best_conf.values())):
            # tie on workload: prefer the more even (lower max dimension) grid
            best_conf = c
        for i, d in enumerate(c_tuple):
            new_dim_sizes = (c_tuple[0:i] +
                             tuple([c_tuple[i] + 1]) +
                             c_tuple[i + 1:])
            if (reduce(op.mul, new_dim_sizes) <= workers
                    and new_dim_sizes not in visited
                    and (workers <= 64 or max(new_dim_sizes) < ceil(sqrt(workers)))):  # Optimization for many workers, takes too long otherwise.
                # fix: the original never added anything to `visited`, making
                # the dedup check dead and re-expanding each grid once per
                # path that reaches it (exponential duplicate work)
                visited.add(new_dim_sizes)
                to_visit.append(new_dim_sizes)
    return best_conf
def number_of_workers(config):
    """Total worker count of a configuration: the product of its dimension sizes."""
    return reduce(op.mul, config.values())
def clique_pattern(num_vertices):
    """Complete graph on ``num_vertices`` vertices labelled 'a', 'b', ...

    Edges are emitted in lexicographic order with the smaller label first.
    """
    vertices = [ascii_lowercase[i] for i in range(num_vertices)]
    edges = [(u, w) for u in vertices for w in vertices if u < w]
    return vertices, edges
def path_pattern(num_vertices):
    """Simple path a-b-c-... on ``num_vertices`` vertices."""
    vertices = [ascii_lowercase[i] for i in range(num_vertices)]
    # consecutive pairs form the path edges
    edges = list(zip(vertices, vertices[1:]))
    return vertices, edges
def circle_pattern(num_vertices):
    """Cycle pattern: a path whose endpoints are joined by one extra edge."""
    vertices, edges = path_pattern(num_vertices)
    edges.append((vertices[0], vertices[-1]))
    return vertices, edges
def two_rings_pattern():
    """Two triangles sharing the edge (a, b): a 3-clique plus vertex 'z'."""
    vertices, edges = clique_pattern(3)
    vertices.append('z')
    edges.extend([('a', 'z'), ('b', 'z')])
    return vertices, edges
def diamond_pattern():
    """The 'diamond' graph: a 4-vertex clique minus the (a, d) chord."""
    vertices, edges = clique_pattern(4)
    edges.remove(('a', 'd'))
    return vertices, edges
def house_pattern():
    """The 'house' graph: a 5-vertex clique minus the (a, d) and (a, e) chords."""
    vertices, edges = clique_pattern(5)
    edges.remove(('a', 'd'))
    edges.remove(('a', 'e'))
    return vertices, edges
def write_replication_file():
    """
    Produces a table that shows how many percent of the E relationship is hold at each
    node for the optimal configurations, given a fixed number of workers. Optimality is
    defined as detailed in Chu et al. 2015.
    :return:
    """
    clique_patterns = list(map(lambda i: clique_pattern(i), range(3, 6)))
    # fix: the original called `path_patterns(i)` here — a NameError, since
    # the generator function is named `path_pattern`
    path_patterns = list(map(lambda i: path_pattern(i), range(2, 6)))
    patterns = clique_patterns + path_patterns + [diamond_pattern()] + [house_pattern()]
    field_names = ['vertices', 'edges', 'workers', 'workers_used', 'config', 'max_percentage']
    workers = [64, 128]
    # newline='' as the csv module docs require, so rows are not double-spaced
    # on platforms that translate line endings
    with open('output.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(field_names)
        for (v, e) in patterns:
            for w in workers:
                c = best_configuration(w, e, v)
                writer.writerow((len(v), len(e), w, number_of_workers(c),
                                 '; '.join(str(c[vertex]) for vertex in v),
                                 workload_per_worker((v, e), c)))
        f.flush()
def check_against_paper_results():
    """Recompute the shuffle volumes reported in the paper and print both lists."""
    # Size of the edge relationship in the paper (twitter, follower -> followee)
    e_size = 1114289
    workers = 64
    # Patterns of the paper, in the paper's order
    patterns = [clique_pattern(3), clique_pattern(4), circle_pattern(4), two_rings_pattern()]
    # Number of tuples shuffled in millions from the paper, same order as patterns
    expected_tuples_to_shuffle = [13, 24, 35, 17]
    calculated_tuple_shuffles = []
    for vertices, edges in patterns:
        config = best_configuration(workers, edges, vertices)
        print(config)
        workload = workload_per_worker((vertices, edges), config)
        calculated_tuple_shuffles.append(round(e_size * workload * number_of_workers(config) / 1000000))
    print("Expected tuples to shuffle: ", expected_tuples_to_shuffle)
    print("Calculated tuples to shuffle: ", calculated_tuple_shuffles)
# check_against_paper_results()
# write_replication_file()
|
<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from pterotactyl.utility import utils
# Sizes used to preallocate mesh buffers: presumably the vertex count of the
# base object mesh and of one tactile chart — TODO confirm against the mesh
# construction code in pterotactyl.utility.
BASE_MESH_SIZE = 1824
BASE_CHART_SIZE = 25
# replay buffer used for learning RL models over the environment
class ReplayMemory:
    """Fixed-capacity circular replay buffer for environment transitions.

    Buffers whose name ends in ``_n`` hold observations occurring after the
    action is performed. Mesh buffers exist only when ``args.use_recon`` is
    set; latent buffers only when ``args.use_latent`` is set.
    """

    def __init__(self, args):
        self.args = args
        # basic info which might be used by a learning method
        self.mask = torch.zeros((self.args.mem_capacity, self.args.num_actions))
        self.mask_n = torch.zeros((self.args.mem_capacity, self.args.num_actions))
        self.actions = torch.zeros((self.args.mem_capacity))
        self.rewards = torch.zeros(self.args.mem_capacity)
        self.score = torch.zeros(self.args.mem_capacity)
        self.score_n = torch.zeros(self.args.mem_capacity)
        self.first_score = torch.zeros(self.args.mem_capacity)
        if self.args.use_recon:
            # one chart of BASE_CHART_SIZE vertices per grasp per finger is
            # appended on top of the base mesh
            num_fingers = 1 if self.args.finger else 4
            mesh_shape = BASE_MESH_SIZE + (
                BASE_CHART_SIZE * self.args.num_grasps * num_fingers
            )
            self.mesh = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
            self.mesh_n = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
        if self.args.use_latent:
            latent_size = utils.load_model_config(self.args.auto_location)[
                0
            ].encoding_size
            self.latent = torch.zeros((self.args.mem_capacity, latent_size))
            self.latent_n = torch.zeros((self.args.mem_capacity, latent_size))
            self.first_latent = torch.zeros((self.args.mem_capacity, latent_size))
        self.position = 0  # index of the next slot to overwrite (circular)
        self.count_seen = 0  # total number of transitions ever pushed

    # add a set of transitions to the replay buffer
    def push(self, action, observation, next_observation, reward):
        """Store a batch of transitions, overwriting the oldest slots when full."""
        for i in range(len(action)):
            self.actions[self.position] = action[i]
            self.rewards[self.position] = reward[i]
            self.score[self.position] = observation["score"][i]
            self.score_n[self.position] = next_observation["score"][i]
            self.first_score[self.position] = observation["first_score"][i]
            self.mask[self.position] = observation["mask"][i]
            self.mask_n[self.position] = next_observation["mask"][i]
            if self.args.use_recon:
                self.mesh[self.position] = observation["mesh"][i]
                self.mesh_n[self.position] = next_observation["mesh"][i]
            if self.args.use_latent:
                self.latent[self.position] = observation["latent"][i]
                self.latent_n[self.position] = next_observation["latent"][i]
                self.first_latent[self.position] = observation["first_latent"][i]
            self.count_seen += 1
            self.position = (self.position + 1) % self.args.mem_capacity

    # sample a set of transitions from the replay buffer
    def sample(self):
        """Return a random training batch dict, or None until burn-in is reached."""
        if (
            self.count_seen < self.args.burn_in
            or self.count_seen < self.args.train_batch_size
        ):
            return None
        indices = np.random.choice(
            min(self.count_seen, self.args.mem_capacity), self.args.train_batch_size
        )
        data = {
            "mask": self.mask[indices],
            "mask_n": self.mask_n[indices],
            "actions": self.actions[indices],
            "rewards": self.rewards[indices],
            "score": self.score[indices],
            "score_n": self.score_n[indices],
            "first_score": self.first_score[indices],
        }
        if self.args.use_recon:
            data["mesh"] = self.mesh[indices]
            data["mesh_n"] = self.mesh_n[indices]
        if self.args.use_latent:
            data["latent"] = self.latent[indices]
            data["latent_n"] = self.latent_n[indices]
            data["first_latent"] = self.first_latent[indices]
        return data

    # save the replay buffer to disk
    def save(self, directory):
        """Atomically persist the buffer (write to a temp file, then rename)."""
        data = {
            "mask": self.mask,
            "mask_n": self.mask_n,
            "actions": self.actions,
            "rewards": self.rewards,
            "score": self.score,
            # fix: score_n was previously dropped on save, so a restored
            # buffer sampled stale zeros for the post-action scores
            "score_n": self.score_n,
            "first_score": self.first_score,
            "position": self.position,
            "count_seen": self.count_seen,
        }
        if self.args.use_recon:
            data["mesh"] = self.mesh
            data["mesh_n"] = self.mesh_n
        if self.args.use_latent:
            data["latent"] = self.latent
            data["latent_n"] = self.latent_n
            data["first_latent"] = self.first_latent
        temp_path = directory + "_replay_buffer_temp.pt"
        full_path = directory + "_replay_buffer.pt"
        torch.save(data, temp_path)
        os.rename(temp_path, full_path)

    # load the replay buffer from the disk
    def load(self, directory):
        """Restore a buffer previously written by save()."""
        data = torch.load(directory + "_replay_buffer.pt")
        self.mask = data["mask"]
        self.mask_n = data["mask_n"]
        self.actions = data["actions"]
        self.rewards = data["rewards"]
        self.score = data["score"]
        # tolerate buffers written before score_n was persisted
        self.score_n = data.get("score_n", self.score_n)
        self.first_score = data["first_score"]
        self.position = data["position"]
        self.count_seen = data["count_seen"]
        if self.args.use_recon:
            self.mesh = data["mesh"]
            self.mesh_n = data["mesh_n"]
        if self.args.use_latent:
            self.latent = data["latent"]
            self.latent_n = data["latent_n"]
            self.first_latent = data["first_latent"]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from keystoneclient import service_catalog
from keystoneclient.v2_0 import client as keystone_client
from keystoneclient.v2_0 import tokens
from horizon import exceptions
from horizon.api import APIResourceWrapper
LOG = logging.getLogger(__name__)
# Module-level cache for get_default_role(); the default role comes from
# settings and should not change from request to request.
DEFAULT_ROLE = None
def _get_endpoint_url(request):
    """Keystone endpoint for this request: the session's region endpoint,
    falling back to the OPENSTACK_KEYSTONE_URL setting."""
    fallback = getattr(settings, 'OPENSTACK_KEYSTONE_URL')
    return request.session.get('region_endpoint', fallback)
class Token(APIResourceWrapper):
    """Simple wrapper around keystoneclient.tokens.Tenant"""
    # attribute names proxied from the wrapped keystoneclient token object
    _attrs = ['id', 'user', 'serviceCatalog', 'tenant']
class User(APIResourceWrapper):
    """Simple wrapper around keystoneclient.users.User"""
    _attrs = ['email', 'enabled', 'id', 'tenantId', 'name']
class Services(APIResourceWrapper):
    """Simple wrapper around a keystone services record."""
    _attrs = ['disabled', 'host', 'id', 'last_update', 'stats', 'type', 'up',
              'zone']
# fix: the `password` default had been mangled to the invalid placeholder
# `<PASSWORD>` (a SyntaxError); the upstream default is None
def keystoneclient(request, username=None, password=None, tenant_id=None,
                   token_id=None, endpoint=None, endpoint_type=None):
    """Returns a client connected to the Keystone backend.
    Several forms of authentication are supported:
    * Username + password -> Unscoped authentication
    * Username + password + tenant id -> Scoped authentication
    * Unscoped token -> Unscoped authentication
    * Unscoped token + tenant id -> Scoped authentication
    * Scoped token -> Scoped authentication
    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.
    Lazy authentication if an ``endpoint`` parameter is provided.
    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    # Take care of client connection caching/fetching a new client
    user = request.user
    if hasattr(request, '_keystone') and \
            request._keystone.auth_token == token_id:
        LOG.debug("Using cached client for token: %s" % user.token)
        conn = request._keystone
    else:
        LOG.debug("Creating a new client connection with endpoint: %s."
                  % endpoint)
        conn = keystone_client.Client(username=username or user.username,
                                      password=password,
                                      tenant_id=tenant_id or user.tenant_id,
                                      token=token_id or user.token,
                                      auth_url=_get_endpoint_url(request),
                                      endpoint=endpoint)
        request._keystone = conn
    # Fetch the correct endpoint for the user type: an explicit endpoint_type
    # wins; admins get the adminURL, everyone else the publicURL.
    catalog = getattr(conn, 'service_catalog', None)
    if catalog and "serviceCatalog" in catalog.catalog.keys():
        if endpoint_type:
            endpoint = catalog.url_for(service_type='identity',
                                       endpoint_type=endpoint_type)
        elif user.is_admin():
            endpoint = catalog.url_for(service_type='identity',
                                       endpoint_type='adminURL')
        else:
            endpoint = catalog.url_for(service_type='identity',
                                       endpoint_type='publicURL')
    else:
        # no catalog (unscoped auth): fall back to the configured endpoint
        endpoint = _get_endpoint_url(request)
    conn.management_url = endpoint
    return conn
def tenant_create(request, tenant_name, description, enabled):
    """Create a tenant with the given name, description and enabled flag."""
    manager = keystoneclient(request).tenants
    return manager.create(tenant_name, description, enabled)
def tenant_get(request, tenant_id):
    """Fetch a single tenant by id."""
    return keystoneclient(request).tenants.get(tenant_id)
def tenant_delete(request, tenant_id):
    """Delete a tenant by id."""
    keystoneclient(request).tenants.delete(tenant_id)
def tenant_list(request):
    """List all tenants visible to the current user."""
    return keystoneclient(request).tenants.list()
def tenant_update(request, tenant_id, tenant_name, description, enabled):
    """Update a tenant's name, description and enabled flag."""
    manager = keystoneclient(request).tenants
    return manager.update(tenant_id, tenant_name, description, enabled)
def tenant_list_for_token(request, token, endpoint_type=None):
    """List the tenants accessible with the given token."""
    client = keystoneclient(request,
                            token_id=token,
                            endpoint=_get_endpoint_url(request),
                            endpoint_type=endpoint_type)
    return client.tenants.list()
def token_create(request, tenant, username, password):
    """Create a token using the username and password provided.

    If tenant is provided it will retrieve a scoped token and the service
    catalog for the given tenant; otherwise it returns an unscoped token
    without a service catalog.
    """
    client = keystoneclient(request,
                            username=username,
                            password=password,
                            tenant_id=tenant,
                            endpoint=_get_endpoint_url(request))
    raw = client.tokens.authenticate(username=username,
                                     password=password,
                                     tenant_id=tenant)
    return Token(raw)
def token_create_scoped(request, tenant, token):
    """Exchange an unscoped token for one scoped to ``tenant``.

    Retrieves the service catalog for the tenant and points the client's
    management URL at the appropriate identity endpoint.
    """
    # drop any cached client: it is bound to the unscoped token
    if hasattr(request, '_keystone'):
        del request._keystone
    client = keystoneclient(request,
                            tenant_id=tenant,
                            token_id=token,
                            endpoint=_get_endpoint_url(request))
    raw_token = client.tokens.authenticate(tenant_id=tenant,
                                           token=token,
                                           return_raw=True)
    client.service_catalog = service_catalog.ServiceCatalog(raw_token)
    endpoint_type = 'adminURL' if request.user.is_admin() else 'publicURL'
    client.management_url = client.service_catalog.url_for(
        service_type='identity', endpoint_type=endpoint_type)
    scoped_token = tokens.Token(tokens.TokenManager, raw_token)
    return Token(scoped_token)
def user_list(request, tenant_id=None):
    """List users, optionally restricted to one tenant."""
    raw_users = keystoneclient(request).users.list(tenant_id=tenant_id)
    return [User(u) for u in raw_users]
def user_create(request, user_id, email, password, tenant_id, enabled):
    """Create a user and wrap the result."""
    raw = keystoneclient(request).users.create(
        user_id, password, email, tenant_id, enabled)
    return User(raw)
def user_delete(request, user_id):
    """Delete a user by id."""
    keystoneclient(request).users.delete(user_id)
def user_get(request, user_id):
    """Fetch a single user by id."""
    return User(keystoneclient(request).users.get(user_id))
def user_update_email(request, user_id, email):
    """Change a user's e-mail address."""
    return User(keystoneclient(request).users.update_email(user_id, email))
def user_update_enabled(request, user_id, enabled):
    """Enable or disable a user."""
    return User(keystoneclient(request).users.update_enabled(user_id, enabled))
def user_update_password(request, user_id, password):
    """Set a new password for a user."""
    manager = keystoneclient(request).users
    return User(manager.update_password(user_id, password))
def user_update_tenant(request, user_id, tenant_id):
    """Move a user to another tenant."""
    manager = keystoneclient(request).users
    return User(manager.update_tenant(user_id, tenant_id))
def role_list(request):
    """Returns a global list of available roles."""
    return keystoneclient(request).roles.list()
def add_tenant_user_role(request, tenant_id, user_id, role_id):
    """Adds a role for a user on a tenant."""
    manager = keystoneclient(request).roles
    return manager.add_user_role(user_id, role_id, tenant_id)
def remove_tenant_user(request, tenant_id, user_id):
    """Strips every role the user holds on the tenant, removing them from it."""
    client = keystoneclient(request)
    for role in client.roles.roles_for_user(user_id, tenant_id):
        client.roles.remove_user_role(user_id, role.id, tenant_id)
def get_default_role(request):
    """
    Gets the default role object from Keystone and saves it as a global
    since this is configured in settings and should not change from request
    to request. Supports lookup by name or id.
    """
    global DEFAULT_ROLE
    default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
    if default and DEFAULT_ROLE is None:
        # fix: was a bare `except:` that also left `roles` unbound, so the
        # loop below raised NameError whenever the keystone call failed
        roles = []
        try:
            roles = keystoneclient(request).roles.list()
        except Exception:
            exceptions.handle(request)
        for role in roles:
            if role.id == default or role.name == default:
                DEFAULT_ROLE = role
                break
    return DEFAULT_ROLE
|
<reponame>pip-services3-python/pip-services3-expressions-python
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from .IVariantOperations import IVariantOperations
from .Variant import Variant
from .VariantType import VariantType
class AbstractVariantOperations(IVariantOperations, ABC):
"""
Implements an abstract variant operations manager object.
"""
def _type_to_string(self, value: VariantType) -> str:
"""
Convert variant type to string representation
:param value: a variant type to be converted.
:return: a string representation of the type.
"""
types = {
VariantType.Null: 'Null',
VariantType.Integer: 'Integer',
VariantType.Long: 'Long',
VariantType.Float: 'Float',
VariantType.Double: 'Double',
VariantType.String: 'String',
VariantType.Boolean: 'Boolean',
VariantType.DateTime: 'DateTime',
VariantType.TimeSpan: 'TimeSpan',
VariantType.Object: 'Object',
VariantType.Array: 'Array'
}
try:
return types[value]
except KeyError as e:
return 'Unknown'
    @abstractmethod
    def convert(self, value: Variant, new_type: VariantType) -> Variant:
        """
        Converts variant to specified type

        :param value: A variant value to be converted.
        :param new_type: A type of object to be returned.
        :return: A converted Variant value.
        """
        # Abstract hook: concrete operation managers supply the per-type
        # coercion rules that the arithmetic/logical operations rely on.
def add(self, value1: Variant, value2: Variant) -> Variant:
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer + value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long + value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float + value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double + value2.as_double
return result
elif value1.type == VariantType.String:
result.as_string = value1.as_string + value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span + value2.as_time_span
return result
raise Exception(f"Operation '+' is not supported for type {self._type_to_string(value1.type)}")
def sub(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '-' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer - value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long - value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float - value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double - value2.as_double
return result
elif value1.type == VariantType.DateTime:
result.as_string = value1.as_datetime - value2.as_datetime
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span - value2.as_time_span
return result
raise Exception(f"Operation '-' is not supported for type {self._type_to_string(value1.type)}")
def mul(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '*' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer * value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long * value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float * value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double * value2.as_double
return result
raise Exception(f"Operation '*' is not supported for type {self._type_to_string(value1.type)}")
def div(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '/' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = int(value1.as_integer / value2.as_integer)
return result
elif value1.type == VariantType.Long:
result.as_long = int(value1.as_long / value2.as_long)
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float / value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double / value2.as_double
return result
raise Exception(f"Operation '/' is not supported for type {self._type_to_string(value1.type)}")
def mod(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '%' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer % value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long % value2.as_long
return result
raise Exception(f"Operation '%' is not supported for type {self._type_to_string(value1.type)}")
def pow(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '^' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Performs operation.
if value1.type in [VariantType.Integer, VariantType.Long, VariantType.Float, VariantType.Double]:
# Converts second operand to the type of the first operand.
value1 = self.convert(value1, VariantType.Float)
value2 = self.convert(value2, VariantType.Float)
result.as_float = value1.as_float + value2.as_float
raise Exception(f"Operation '^' is not supported for type {self._type_to_string(value1.type)}")
def and_(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs AND operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer and value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long and value2.as_long
return result
elif value1.type == VariantType.Boolean:
result.as_boolean = value1.as_boolean and value2.as_boolean
return result
raise Exception(f"Operation 'AND' is not supported for type {self._type_to_string(value1.type)}")
def or_(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs OR operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer or value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long or value2.as_long
return result
elif value1.type == VariantType.Boolean:
result.as_boolean = value1.as_boolean or value2.as_boolean
return result
raise Exception(f"Operation 'OR' is not supported for type {self._type_to_string(value1.type)}")
def xor(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs XOR operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer ^ value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long ^ value2.as_long
return result
elif value1.type == VariantType.Boolean:
result.as_boolean = value1.as_boolean ^ value2.as_boolean
return result
raise Exception(f"Operation 'XOR' is not supported for type {self._type_to_string(value1.type)}")
def lsh(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '<<' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer << value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long << value2.as_long
return result
raise Exception(f"Operation '<<' is not supported for type {self._type_to_string(value1.type)}")
def rsh(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '>>' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer >> value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long >> value2.as_long
return result
raise Exception(f"Operation '>>' is not supported for type {self._type_to_string(value1.type)}")
def not_(self, value: Variant) -> Variant:
"""
Performs NOT operation for a variant.
:param value: The operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value.type == VariantType.Null:
return result
# Performs operation.
if value.type == VariantType.Integer:
result.as_integer = ~value.as_integer
return result
elif value.type == VariantType.Long:
result.as_long = ~value.as_long
return result
elif value.type == VariantType.Boolean:
result.as_boolean = ~value.as_boolean
return result
raise Exception(f"Operation NOT is not supported for type {self._type_to_string(value.type)}")
def negative(self, value: Variant) -> Variant:
"""
Performs unary '-' operation for a variant.
:param value: The operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value.type == VariantType.Null:
return result
# Performs operation.
if value.type == VariantType.Integer:
result.as_integer = -value.as_integer
return result
elif value.type == VariantType.Long:
result.as_long = -value.as_long
return result
elif value.type == VariantType.Float:
result.as_float = -value.as_float
return result
elif value.type == VariantType.Double:
result.as_double = -value.as_double
return result
raise Exception(f"Operation '-' is not supported for type {self._type_to_string(value.type)}")
    def equal(self, value1: Variant, value2: Variant) -> Variant:
        """
        Performs '=' operation for two variants.

        Two Null operands compare equal; a Null against anything else does
        not. Otherwise value2 is converted to value1's type and the values
        are compared, with the Boolean outcome stored in result.as_boolean.

        :param value1: The first operand for this operation.
        :param value2: The second operand for this operation.
        :return: A result variant object.
        """
        result = Variant()
        # Processes VariantType.Null values.
        if value1.type == VariantType.Null and value2.type == VariantType.Null:
            result.as_boolean = True
            return result
        if value1.type == VariantType.Null or value2.type == VariantType.Null:
            result.as_boolean = False
            return result
        # Converts second operand to the type of the first operand.
        value2 = self.convert(value2, value1.type)
        # Performs operation.
        if value1.type == VariantType.Integer:
            result.as_boolean = value1.as_integer == value2.as_integer
            return result
        elif value1.type == VariantType.Long:
            result.as_boolean = value1.as_long == value2.as_long
            return result
        elif value1.type == VariantType.Float:
            result.as_boolean = value1.as_float == value2.as_float
            return result
        elif value1.type == VariantType.Double:
            result.as_boolean = value1.as_double == value2.as_double
            return result
        elif value1.type == VariantType.String:
            # NOTE(review): rstrip('.0') strips ALL trailing '.' and '0'
            # characters ("100" -> "1", "20" -> "2"), not just a ".0" suffix —
            # so "100" would compare equal to "10". Presumably this was meant
            # to normalize float-formatted strings ("1.0" vs "1"); confirm the
            # intent (str.removesuffix('.0') would only drop an exact suffix).
            result.as_boolean = value1.as_string.rstrip('.0') == value2.as_string.rstrip('.0')
            return result
        elif value1.type == VariantType.TimeSpan:
            result.as_boolean = value1.as_time_span == value2.as_time_span
            return result
        elif value1.type == VariantType.DateTime:
            result.as_boolean = value1.as_datetime == value2.as_datetime
            return result
        elif value1.type == VariantType.Boolean:
            result.as_boolean = value1.as_boolean == value2.as_boolean
            return result
        elif value1.type == VariantType.Object:
            result.as_boolean = value1.as_object == value2.as_object
            return result
        raise Exception(f"Operation '==' is not supported for type {self._type_to_string(value1.type)}")
def not_equal(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '<>' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null and value2.type == VariantType.Null:
result.as_boolean = True
return result
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer != value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long != value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float != value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double != value2.as_double
return result
elif value1.type == VariantType.String:
result.as_string = value1.as_string != value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span != value2.as_time_span
return result
elif value1.type == VariantType.DateTime:
result.as_datetime = value1.as_datetime != value2.as_datetime
return result
elif value1.type == VariantType.Boolean:
result.as_boolean = value1.as_boolean != value2.as_boolean
return result
elif value1.type == VariantType.Object:
result.as_object = value1.as_object != value2.as_object
return result
raise Exception(f"Operation '<>' is not supported for type {self._type_to_string(value1.type)}")
def more(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '>' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_boolean = value1.as_integer > value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_boolean = value1.as_long > value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_boolean = value1.as_float > value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_boolean = value1.as_double > value2.as_double
return result
elif value1.type == VariantType.String:
result.as_boolean = value1.as_string > value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_boolean = value1.as_time_span > value2.as_time_span
return result
elif value1.type == VariantType.DateTime:
result.as_boolean = value1.as_datetime.timestamp() > value2.as_datetime.timestamp()
return result
raise Exception(f"Operation '>' is not supported for type {self._type_to_string(value1.type)}")
def less(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '<' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer < value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long < value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float < value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double < value2.as_double
return result
elif value1.type == VariantType.String:
result.as_string = value1.as_string < value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span < value2.as_time_span
return result
elif value1.type == VariantType.DateTime:
result.as_datetime = value1.as_datetime.timestamp() < value2.as_datetime.timestamp()
return result
raise Exception(f"Operation '<' is not supported for type {self._type_to_string(value1.type)}")
def more_equal(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '>=' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer >= value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long >= value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float >= value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double >= value2.as_double
return result
elif value1.type == VariantType.String:
result.as_string = value1.as_string >= value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span >= value2.as_time_span
return result
elif value1.type == VariantType.DateTime:
result.as_datetime = value1.as_datetime.timestamp() >= value2.as_datetime.timestamp()
return result
raise Exception(f"Operation '>=' is not supported for type {self._type_to_string(value1.type)}")
def less_equal(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs '<=' operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, value1.type)
# Performs operation.
if value1.type == VariantType.Integer:
result.as_integer = value1.as_integer <= value2.as_integer
return result
elif value1.type == VariantType.Long:
result.as_long = value1.as_long <= value2.as_long
return result
elif value1.type == VariantType.Float:
result.as_float = value1.as_float <= value2.as_float
return result
elif value1.type == VariantType.Double:
result.as_double = value1.as_double <= value2.as_double
return result
elif value1.type == VariantType.String:
result.as_string = value1.as_string <= value2.as_string
return result
elif value1.type == VariantType.TimeSpan:
result.as_time_span = value1.as_time_span <= value2.as_time_span
return result
elif value1.type == VariantType.DateTime:
result.as_datetime = value1.as_datetime.timestamp() <= value2.as_datetime.timestamp()
return result
raise Exception(f"Operation '<=' is not supported for type {self._type_to_string(value1.type)}")
def in_(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs IN operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
result.as_boolean = False
return result
# Processes null arrays.
if value1.as_object is None:
result.as_boolean = False
return result
if value1.type == VariantType.Array:
array = value1.as_array
for element in array:
eq = self.equal(value2, element)
if eq.type == VariantType.Boolean and eq.as_boolean:
result.as_boolean = True
return result
result.as_boolean = False
return result
return self.equal(value1, value2)
def get_element(self, value1: Variant, value2: Variant) -> Variant:
"""
Performs [] operation for two variants.
:param value1: The first operand for this operation.
:param value2: The second operand for this operation.
:return: A result variant object.
"""
result = Variant()
# Processes VariantType.Null values.
if value1.type == VariantType.Null or value2.type == VariantType.Null:
return result
# Converts second operand to the type of the first operand.
value2 = self.convert(value2, VariantType.Integer)
if value1.type == VariantType.Array:
return value1.get_by_index(value2.as_integer)
elif value1.type == VariantType.String:
result.as_string = value1.as_string[value2.as_integer]
return result
raise Exception(f"Operation '[]' is not supported for type {self._type_to_string(value1.type)}")
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
# https://medium.com/@keagileageek/paramiko-how-to-ssh-and-file-transfers-with-python-75766179de73
import paramiko
import os
# import requests
import wget
from datetime import datetime
import time
DEFAULT_LOCAL_FILES_DIR = os.getcwd()
REMOTE_DIR_PATH = '/tmp'
UPLOAD_FILE_NAMES = ['upgmgr', 'dcoField.tar.gz']
ssh_client = None
HOSTNAME = "10.100.57.99"
USERNAME = "root"
PASSWORD = ''
PREVIOUS_FW_VERSION='' # todo
UPGRADED_FW_VERSION = '' # todo
class Stage_1_Download_FW_SOFTWARE:
    """Stage 1: obtain the firmware files that will be installed (mostly a stub)."""

    def __init__(self):
        # Populated by step_1_get_remote_files.
        self.remote_files_path_list = None

    def step_1_get_remote_files(self):
        # Placeholder: remote file discovery is not implemented yet.
        self.remote_files_path_list = 1

    def step_2_download_firmware(self):
        # TODO: validate the URL, download, and when files already exist locally
        # ask the user whether to reuse them or download fresh copies.
        print("Example of firmware image URL: https://iartifactory.infinera.com/artifactory/DCO-FW/release/dco-p1-1.92.2/image/")
        # firmware_url = input("Enter the URL of firmware image: ")
        print("hello")
class Stage_2_Start_Telnet_Machine:
    """Stage 2: drive the SW host over a serial/telnet session (not implemented)."""

    def step_1_connect_telnet(self):
        """Placeholder: establish the telnet connection."""

    def step_2_execute_commands(self):
        """Placeholder: run the required commands over telnet."""
class Stage_3_Upgrade_FW:
    """Stage 3: upload the upgrade files over SSH/SFTP and run the firmware upgrade."""

    def __init__(self):
        # Populated by step_1_find_local_files_paths.
        self.local_files_path_list = None

    def step_1_find_local_files_paths(self, local_files_dir: str = DEFAULT_LOCAL_FILES_DIR,
                                      file_names=UPLOAD_FILE_NAMES) -> None:
        """
        Locate the upgrade files locally and remember their absolute paths.

        :param local_files_dir: Directory expected to contain the upgrade files.
        :param file_names: Names of the files that must all be present.
        :raises Exception: If the directory or any required file is missing.
        """
        # Validate a user-supplied directory (the default is the cwd and exists).
        if local_files_dir != DEFAULT_LOCAL_FILES_DIR:
            if not os.path.isdir(local_files_dir):
                raise Exception("Directory of files does not exist")
        # Construct absolute addresses of these files.
        files_abs_paths = []
        for f_name in file_names:
            f_abs_path = os.path.join(local_files_dir, f_name)
            files_abs_paths.append(f_abs_path)
        # Check that every file exists.
        for f_abs_path in files_abs_paths:
            if not os.path.isfile(f_abs_path):
                # BUG FIX: the f-string previously contained the literal text
                # "str(f_abs_path)" instead of interpolating the missing path.
                raise Exception(f"{f_abs_path} is not available")
        print(f"Success: All the required {str(UPLOAD_FILE_NAMES)} files are present")
        self.local_files_path_list = files_abs_paths

    def step_2_connect_ssh(self, hostname: str = HOSTNAME, username: str = USERNAME, password: str = PASSWORD) -> None:
        """
        Open the module-level SSH connection used by the remaining steps.

        :raises Exception: On authentication failure or an unreachable host.
        """
        global ssh_client
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            ssh_client.connect(hostname=hostname, username=username, password=password)
        except paramiko.AuthenticationException:
            # This exception takes care of Authentication errors & exceptions
            raise Exception('ERROR : Authentication failed because of irrelevant details!')
        except Exception:
            # TYPO FIX in the message: "host bard" -> "host board".
            raise Exception(f"Error: Cannot connect to the {hostname}. The board cannot start"
                            f" after reboot, it needs to power cycle from SW host board")
        print("ssh connection is established")

    def step_3_upload_files(self, list_localfilepaths: list = None, remotefiledir: str = REMOTE_DIR_PATH):
        """
        Upload the local files into the remote directory over SFTP.

        :param list_localfilepaths: Absolute paths to upload; defaults to the
            paths found by step_1_find_local_files_paths.
        :param remotefiledir: Remote directory to upload into.
        :raises Exception: If any single upload fails (the SFTP session is
            closed before raising).
        """
        if not list_localfilepaths:
            list_localfilepaths = self.local_files_path_list
        # Note: File transfer is handled by the sftp protocol.
        ftp_client = ssh_client.open_sftp()
        self.mkdir_cd_remote_dir(ftp_client, remotefiledir)
        for l_file_path in list_localfilepaths:
            _, file_name = os.path.split(l_file_path)
            try:
                ftp_client.put(l_file_path, file_name)
            except Exception:
                ftp_client.close()
                raise Exception(f"Upload failed for: {l_file_path}")
            else:
                print(f"Upload successful for: {file_name}")
        ftp_client.close()

    def step_4_execute_upgrade_commands(self):
        """Run the remote upgrade commands one by one."""
        self.exec_cmd('ls -lrt')
        self.exec_cmd('/opt/infinera/img/bin/stop_dco')
        # NOTE(review): each exec_cmd runs in a fresh shell, so this 'cd' does
        # not affect the next command — confirm './upgmgr -f' resolves as
        # intended (it may need to be 'cd /tmp && ./upgmgr -f').
        self.exec_cmd('cd /tmp')
        self.exec_cmd('./upgmgr -f')

    def step_5_set_firmware_ip_address(self):
        """Rewrite the gnxi service bind address to 0.0.0.0 and restart it."""
        self.exec_cmd("sed -i -r 's/127.8.0.3/0.0.0.0/g' /etc/systemd/system/gnxi.service")
        time.sleep(2)
        self.exec_cmd('systemctl daemon-reload')
        time.sleep(10)
        self.exec_cmd('systemctl restart gnxi')

    def mkdir_cd_remote_dir(self, ftp_client, remote_dir: str) -> None:
        """
        CD to the remote directory, creating it first when it does not exist.

        :param ftp_client: An open paramiko SFTP client.
        :param remote_dir: Remote directory path.
        """
        try:
            ftp_client.chdir(remote_dir)  # Test if remote dir exists
        except IOError:
            ftp_client.mkdir(remote_dir)  # Create remote dir
            ftp_client.chdir(remote_dir)  # cd to remote dir

    def download_files(self, remote_files_path: list, local_path='.'):
        """
        Download each remote file over SFTP.

        :param remote_files_path: Remote file paths to fetch.
        :param local_path: Local destination passed to SFTPClient.get.
        """
        # NOTE(review): SFTPClient.get expects a local *file* path; passing a
        # directory ('.') for several files looks wrong — verify before use.
        ftp_client = ssh_client.open_sftp()
        for r in remote_files_path:
            ftp_client.get(r, local_path)
        ftp_client.close()

    def exec_cmd(self, command):
        """
        Execute a command on the SSH host and echo its stdout lines.

        :param command: Shell command to run.
        :raises Exception: If exec_command itself fails.
        """
        try:
            stdin, stdout, stderr = ssh_client.exec_command(command)
        except Exception:
            raise Exception(f"Execution failed for: {command}")
        else:
            print(f"Executed successfully: {command}")
            print("Output of the command is following:")  # todo: there is no output??
            for line in stdout:
                # todo: print each line in the remote output. Verify if it works??
                print(line)

    def close_ssh(self):
        """Close the module-level SSH connection if it was opened."""
        if ssh_client:
            ssh_client.close()
#todo: employ ctrl + c to terminate program
def welcome():
    """
    Print the greeting banner and wait for the user to confirm before the
    upgrade stages start.
    """
    current_time = datetime.now().strftime("%H:%M:%S")
    hour = int(current_time.split(':')[0])
    # BUG FIX: %H is a 24-hour value (0-23). The original ranges
    # ('12 <= hour < 5' and '5 <= hour < 8') could never match — hours 4-11
    # were caught by the morning branch first — so every hour from 12 onward
    # printed the night message. Afternoon/evening are now 12-16 and 17-19.
    if 4 <= hour < 12:
        msg = 'Good Morning'
    elif 12 <= hour < 17:
        msg = 'Good Afternoon'
    elif 17 <= hour < 20:
        msg = 'Good Evening'
    else:
        msg = 'Good Night... Family does not wait for time. Take rest!'
    print(msg)
    print("..............................................................")
    print(f". Current Time: {current_time} (Canada) .")
    print(". Please make sure to have a stable internet connection .")
    print("..............................................................")
    # fw_ssh_ip_addr = input("Enter Firmware IP address: ")
    # sw_serial_telnet_ip_addr = input("Enter Software serial connection IP address: ")
    # sw_serial_telnet_port = input("Enter Software serial connection Port Number: ")
    input("Press Enter to continue: ")
def bye():
    """Show the closing banner and report the before/after firmware versions."""
    now = datetime.now().strftime("%H:%M:%S")
    banner = [
        "\n",
        "..............................................................",
        ". Thank you for using this application .",
        f". Current Time: {now} (Canada) .",
        "..............................................................",
        f"Previous Firmware Version: {PREVIOUS_FW_VERSION}",
        f"Upgraded Firmware Version: {UPGRADED_FW_VERSION}",
        "Dasvidaniya.",
    ]
    for line in banner:
        print(line)
# Press the green button in the gutter to run the script.
# Entry point: runs the upgrade stages strictly in order. Each stage object
# is created, its steps executed, and Stage 3 closes the SSH session at the
# end. NOTE(review): close_ssh() is not in a try/finally, so the connection
# leaks if an earlier Stage 3 step raises — consider wrapping.
if __name__ == '__main__':
    # Stage 0: Welcome
    welcome()
    # Stage 1: Download Firmware in local system
    stage_1 = Stage_1_Download_FW_SOFTWARE()
    stage_1.step_1_get_remote_files()
    stage_1.step_2_download_firmware()
    # Stage 2: Start Telnet Machine
    stage_2 = Stage_2_Start_Telnet_Machine()
    stage_2.step_1_connect_telnet()
    stage_2.step_2_execute_commands()
    # Stage 3: Upgrade Firmware and set ip address
    stage_3 = Stage_3_Upgrade_FW()
    stage_3.step_1_find_local_files_paths()
    stage_3.step_2_connect_ssh()
    stage_3.step_3_upload_files()
    stage_3.step_4_execute_upgrade_commands()
    stage_3.step_5_set_firmware_ip_address()
    stage_3.close_ssh()
    # Stage infinity:
    bye()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
<reponame>LordKBX/EbookCollection
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from common.lang import *
from common.bdd import *
dialogStyleBtnGreen = 'background-color: rgb(0, 153, 15); color: rgb(255, 255, 255);'
def __get_bases():
    """Return the shared (language, style) pair used by the dialog helpers."""
    language = Lang()
    style = BDD().get_param('style')
    return language, style
def InfoDialog(title: str, text: str, parent: any = None):
    """Show a modal information dialog with a single localized OK button."""
    msg_box = QtWidgets.QMessageBox(parent)
    language, style = __get_bases()
    msg_box.setStyleSheet(get_style_var(style, 'QMessageBox'))
    msg_box.setWindowTitle(title)
    msg_box.setText(text)
    msg_box.setStandardButtons(QtWidgets.QMessageBox.Ok)
    ok_button = msg_box.button(QtWidgets.QMessageBox.Ok)
    ok_button.setText(language['Generic/DialogBtnOk'])
    ok_button.setFocusPolicy(QtCore.Qt.NoFocus)
    ok_button.setStyleSheet(get_style_var(style, 'QMessageBoxBtnGeneric'))
    ok_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    msg_box.setIcon(QtWidgets.QMessageBox.Information)
    msg_box.exec()
def InfoDialogConfirm(title: str, text: str, yes: str, no: str, parent: any = None):
    """
    Show a modal yes/no information dialog.

    :param yes: Caption for the (green) Yes button.
    :param no: Caption for the (red) No button.
    :return: True when Yes was chosen, otherwise False.
    """
    msg_box = QtWidgets.QMessageBox(parent)
    language, style = __get_bases()
    msg_box.setStyleSheet(get_style_var(style, 'QMessageBox'))
    msg_box.setWindowTitle(title)
    msg_box.setText(text)
    msg_box.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
    for role, caption, color_key in (
            (QtWidgets.QMessageBox.Yes, yes, 'QMessageBoxBtnGreen'),
            (QtWidgets.QMessageBox.No, no, 'QMessageBoxBtnRed')):
        button = msg_box.button(role)
        button.setText(caption)
        button.setFocusPolicy(QtCore.Qt.NoFocus)
        button.setStyleSheet(get_style_var(style, color_key))
        button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    msg_box.setIcon(QtWidgets.QMessageBox.Information)
    return msg_box.exec() == QtWidgets.QMessageBox.Yes
def WarnDialog(title: str, text: str, parent: any = None):
    """Show a modal warning dialog with a single localized OK button."""
    msg_box = QtWidgets.QMessageBox(parent)
    language, style = __get_bases()
    msg_box.setStyleSheet(get_style_var(style, 'QMessageBox'))
    msg_box.setWindowTitle(title)
    msg_box.setText(text)
    msg_box.setStandardButtons(QtWidgets.QMessageBox.Ok)
    ok_button = msg_box.button(QtWidgets.QMessageBox.Ok)
    ok_button.setText(language['Generic/DialogBtnOk'])
    ok_button.setFocusPolicy(QtCore.Qt.NoFocus)
    ok_button.setStyleSheet(get_style_var(style, 'QMessageBoxBtnGeneric'))
    ok_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    msg_box.setIcon(QtWidgets.QMessageBox.Warning)
    msg_box.exec()
def WarnDialogConfirm(title: str, text: str, yes: str, no: str, parent: any = None):
    """
    Show a modal yes/no warning dialog.

    Unlike InfoDialogConfirm, Yes is styled red and No green here.

    :param yes: Caption for the (red) Yes button.
    :param no: Caption for the (green) No button.
    :return: True when Yes was chosen, otherwise False.
    """
    msg_box = QtWidgets.QMessageBox(parent)
    language, style = __get_bases()
    msg_box.setStyleSheet(get_style_var(style, 'QMessageBox'))
    msg_box.setWindowTitle(title)
    msg_box.setText(text)
    msg_box.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
    msg_box.setFocusPolicy(QtCore.Qt.NoFocus)
    for role, caption, color_key in (
            (QtWidgets.QMessageBox.Yes, yes, 'QMessageBoxBtnRed'),
            (QtWidgets.QMessageBox.No, no, 'QMessageBoxBtnGreen')):
        button = msg_box.button(role)
        button.setText(caption)
        button.setFocusPolicy(QtCore.Qt.NoFocus)
        button.setStyleSheet(get_style_var(style, color_key))
        button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    msg_box.setIcon(QtWidgets.QMessageBox.Warning)
    return msg_box.exec() == QtWidgets.QMessageBox.Yes
def InputDialog(title: str, text: str, yes: str = None, no: str = None, parent: any = None, value: str = None):
    """
    Show a modal single-line text-input dialog.

    :param title: Window title.
    :param text: Prompt label shown above the input field.
    :param yes: Caption for the accept button (defaults to the localized OK).
    :param no: Caption for the cancel button (defaults to the localized Cancel).
    :param parent: Optional parent widget.
    :param value: Optional initial text for the input field.
    :return: The entered text when accepted, otherwise None.
    """
    language, style = __get_bases()
    msg_box = QtWidgets.QDialog(parent, QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
    msg_box.setStyleSheet(get_style_var(style, 'QDialog'))
    msg_box.setWindowTitle(title)
    msg_box.setLayout(QtWidgets.QVBoxLayout())
    label = QtWidgets.QLabel(text)
    msg_box.layout().addWidget(label)
    # Renamed from 'input' to avoid shadowing the builtin.
    line_edit = QtWidgets.QLineEdit()
    if value is not None:
        line_edit.setText(value)
    msg_box.layout().addWidget(line_edit)
    # BUG FIX (dead code): removed a stray QAction that was connected to a
    # debug print but never attached to any widget, so it could never fire.
    button_box = QtWidgets.QDialogButtonBox()
    button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
    button_box.setFocusPolicy(QtCore.Qt.NoFocus)
    if yes is None:
        yes = language['Generic/DialogBtnOk']
    if no is None:
        no = language['Generic/DialogBtnCancel']
    ok_button = button_box.button(QtWidgets.QDialogButtonBox.Ok)
    ok_button.setText(yes)
    ok_button.setFocusPolicy(QtCore.Qt.NoFocus)
    ok_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    cancel_button = button_box.button(QtWidgets.QDialogButtonBox.Cancel)
    cancel_button.setText(no)
    cancel_button.setFocusPolicy(QtCore.Qt.NoFocus)
    cancel_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    button_box.accepted.connect(msg_box.accept)
    button_box.rejected.connect(msg_box.reject)
    msg_box.layout().addWidget(button_box)
    ret = msg_box.exec_()
    # QDialog.exec_() returns 1 (Accepted) when the OK path was taken.
    if ret == 1:
        return line_edit.text()
    else:
        return None
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import random
import unittest
import imath
import os
import IECore
import GafferTest
import GafferImage
import GafferImageTest
class DeepHoldoutTest( GafferImageTest.ImageTestCase ) :
    """Unit tests for the GafferImage.DeepHoldout node, which holds one deep
    image out against another and produces a flat result."""

    # Deep EXR test fixture shipped with Gaffer, used as the input for all tests.
    representativeImagePath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/representativeDeepImage.exr" )

    def testBasics( self ):
        """Validate DeepHoldout output against two independent references:
        an analytic formula for the self-holdout alpha, and a manually
        constructed holdout network built from Shuffle/DeepMerge nodes."""

        # Source deep image, plus an Offset copy used as the holdout input.
        representativeImage = GafferImage.ImageReader()
        representativeImage["fileName"].setValue( self.representativeImagePath )
        offset = GafferImage.Offset()
        offset["in"].setInput( representativeImage["out"] )

        # Node under test: hold the image out against its (initially un-offset) copy.
        holdout = GafferImage.DeepHoldout()
        holdout["in"].setInput( representativeImage["out"] )
        holdout["holdout"].setInput( offset["out"] )

        # Flattened original, used as the basis of the analytic reference below.
        flat = GafferImage.DeepToFlat()
        flat["in"].setInput( representativeImage["out"] )

        # For the case of holding out an image by itself, we can find an analytic solution for the
        # held out alpha. For a composited alpha value A, the held out alpha will be ( 1 - ( 1 - A )^2 ) / 2
        # Check that this relationship holds
        alphaOnlyHoldout = GafferImage.DeleteChannels()
        alphaOnlyHoldout["in"].setInput( holdout["out"] )
        alphaOnlyHoldout["mode"].setValue( GafferImage.DeleteChannels.Mode.Keep )
        alphaOnlyHoldout["channels"].setValue( '[A]' )

        # First Grade: per the formula above, this computes ( 1 - A )^2 on the
        # alpha channel (complement via multiply/offset, then square).
        complementAndSquare = GafferImage.Grade()
        complementAndSquare["in"].setInput( flat["out"] )
        complementAndSquare["channels"].setValue( '[A]' )
        complementAndSquare["multiply"].setValue( imath.Color4f( 1, 1, 1, -1 ) )
        complementAndSquare["offset"].setValue( imath.Color4f( 0, 0, 0, 1 ) )
        complementAndSquare["gamma"].setValue( imath.Color4f( 1, 1, 1, 0.5 ) )

        # Second Grade: turns the result into ( 1 - ( 1 - A )^2 ) / 2.
        complementAndHalve = GafferImage.Grade()
        complementAndHalve["in"].setInput( complementAndSquare["out"] )
        complementAndHalve["channels"].setValue( '[A]' )
        complementAndHalve["multiply"].setValue( imath.Color4f( 1, 1, 1, -0.5 ) )
        complementAndHalve["offset"].setValue( imath.Color4f( 0, 0, 0, 0.5 ) )

        alphaOnlyReference = GafferImage.DeleteChannels()
        alphaOnlyReference["in"].setInput( complementAndHalve["out"] )
        alphaOnlyReference["mode"].setValue( GafferImage.DeleteChannels.Mode.Keep )
        alphaOnlyReference["channels"].setValue( '[A]' )

        self.assertImagesEqual( alphaOnlyHoldout["out"], alphaOnlyReference["out"], maxDifference = 1e-6 )

        # For a more complex holdout, we can create a comparison manually using shuffles and a DeepMerge
        # Copy RGBA aside into holdout* channels so they survive the merge.
        preShuffle = GafferImage.Shuffle()
        preShuffle["in"].setInput( representativeImage["out"] )
        preShuffle["channels"].addChild( preShuffle.ChannelPlug( "holdoutR", "R" ) )
        preShuffle["channels"].addChild( preShuffle.ChannelPlug( "holdoutG", "G" ) )
        preShuffle["channels"].addChild( preShuffle.ChannelPlug( "holdoutB", "B" ) )
        preShuffle["channels"].addChild( preShuffle.ChannelPlug( "holdoutA", "A" ) )

        manualHoldoutMerge = GafferImage.DeepMerge()
        manualHoldoutMerge["in"][0].setInput( preShuffle["out"] )
        manualHoldoutMerge["in"][1].setInput( offset["out"] )

        manualHoldoutFlatten = GafferImage.DeepToFlat()
        manualHoldoutFlatten["in"].setInput( manualHoldoutMerge["out"] )

        # Shuffle the preserved channels back into their usual names.
        postShuffle = GafferImage.Shuffle()
        postShuffle["in"].setInput( manualHoldoutFlatten["out"] )
        postShuffle["channels"].addChild( postShuffle.ChannelPlug( "R", "holdoutR" ) )
        postShuffle["channels"].addChild( postShuffle.ChannelPlug( "G", "holdoutG" ) )
        postShuffle["channels"].addChild( postShuffle.ChannelPlug( "B", "holdoutB" ) )
        postShuffle["channels"].addChild( postShuffle.ChannelPlug( "A", "holdoutA" ) )

        channelCleanup = GafferImage.DeleteChannels()
        channelCleanup["in"].setInput( postShuffle["out"] )
        channelCleanup["mode"].setValue( GafferImage.DeleteChannels.Mode.Keep )
        channelCleanup["channels"].setValue( '[RGBAZ]' )

        cropCleanup = GafferImage.Crop()
        cropCleanup["in"].setInput( channelCleanup["out"] )
        cropCleanup["area"].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 150, 100 ) ) )

        self.assertImagesEqual( holdout["out"], cropCleanup["out"], maxDifference = 1e-5 )

        # The way we handle Z is a bit arbitrary, but everything else we should be able to match with
        # this network with arbitrary inputs
        holdoutNoZ = GafferImage.DeleteChannels()
        holdoutNoZ["in"].setInput( holdout["out"] )
        holdoutNoZ["channels"].setValue( 'Z' )

        channelCleanup["channels"].setValue( '[RGBA]' )

        # Re-run the comparison for several different holdout offsets.
        offset["offset"].setValue( imath.V2i( 13, 31 ) )
        self.assertImagesEqual( holdoutNoZ["out"], cropCleanup["out"], maxDifference = 1e-5 )
        offset["offset"].setValue( imath.V2i( -13, -51 ) )
        self.assertImagesEqual( holdoutNoZ["out"], cropCleanup["out"], maxDifference = 1e-5 )
        offset["offset"].setValue( imath.V2i( 103, -27 ) )
        self.assertImagesEqual( holdoutNoZ["out"], cropCleanup["out"], maxDifference = 1e-5 )

    def testDirtyPropagation( self ):
        """Check which plugs on DeepHoldout are dirtied when the main input,
        the holdout input, and their channel layouts change."""

        a = GafferImage.Constant()
        b = GafferImage.Constant()

        aShuffle = GafferImage.Shuffle()
        aShuffle["in"].setInput( a["out"] )
        bShuffle = GafferImage.Shuffle()
        bShuffle["in"].setInput( b["out"] )

        holdout = GafferImage.DeepHoldout()
        holdout["in"].setInput( aShuffle["out"] )
        holdout["holdout"].setInput( bShuffle["out"] )

        # Capture every plugDirtiedSignal emitted on the holdout node.
        cs = GafferTest.CapturingSlot( holdout.plugDirtiedSignal() )

        # Changing the main input's colour dirties channel data all the way through.
        a["color"]["r"].setValue( 0.5 )
        dirtiedPlugs = { x[0].relativeName( holdout ) for x in cs }
        self.assertIn( "__intermediateIn.channelData", dirtiedPlugs )
        self.assertIn( "__flattened.channelData", dirtiedPlugs )
        self.assertIn( "out.channelData", dirtiedPlugs )
        del cs[:]

        # Changing the holdout input's alpha dirties the flattened and output data.
        b["color"]["a"].setValue( 0.5 )
        dirtiedPlugs = { x[0].relativeName( holdout ) for x in cs }
        self.assertIn( "__flattened.channelData", dirtiedPlugs )
        self.assertIn( "out.channelData", dirtiedPlugs )
        del cs[:]

        # Adding a channel on the main input also dirties the channel names.
        aShuffle["channels"].addChild( bShuffle.ChannelPlug( "Z", "__white" ) )
        dirtiedPlugs = { x[0].relativeName( holdout ) for x in cs }
        self.assertIn( "__intermediateIn.channelData", dirtiedPlugs )
        self.assertIn( "__flattened.channelData", dirtiedPlugs )
        self.assertIn( "out.channelData", dirtiedPlugs )
        self.assertIn( "__intermediateIn.channelNames", dirtiedPlugs )
        self.assertIn( "__flattened.channelNames", dirtiedPlugs )
        self.assertIn( "out.channelNames", dirtiedPlugs )
        del cs[:]

        # Adding a channel on the holdout input dirties the flattened data and names.
        bShuffle["channels"].addChild( bShuffle.ChannelPlug( "Z", "__white" ) )
        dirtiedPlugs = { x[0].relativeName( holdout ) for x in cs }
        self.assertIn( "__flattened.channelData", dirtiedPlugs )
        self.assertIn( "out.channelData", dirtiedPlugs )
        self.assertIn( "__flattened.channelNames", dirtiedPlugs )
        del cs[:]
# Allow the test file to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
|
<filename>11 - Extra-- sonos snips voice app/action-sonos.py
#!/usr/bin/env python2
# -*-: coding utf-8 -*-
import functools
import logging
import sys
import traceback

from hermes_python.hermes import Hermes
from snipssonos.helpers.snips_config_parser import read_configuration_file
from snipssonos.helpers.snips_configuration_validator import validate_configuration_file, AVAILABLE_MUSIC_SERVICES
from snipssonos.use_cases.hotword.lower_volume import HotwordLowerVolumeUseCase
from snipssonos.use_cases.hotword.restore_volume import HotwordRestoreVolumeUseCase
from snipssonos.use_cases.volume.up import VolumeUpUseCase
from snipssonos.use_cases.volume.down import VolumeDownUseCase
from snipssonos.use_cases.volume.set import VolumeSetUseCase
from snipssonos.use_cases.mute import MuteUseCase
from snipssonos.use_cases.play.track import PlayTrackUseCase
from snipssonos.use_cases.play.artist import PlayArtistUseCase
from snipssonos.use_cases.play.music import PlayMusicUseCase
from snipssonos.use_cases.resume_music import ResumeMusicUseCase
from snipssonos.use_cases.speaker_interrupt import SpeakerInterruptUseCase
from snipssonos.use_cases.next_track import NextTrackUseCase
from snipssonos.use_cases.previous_track import PreviousTrackUseCase
from snipssonos.use_cases.get_track_info import GetTrackInfoUseCase
from snipssonos.use_cases.request_objects import HotwordLowerVolumeRequestObject, HotwordRestoreVolumeRequestObject
from snipssonos.adapters.request_adapter import VolumeUpRequestAdapter, PlayTrackRequestAdapter, \
PlayArtistRequestAdapter, VolumeSetRequestAdapter, VolumeDownRequestAdapter, ResumeMusicRequestAdapter, \
SpeakerInterruptRequestAdapter, MuteRequestAdapter, PlayMusicRequestAdapter, NextTrackRequestAdapter, \
PreviousTrackRequestAdapter, GetTrackInfoRequestAdapter
from snipssonos.services.node.device_discovery_service import NodeDeviceDiscoveryService
from snipssonos.services.node.device_transport_control import NodeDeviceTransportControlService
from snipssonos.services.spotify.music_playback_service import SpotifyNodeMusicPlaybackService
from snipssonos.services.deezer.music_playback_service import DeezerNodeMusicPlaybackService
from snipssonos.services.spotify.music_search_service import SpotifyMusicSearchService
from snipssonos.services.hermes.state_persistence import HermesStatePersistence
from snipssonos.services.feedback.feedback_service import FeedbackService
from snipssonos.services.deezer.music_search_and_play_service import DeezerMusicSearchService
# Name of the configuration file read at startup.
CONFIG_INI = "config.ini"

# Configuration: parse and validate config.ini before anything else runs.
CONFIGURATION = read_configuration_file(CONFIG_INI)
validate_configuration_file(CONFIGURATION)

# Music service to use; falls back to the first available service when unset.
MUSIC_PROVIDER = CONFIGURATION["global"].get('music_provider', AVAILABLE_MUSIC_SERVICES[0])

# Credentials for the configured music provider, taken from the [secret] section.
CLIENT_ID = CONFIGURATION['secret']['client_id']
CLIENT_SECRET = CONFIGURATION['secret']['client_secret']
REFRESH_TOKEN = CONFIGURATION['secret']['refresh_token']

# Connection: MQTT endpoint of the Hermes broker (default MQTT port 1883).
HOSTNAME = CONFIGURATION['global'].get('hostname', "localhost")
HERMES_HOST = "{}:1883".format(HOSTNAME)

# Language used for spoken feedback; defaults to French.
LANGUAGE = CONFIGURATION['global'].get('language', "fr")

# Logging: "info" selects INFO level; anything else (including unset) means DEBUG.
LOG_LEVEL = CONFIGURATION['global'].get('log_level')
if LOG_LEVEL == "info":
    logging.getLogger().setLevel(level=logging.INFO)
else:
    logging.getLogger().setLevel(level=logging.DEBUG)
# Hotword callback
def hotword_detected_callback(hermes, sessionStartedMessage):
    """Duck the speaker volume as soon as the wakeword fires, so the
    assistant can be heard over the music."""
    logging.info("Hotword detected")

    lower_volume = HotwordLowerVolumeUseCase(
        hermes.device_discovery_service,
        hermes.device_transport_control_service,
        hermes.state_persistence_service,
    )
    result = lower_volume.execute(HotwordLowerVolumeRequestObject())

    if not result:
        logging.error("An error occured when trying to lower the volume when the wakeword was detected")

    hermes.publish_end_session(sessionStartedMessage.session_id, "")
def restore_volume_for_hotword(intent_callback):
    """Decorator for intent callbacks.

    Invokes the wrapped callback, then restores the speaker volume to the
    level it had before the hotword ducked it (see hotword_detected_callback).

    :param intent_callback: callback with signature (hermes, intentMessage).
    :return: wrapped callback with the same signature.
    """
    # functools.wraps preserves the callback's __name__/__doc__, so logs and
    # debuggers show the real intent handler instead of "restore_volume_wrapper".
    @functools.wraps(intent_callback)
    def restore_volume_wrapper(hermes, intentMessage):
        intent_callback(hermes, intentMessage)  # We call the callback

        # We restore the volume to what it was before the hotword was detected.
        use_case = HotwordRestoreVolumeUseCase(hermes.device_discovery_service,
                                               hermes.device_transport_control_service,
                                               hermes.state_persistence_service)
        request_object = HotwordRestoreVolumeRequestObject()
        response = use_case.execute(request_object)

        if not response:
            logging.error("Error when recovering the volume")
            logging.error(response.message)

    return restore_volume_wrapper
def session_ended_callback(hermes, sessionEndedMessage):
    """When a session ends without a recognised intent, put the volume back."""
    logging.info("Session ended")
    INTENT_NOT_RECOGNIZED = 4  # TODO : refactor this.

    if sessionEndedMessage.termination.termination_type != INTENT_NOT_RECOGNIZED:
        return

    # We restore the volume to what it was before the hotword was detected.
    logging.info("The intent was not recognized")
    restore = HotwordRestoreVolumeUseCase(
        hermes.device_discovery_service,
        hermes.device_transport_control_service,
        hermes.state_persistence_service,
    )
    outcome = restore.execute(HotwordRestoreVolumeRequestObject())
    if not outcome:
        logging.error("Error when recovering the volume")
        logging.error(outcome.message)
# Music management functions
@restore_volume_for_hotword
def addSong_callback(hermes, intentMessage):
    # Intent is declared but not implemented yet; fail loudly rather than silently.
    raise NotImplementedError("addSong_callback() not implemented")
@restore_volume_for_hotword
def getInfos_callback(hermes, intentMessage):
    """Tell the user which track is currently playing."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = GetTrackInfoUseCase(hermes.device_discovery_service,
                                   hermes.device_transport_control_service, hermes.feedback_service)
    request = GetTrackInfoRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.debug("Response Success : {}".format(result))
        hermes.publish_end_session(intentMessage.session_id, result.feedback)
    else:
        feedback = hermes.feedback_service.from_response_object(result)
        hermes.publish_end_session(intentMessage.session_id, feedback)
@restore_volume_for_hotword
def radioOn_callback(hermes, intentMessage):
    # Intent is declared but not implemented yet; fail loudly rather than silently.
    raise NotImplementedError("radioOn_callback() not implemented")
@restore_volume_for_hotword
def previousSong_callback(hermes, intentMessage):
    """Skip back to the previous track."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = PreviousTrackUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    request = PreviousTrackRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def nextSong_callback(hermes, intentMessage):
    """Skip forward to the next track."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = NextTrackUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    request = NextTrackRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def resumeMusic_callback(hermes, intentMessage):
    """Resume playback that was previously paused."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = ResumeMusicUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    request = ResumeMusicRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def speakerInterrupt_callback(hermes, intentMessage):
    """Stop the speaker immediately."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = SpeakerInterruptUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    request = SpeakerInterruptRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def volumeDown_callback(hermes, intentMessage):
    """Lower the speaker volume."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = VolumeDownUseCase(
        hermes.device_discovery_service,
        hermes.device_transport_control_service,
        hermes.state_persistence_service,
    )
    request = VolumeDownRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def volumeUp_callback(hermes, intentMessage):
    """Raise the speaker volume."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = VolumeUpUseCase(
        hermes.device_discovery_service,
        hermes.device_transport_control_service,
        hermes.state_persistence_service,
    )
    request = VolumeUpRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
def volumeSet_callback(hermes, intentMessage):
    """Set the speaker volume to the level requested in the intent.

    NOTE(review): presumably not wrapped in @restore_volume_for_hotword
    because on success the volume must stay at the newly requested level;
    the pre-hotword volume is only restored manually on failure — confirm.
    """
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))
    use_case = VolumeSetUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    volume_set_request = VolumeSetRequestAdapter.from_intent_message(intentMessage)
    response = use_case.execute(volume_set_request)
    if not response:
        logging.info(response.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
        # Restore the volume to the previous level.
        # (The set failed, so fall back to the volume saved when the hotword fired.)
        use_case = HotwordRestoreVolumeUseCase(hermes.device_discovery_service,
                                               hermes.device_transport_control_service,
                                               hermes.state_persistence_service)
        request_object = HotwordRestoreVolumeRequestObject()
        response = use_case.execute(request_object)
        if not response:
            logging.error("Error when recovering the volume")
            logging.error(response.message)
    else:
        logging.info(response)
        hermes.publish_end_session(intentMessage.session_id, "")
@restore_volume_for_hotword
def mute_callback(hermes, intentMessage):
    """Mute the speaker."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = MuteUseCase(hermes.device_discovery_service, hermes.device_transport_control_service)
    request = MuteRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def playTrack_callback(hermes, intentMessage):
    """Search the music service for a specific track and play it."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = PlayTrackUseCase(hermes.device_discovery_service, hermes.music_search_service,
                                hermes.music_playback_service, hermes.feedback_service)
    request = PlayTrackRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def playArtist_callback(hermes, intentMessage):
    """Search the music service for an artist and start playback."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = PlayArtistUseCase(hermes.device_discovery_service, hermes.music_search_service,
                                 hermes.music_playback_service, hermes.feedback_service)
    request = PlayArtistRequestAdapter.from_intent_message(intentMessage)
    logging.info(request)  # log the parsed request for debugging
    result = use_case.execute(request)

    if result:
        logging.info(result)
        hermes.publish_end_session(intentMessage.session_id, "")
    else:
        logging.info(result.value)
        hermes.publish_end_session(intentMessage.session_id, hermes.feedback_service.get_short_error_message())
@restore_volume_for_hotword
def playMusic_callback(hermes, intentMessage):
    """Generic "play some music" intent: search the service and start playback."""
    logging.info("Intent detected [{}]".format(intentMessage.intent.intent_name))

    use_case = PlayMusicUseCase(hermes.device_discovery_service, hermes.music_search_service,
                                hermes.music_playback_service, hermes.feedback_service)
    request = PlayMusicRequestAdapter.from_intent_message(intentMessage)
    result = use_case.execute(request)

    if result:
        logging.debug("Response Success : {}".format(result))
        hermes.publish_end_session(intentMessage.session_id, result.feedback)
    else:
        # Log the full failure details before answering with spoken feedback.
        logging.error('Error type : {}'.format(result.type))
        logging.error('Error message : {}'.format(result.message))
        logging.error('Error exception : {}'.format(result.exception))
        logging.error(result.tb)
        feedback = hermes.feedback_service.from_response_object(result)
        hermes.publish_end_session(intentMessage.session_id, feedback)
def get_playback_service(music_provider):
    """Return the playback service for *music_provider*.

    :param music_provider: "deezer" or "spotify".
    :raises ValueError: for any other provider. (Previously an unknown
        provider silently returned None, which only crashed later, far from
        the misconfiguration.)
    """
    if music_provider == "deezer":
        return DeezerNodeMusicPlaybackService()
    if music_provider == "spotify":
        return SpotifyNodeMusicPlaybackService(CONFIGURATION=CONFIGURATION)
    raise ValueError("Unsupported music provider: {}".format(music_provider))
def get_music_search_service(music_provider, device_disco_service):
    """Return the music search service for *music_provider*.

    :param music_provider: "spotify" or "deezer".
    :param device_disco_service: device discovery service (used by Deezer).
    :raises ValueError: for any other provider. (Previously an unknown
        provider silently returned None, which only crashed later, far from
        the misconfiguration.)
    """
    if music_provider == "spotify":
        return SpotifyMusicSearchService(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN)
    if music_provider == "deezer":
        return DeezerMusicSearchService(device_disco_service)
    raise ValueError("Unsupported music provider: {}".format(music_provider))
if __name__ == "__main__":
    # Wire all services onto the Hermes client, then subscribe each intent to
    # its callback and block forever processing MQTT messages.
    with Hermes(HERMES_HOST) as h:
        logging.info("Starting SONOS app.")
        # Shared mutable state (e.g. the pre-hotword volume) kept in a dict.
        h.state_persistence_service = HermesStatePersistence(dict())
        h.device_discovery_service = NodeDeviceDiscoveryService(CONFIGURATION)
        h.device_transport_control_service = NodeDeviceTransportControlService(CONFIGURATION)
        h.feedback_service = FeedbackService(LANGUAGE)
        h.music_search_service = get_music_search_service(MUSIC_PROVIDER, h.device_discovery_service)
        h.music_playback_service = get_playback_service(MUSIC_PROVIDER)
        logging.info("All services initialized. Waiting for queries ...")

        # Intent names below must match the intents declared in the Snips console.
        h \
            .subscribe_session_started(hotword_detected_callback) \
            .subscribe_intent("playMusic4", playMusic_callback) \
            .subscribe_intent("volumeUp4", volumeUp_callback) \
            .subscribe_intent("volumeDown4", volumeDown_callback) \
            .subscribe_intent("volumeSet4", volumeSet_callback) \
            .subscribe_intent("muteSound4", mute_callback) \
            .subscribe_intent("resumeMusic4", resumeMusic_callback) \
            .subscribe_intent("speakerInterrupt4", speakerInterrupt_callback) \
            .subscribe_intent("nextSong4", nextSong_callback) \
            .subscribe_intent("previousSong4", previousSong_callback) \
            .subscribe_intent("getInfos4", getInfos_callback) \
            .subscribe_session_ended(session_ended_callback) \
            .loop_forever()
|
<gh_stars>0
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OTLP Metrics Exporter"""
import logging
from typing import List, Optional, Sequence, Type, TypeVar
from grpc import ChannelCredentials
from opentelemetry.configuration import Configuration
from opentelemetry.exporter.otlp.exporter import (
OTLPExporterMixin,
_get_resource_data,
_load_credential_from_file,
)
from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
ExportMetricsServiceRequest,
)
from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (
MetricsServiceStub,
)
from opentelemetry.proto.common.v1.common_pb2 import StringKeyValue
from opentelemetry.proto.metrics.v1.metrics_pb2 import (
AggregationTemporality,
DoubleDataPoint,
DoubleGauge,
DoubleSum,
InstrumentationLibraryMetrics,
IntDataPoint,
IntGauge,
IntSum,
)
from opentelemetry.proto.metrics.v1.metrics_pb2 import Metric as OTLPMetric
from opentelemetry.proto.metrics.v1.metrics_pb2 import ResourceMetrics
from opentelemetry.sdk.metrics import (
Counter,
SumObserver,
UpDownCounter,
UpDownSumObserver,
ValueObserver,
ValueRecorder,
)
from opentelemetry.sdk.metrics.export import (
ExportRecord,
MetricsExporter,
MetricsExportResult,
)
from opentelemetry.sdk.metrics.export.aggregate import (
HistogramAggregator,
LastValueAggregator,
MinMaxSumCountAggregator,
SumAggregator,
ValueObserverAggregator,
)
# Module-level logger for this exporter.
logger = logging.getLogger(__name__)

# A data point is either an int or a double, depending on the instrument's value type.
DataPointT = TypeVar("DataPointT", IntDataPoint, DoubleDataPoint)
def _get_data_points(
    export_record: ExportRecord,
    data_point_class: Type[DataPointT],
    aggregation_temporality: int,
) -> List[DataPointT]:
    """Convert an SDK ExportRecord into a single-element list of OTLP data points.

    Args:
        export_record: the SDK record whose aggregator holds the value.
        data_point_class: IntDataPoint or DoubleDataPoint, matching the
            instrument's value type.
        aggregation_temporality: OTLP AggregationTemporality enum value;
            selects which aggregator timestamp is used as the start time.

    Raises:
        Exception: for aggregator types that have no OTLP mapping.
    """
    if isinstance(export_record.aggregator, SumAggregator):
        value = export_record.aggregator.checkpoint
    elif isinstance(export_record.aggregator, MinMaxSumCountAggregator):
        # FIXME: How are values to be interpreted from this aggregator?
        raise Exception("MinMaxSumCount aggregator data not supported")
    elif isinstance(export_record.aggregator, HistogramAggregator):
        # FIXME: How are values to be interpreted from this aggregator?
        raise Exception("Histogram aggregator data not supported")
    elif isinstance(export_record.aggregator, LastValueAggregator):
        value = export_record.aggregator.checkpoint
    elif isinstance(export_record.aggregator, ValueObserverAggregator):
        value = export_record.aggregator.checkpoint.last
    else:
        # Previously an unknown aggregator fell through this chain and crashed
        # later with UnboundLocalError on `value`; fail with a clear message.
        raise Exception(
            "Unsupported aggregator type: {}".format(
                type(export_record.aggregator).__name__
            )
        )

    # Cumulative metrics report from the first recorded timestamp; everything
    # else reports from the most recent checkpoint.
    if aggregation_temporality == (
        AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE
    ):
        start_time_unix_nano = export_record.aggregator.first_timestamp
    else:
        start_time_unix_nano = (
            export_record.aggregator.initial_checkpoint_timestamp
        )

    return [
        data_point_class(
            labels=[
                StringKeyValue(key=str(label_key), value=str(label_value))
                for label_key, label_value in export_record.labels
            ],
            value=value,
            start_time_unix_nano=start_time_unix_nano,
            time_unix_nano=(export_record.aggregator.last_update_timestamp),
        )
    ]
class OTLPMetricsExporter(
    MetricsExporter,
    OTLPExporterMixin[
        ExportRecord, ExportMetricsServiceRequest, MetricsExportResult
    ],
):
    # pylint: disable=unsubscriptable-object
    """OTLP metrics exporter

    Args:
        endpoint: OpenTelemetry Collector receiver endpoint
        insecure: Connection type
        credentials: Credentials object for server authentication
        headers: Headers to send when exporting
        timeout: Backend request timeout in seconds
    """

    # gRPC stub class and result enum used by the OTLPExporterMixin machinery.
    _stub = MetricsServiceStub
    _result = MetricsExportResult

    def __init__(
        self,
        endpoint: Optional[str] = None,
        insecure: Optional[bool] = None,
        credentials: Optional[ChannelCredentials] = None,
        headers: Optional[Sequence] = None,
        timeout: Optional[int] = None,
    ):
        # Explicit arguments win; otherwise fall back to the
        # EXPORTER_OTLP_METRIC_* configuration values.
        if insecure is None:
            insecure = Configuration().EXPORTER_OTLP_METRIC_INSECURE

        # For secure channels, load credentials from the configured certificate
        # file unless credentials were passed in directly.
        if (
            not insecure
            and Configuration().EXPORTER_OTLP_METRIC_CERTIFICATE is not None
        ):
            credentials = credentials or _load_credential_from_file(
                Configuration().EXPORTER_OTLP_METRIC_CERTIFICATE
            )

        super().__init__(
            **{
                "endpoint": endpoint
                or Configuration().EXPORTER_OTLP_METRIC_ENDPOINT,
                "insecure": insecure,
                "credentials": credentials,
                "headers": headers
                or Configuration().EXPORTER_OTLP_METRIC_HEADERS,
                "timeout": timeout
                or Configuration().EXPORTER_OTLP_METRIC_TIMEOUT,
            }
        )

    # pylint: disable=no-self-use
    def _translate_data(
        self, export_records: Sequence[ExportRecord]
    ) -> ExportMetricsServiceRequest:
        """Translate SDK export records into an ExportMetricsServiceRequest,
        grouping the resulting OTLP metrics by SDK resource."""
        # pylint: disable=too-many-locals,no-member
        # pylint: disable=attribute-defined-outside-init

        # Maps each SDK resource to the InstrumentationLibraryMetrics built for it.
        sdk_resource_instrumentation_library_metrics = {}

        # The criteria to decide how to translate export_records is based on this table
        # taken directly from OpenTelemetry Proto v0.5.0:
        # TODO: Update table after the decision on:
        # https://github.com/open-telemetry/opentelemetry-specification/issues/731.
        # By default, metrics recording using the OpenTelemetry API are exported as
        # (the table does not include MeasurementValueType to avoid extra rows):
        #
        # Instrument Type
        # ----------------------------------------------
        # Counter Sum(aggregation_temporality=delta;is_monotonic=true)
        # UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false)
        # ValueRecorder TBD
        # SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true)
        # UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false)
        # ValueObserver Gauge()
        for export_record in export_records:

            if export_record.resource not in (
                sdk_resource_instrumentation_library_metrics.keys()
            ):
                sdk_resource_instrumentation_library_metrics[
                    export_record.resource
                ] = InstrumentationLibraryMetrics()

            # Proto classes and oneof field names keyed by the instrument's
            # Python value type. NOTE(review): this table is loop-invariant
            # and could be hoisted out of the loop.
            type_class = {
                int: {
                    "sum": {"class": IntSum, "argument": "int_sum"},
                    "gauge": {"class": IntGauge, "argument": "int_gauge"},
                    "data_point_class": IntDataPoint,
                },
                float: {
                    "sum": {"class": DoubleSum, "argument": "double_sum"},
                    "gauge": {
                        "class": DoubleGauge,
                        "argument": "double_gauge",
                    },
                    "data_point_class": DoubleDataPoint,
                },
            }

            value_type = export_record.instrument.value_type
            sum_class = type_class[value_type]["sum"]["class"]
            gauge_class = type_class[value_type]["gauge"]["class"]
            data_point_class = type_class[value_type]["data_point_class"]

            # Counter -> monotonic cumulative Sum.
            if isinstance(export_record.instrument, Counter):
                aggregation_temporality = (
                    AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE
                )
                otlp_metric_data = sum_class(
                    data_points=_get_data_points(
                        export_record,
                        data_point_class,
                        aggregation_temporality,
                    ),
                    aggregation_temporality=aggregation_temporality,
                    is_monotonic=True,
                )
                argument = type_class[value_type]["sum"]["argument"]

            # UpDownCounter -> non-monotonic cumulative Sum.
            elif isinstance(export_record.instrument, UpDownCounter):
                aggregation_temporality = (
                    AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE
                )
                otlp_metric_data = sum_class(
                    data_points=_get_data_points(
                        export_record,
                        data_point_class,
                        aggregation_temporality,
                    ),
                    aggregation_temporality=aggregation_temporality,
                    is_monotonic=False,
                )
                argument = type_class[value_type]["sum"]["argument"]

            # ValueRecorder has no agreed OTLP representation yet (see table).
            elif isinstance(export_record.instrument, (ValueRecorder)):
                logger.warning("Skipping exporting of ValueRecorder metric")
                continue

            # SumObserver -> monotonic cumulative Sum.
            elif isinstance(export_record.instrument, SumObserver):
                aggregation_temporality = (
                    AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE
                )
                otlp_metric_data = sum_class(
                    data_points=_get_data_points(
                        export_record,
                        data_point_class,
                        aggregation_temporality,
                    ),
                    aggregation_temporality=aggregation_temporality,
                    is_monotonic=True,
                )
                argument = type_class[value_type]["sum"]["argument"]

            # UpDownSumObserver -> non-monotonic cumulative Sum.
            elif isinstance(export_record.instrument, UpDownSumObserver):
                aggregation_temporality = (
                    AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE
                )
                otlp_metric_data = sum_class(
                    data_points=_get_data_points(
                        export_record,
                        data_point_class,
                        aggregation_temporality,
                    ),
                    aggregation_temporality=aggregation_temporality,
                    is_monotonic=False,
                )
                argument = type_class[value_type]["sum"]["argument"]

            # ValueObserver -> Gauge with delta temporality.
            elif isinstance(export_record.instrument, (ValueObserver)):
                otlp_metric_data = gauge_class(
                    data_points=_get_data_points(
                        export_record,
                        data_point_class,
                        AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA,
                    )
                )
                argument = type_class[value_type]["gauge"]["argument"]

            # Append the translated metric to its resource's bucket, carrying
            # over the instrument's name, description and unit.
            instrumentation_library_metrics = sdk_resource_instrumentation_library_metrics[
                export_record.resource
            ]
            instrumentation_library_metrics.metrics.append(
                OTLPMetric(
                    **{
                        "name": export_record.instrument.name,
                        "description": (export_record.instrument.description),
                        "unit": export_record.instrument.unit,
                        argument: otlp_metric_data,
                    }
                )
            )

            # Record which instrumentation library produced the metric.
            instrumentation_library_metrics.instrumentation_library.name = (
                export_record.instrument.meter.instrumentation_info.name
            )
            version = (
                export_record.instrument.meter.instrumentation_info.version
            )
            if version:
                (
                    instrumentation_library_metrics.instrumentation_library.version
                ) = version

        return ExportMetricsServiceRequest(
            resource_metrics=_get_resource_data(
                sdk_resource_instrumentation_library_metrics,
                ResourceMetrics,
                "metrics",
            )
        )

    def export(self, metrics: Sequence[ExportRecord]) -> MetricsExportResult:
        """Export a batch of metric records via the mixin's gRPC machinery."""
        # pylint: disable=arguments-differ
        return self._export(metrics)
|
"""
Define the Device class.
Authors:
<NAME> <<EMAIL>>
Created: Jan 2018
Modified: Jan 2018
"""
from typing import Set, List
from mgb.local_detection import Neighborhood
from mgb.shared import Configuration
class Device(object):
"""Define a mobile device.
A mobile device keep track of its neighborhood and executes the local detection part
of the algorithm.
"""
    def __init__(self, uid: int, config: Configuration) -> None:
        """Initialize mobile device data.

        :param uid: Identifier of the mobile device.
        :param config: Algorithm parameters (friend and inactivity thresholds).
        """
        self._uid = uid
        # Identifiers of devices currently connected to this one.
        self._current_connections: Set[int] = set()
        # Thresholds driving the local detection algorithm.
        self._friend_threshold = config.friend_threshold
        self._inactive_threshold = config.inactive_threshold
        # Nodes seen nearby but not (yet) considered friends.
        self._strangers = Neighborhood(self._inactive_threshold, self._friend_threshold)
        # Nodes promoted to friends by the detection algorithm.
        self._friends = Neighborhood(self._inactive_threshold, self._friend_threshold)
        # Friend lists archived as detected groups.
        self._archived: List[Neighborhood] = []
def add_connection(self, uid: int) -> None:
"""Add a new connection in the list.
:param uid: Identifier of the other node in the connection.
"""
self._current_connections.add(uid)
def remove_connection(self, uid: int) -> None:
"""Remove a connection from the list.
:param uid: Identifier of the other node in the connection.
"""
self._current_connections.remove(uid)
@property
def uid(self) -> int:
"""Return the mobile unique identifier."""
return self._uid
@property
def archived(self) -> List[Neighborhood]:
"""Access archived friends lists (groups)."""
return self._archived
@property
def friends(self) -> Neighborhood:
"""Return the current list of friends."""
return self._friends
@property
def strangers(self) -> Neighborhood:
"""Return the current list of strangers."""
return self._strangers
def run_local_detection(self, time: float) -> None:
"""Run the local detection algorithm based on current state."""
self._update_friends_connection()
self._update_strangers_connection(time)
# Check if we need to archive current friend list as a group
if not self._friends.is_active:
# When add itself as a member the ended time is adjusted to current time
self._friends.add(self.uid, time)
if len(self._friends) > 2:
self._archived.append(self._friends)
self._friends = Neighborhood(self._inactive_threshold, self._friend_threshold)
self._strangers = Neighborhood(self._inactive_threshold, self._friend_threshold)
def _update_friends_connection(self) -> None:
"""Update friends information based on current connections.
An entity in the friends list has its close_counter incremented if there are
a connection with it. Otherwise, it has its away_counter incremented.
"""
# Friend with current connection
for uid in (self._friends & self._current_connections):
self._friends[uid].increment_close()
# Friends without current connection
for uid in (self._friends - self._current_connections):
self._friends[uid].increment_away()
def _update_strangers_connection(self, time: float) -> None:
"""Update strangers information based on current connections.
An entity in the strangers list has its close_counter incremented if there are a
connection with it. If there are no connection with it, the away counter is incremented.
New nodes are added in the strangers list. Nodes with a number of connections greater
than the threshold, are transferred to friends list.
:param time: The current simulation time.
"""
# Strangers without current connection
for uid in (self._strangers - self._current_connections):
self._strangers[uid].increment_away()
# If the entity is inactive, remove it from strangers list
if not self._strangers[uid].is_active:
self._strangers.remove(uid)
# Strangers with current connection
for uid in (self._strangers & self._current_connections):
self._strangers[uid].increment_close()
# If the entity is a friend, remove it from strangers list and add it to friends
if self._strangers[uid].is_friend:
self._strangers.remove(uid)
self._friends.add(uid, time)
# New strangers
for uid in (self._current_connections - (self._strangers | self._friends)):
self._strangers.add(uid, time)
self._strangers[uid].increment_close()
|
<reponame>nearj/mpvr-motionfiltering
from .definitions import *
class ScenarioSetting():
    """Bundle the data sources that make up one experiment scenario.

    :param name: Scenario name.
    :param motion_data: Motion data source for the scenario.
    :param video_data: Video data source for the scenario.
    :param incidence_data: Incidence data source for the scenario.
    """

    def __init__(self, name, motion_data, video_data, incidence_data):
        self.name = name
        self.motion_data = motion_data
        self.video_data = video_data
        # Bug fix: incidence_data was accepted but silently dropped.
        self.incidence_data = incidence_data
class RawData:
    """Base class for raw data sources identified by a file path."""

    def __init__(self, path):
        # Location of the raw data on disk.
        self.path = path
class MotionData(RawData):
    """Raw motion data source.

    :param path: Location of the motion data file.
    :param sensored_axes_tag: Mapping from axis name to its data column tag.
    :param motion_seperator: Bin boundaries used to classify motion values.
    :param is_classified: Whether the data is already classified.
    :param axes: Names of the motion axes; defaults to the six DOF axes.
    """

    def __init__(self, path, sensored_axes_tag, motion_seperator, is_classified,
                 axes=None):
        super(MotionData, self).__init__(path)
        self.sensored_axes_tag = sensored_axes_tag
        self.motion_seperator = motion_seperator
        self.is_classified = is_classified
        # Bug fix: the default axes list was a shared mutable default
        # argument — mutating one instance's axes leaked into every other
        # instance created with the default. Each instance now gets its own.
        if axes is None:
            axes = ['pitch', 'yaw', 'roll', 'surge', 'heave', 'sway']
        self.axes = axes
class VideoData(RawData):
    """Raw video data source with its container format and resolution."""

    def __init__(self, path, extension, width, height):
        super(VideoData, self).__init__(path)
        # File extension of the video, e.g. '.mp4' or '.png'.
        self.extension = extension
        # Frame resolution in pixels.
        self.width = width
        self.height = height
class Config:
    """Configure for MPVR experiment.

    :var DATA_MANAGER: default raw data manager configure.
    :type DATA_MANAGER: dict
    :var THREEDI: stands for 3DI data set
    :type THREEDI: dict
    :var UOS2018: stands for university of seoul data set at 2018
    :type UOS2018: dict
    """

    def __init__(self,
                 scenario_name: str,
                 motion_data: MotionData,
                 video_data: VideoData,
                 incidence_data_path,
                 timestamp_path,
                 save_result_path,
                 target_sampling_rate=3):
        self.scenario_name = scenario_name
        self.motion_data = motion_data
        self.video_data = video_data
        self.incidence_data_path = incidence_data_path
        self.timestamp_path = timestamp_path
        self.save_result_path = save_result_path
        self.target_sampling_rate = target_sampling_rate

    @staticmethod
    def section_list():
        """Return the list of known data-set sections."""
        return SECTIONS

    @staticmethod
    def get_section_scenarios(section):
        """Return the scenarios declared for the given section."""
        return globals()[section]['scenarios']

    @staticmethod
    def get_tags():
        """Return the known data tags."""
        return TAGS

    @classmethod
    def get_config(cls, section, scenario_name):
        """Dispatch to the factory classmethod named after the section."""
        factory = getattr(cls, str.lower(section))
        return factory(scenario_name)

    @staticmethod
    def _raw_paths(dataset_dir, scenario_name):
        """Build (motion, video, incidence, timestamp, result) paths for a scenario."""
        base = DATA_RAW_DIR + dataset_dir
        return (base + 'motion/' + scenario_name + '.csv',
                base + 'video/' + scenario_name + '.mp4',
                base + 'incidence/' + scenario_name + '.csv',
                base + 'timestamp/' + scenario_name + '.csv',
                DATA_PROCESSED_DIR + 'result/' + dataset_dir)

    @classmethod
    def threedi_2018(cls, scenario_name):
        """Build a Config for a scenario of the 3DI 2018 data set."""
        motion_seperator = [-0.8, -0.2, 0.2, 0.8]
        if scenario_name == '3DI_00':
            sensored_axes_tag = {'pitch': 'SensorPitch', 'roll': 'SensorRoll'}
            extension = '.png'
        else:
            sensored_axes_tag = {'pitch': 'PitchEulerAngle', 'roll': 'RollEulerAngle'}
            extension = '.mp4'
        # Both branches use placeholder 1x1 resolution.
        width = 1
        height = 1
        (motion_data_path, video_data_path, incidence_data_path,
         timestamp_path, save_result_path) = cls._raw_paths(
            THREEDI_2018['dir'], scenario_name)
        motion_data = MotionData(motion_data_path,
                                 sensored_axes_tag,
                                 motion_seperator,
                                 False)
        video_data = VideoData(video_data_path, extension, width, height)
        return cls(scenario_name, motion_data, video_data, incidence_data_path,
                   timestamp_path, save_result_path)

    @classmethod
    def motion_device_2018(cls, scenario_name):
        """Build a Config for a scenario of the motion-device 2018 data set."""
        motion_seperator = [-0.8, -0.2, 0.2, 0.8]
        # Which axes were sensored in which scenarios, table-driven.
        axis_scenarios = (
            ('pitch', ('S1_pitch', 'S2_pitch', 'S3_pitch', 'S4', 'S6')),
            ('yaw', ('S1_yaw', 'S2_yaw', 'S3_yaw', 'S5', 'S6')),
            ('roll', ('S1_roll', 'S2_roll', 'S3_roll', 'S5', 'S6')),
            ('surge', ('S1_surge', 'S2_surge', 'S3_surge', 'S4', 'S5', 'S6')),
            ('heave', ('S1_heave', 'S2_heave', 'S3_heave')),
            ('sway', ('S1_sway', 'S2_sway', 'S3_sway')),
        )
        sensored_axes_tag = {axis: axis
                             for axis, names in axis_scenarios
                             if scenario_name in names}
        (motion_data_path, video_data_path, incidence_data_path,
         timestamp_path, save_result_path) = cls._raw_paths(
            MOTION_DEVICE_2018['dir'], scenario_name)
        motion_data = MotionData(motion_data_path,
                                 sensored_axes_tag,
                                 motion_seperator,
                                 True)
        video_data = VideoData(video_data_path, '.mp4', 1, 1)
        return cls(scenario_name, motion_data, video_data, incidence_data_path,
                   timestamp_path, save_result_path)
# ###########################################################################
# # UOS2018 #
# ###########################################################################
# UOS2018 = \
# {
# 'scenarios':
# {
# 'S1_pitch':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'pitch'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S1_yaw':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'yaw'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S1_roll':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'roll'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S1_surge':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S1_heave':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'heave'
# ],
# 'time': 10,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 10,
# 'fps': 60
# },
# },
# 'S1_sway':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S2_pitch':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'pitch'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S2_yaw':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'yaw'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# }
# },
# 'S2_roll':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'roll'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S2_surge':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S2_heave':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'heave'
# ],
# 'time': 10,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 10,
# 'fps': 60
# },
# },
# 'S2_sway':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S3_pitch':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'pitch'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S3_yaw':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'yaw'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S3_roll':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'roll'
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S3_surge':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S3_heave':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'heave'
# ],
# 'time': 10,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 10,
# 'fps': 60
# },
# },
# 'S3_sway':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# ],
# 'time': 20,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 20,
# 'fps': 60
# },
# },
# 'S4':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'pitch, surge'
# ],
# 'time': 60,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 60,
# 'fps': 60
# },
# },
# 'S5':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'yaw', 'roll', 'surge'
# ],
# 'time': 60,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 60,
# 'fps': 60
# },
# },
# 'S6':
# {
# 'motion':
# {
# 'extension': '.txt',
# 'sensored':
# [
# 'pitch', 'yaw', 'roll', 'surge'
# ],
# 'time': 60,
# 'sampling_rate': 3 #hz
# },
# 'video':
# {
# 'extension': '.mp4',
# 'resolution':
# {
# 'width': 1024,
# 'height': 768
# },
# 'time': 60,
# 'fps': 60
# },
# },
# }
# }
# ###########################################################################
# # 3DI #
# ###########################################################################
# THREEDI = \
# {
# 'scenarios':
# {
# '3DI': 2636
# },
# 'motion':
# {
# 'extension': '.csv',
# 'sensored': {
# 'pitch': 'SensorPitch',
# # 'yaw': 'SensorYaw',
# 'roll': 'SensorRoll'},
# 'sampling_rate': None # not uniform
# },
# 'video':
# {
# 'extension': '.png',
# 'resolution':
# {
# 'width': 352,
# 'height': 240
# },
# 'fps': 25,
# 'time': 105.40 # 1m 45.40 sec
# },
# 'time':
# {
# 'sampling': 'non_uniform',
# 'time_column': 'Time',
# 'start_index': 1,
# 'end_index': 2636,
# 'step_min': 0.30,
# 'step_max': 0.36
# }
# }
|
<gh_stars>10-100
import math, time, ctypes, platform
import numpy as np
from OpenGL import GL
from renderable import TileRenderable
class LineRenderable():
    """Renderable comprised of GL_LINES.

    Subclasses implement ``build_geo()`` to fill ``vert_array``,
    ``elem_array`` and ``color_array``; this class owns the GL buffer
    objects, shader uniform/attribute locations and the render path
    (both the VAO and the non-VAO fallback).
    """
    # shader source filenames, resolved by the app's shader loader (self.app.sl)
    vert_shader_source = 'lines_v.glsl'
    vert_shader_source_3d = 'lines_3d_v.glsl'
    frag_shader_source = 'lines_f.glsl'
    # if True, log creation/destruction of every instance
    log_create_destroy = False
    line_width = 1
    # items in vert array: 2 for XY-only renderables, 3 for ones that include Z
    vert_items = 2
    # use game object's art_off_pct values
    use_art_offset = True
    visible = True

    def __init__(self, app, quad_size_ref=None, game_object=None):
        """Build geometry and create the GL buffers (and a VAO when supported).

        :param app: application object supplying GL capabilities
            (``use_vao``), the shader loader (``sl``) and logging.
        :param quad_size_ref: optional object whose ``quad_width`` /
            ``quad_height`` drive ``get_quad_size()``.
        :param game_object: optional game object whose transform this
            renderable tracks.
        """
        self.app = app
        # we may be attached to a game object
        self.go = game_object
        self.unique_name = '%s_%s' % (int(time.time()), self.__class__.__name__)
        self.quad_size_ref = quad_size_ref
        self.x, self.y, self.z = 0, 0, 0
        self.scale_x, self.scale_y = 1, 1
        # handle Z differently if verts are 2D vs 3D
        self.scale_z = 0 if self.vert_items == 2 else 1
        # subclass hook: fills vert_array, elem_array, color_array
        self.build_geo()
        self.width, self.height = self.get_size()
        self.reset_loc()
        if self.app.use_vao:
            self.vao = GL.glGenVertexArrays(1)
            GL.glBindVertexArray(self.vao)
        # 3D verts use the 3D vertex shader variant
        if self.vert_items == 3:
            self.vert_shader_source = self.vert_shader_source_3d
        self.shader = self.app.sl.new_shader(self.vert_shader_source, self.frag_shader_source)
        # uniforms
        self.proj_matrix_uniform = self.shader.get_uniform_location('projection')
        self.view_matrix_uniform = self.shader.get_uniform_location('view')
        self.position_uniform = self.shader.get_uniform_location('objectPosition')
        self.scale_uniform = self.shader.get_uniform_location('objectScale')
        self.quad_size_uniform = self.shader.get_uniform_location('quadSize')
        self.color_uniform = self.shader.get_uniform_location('objectColor')
        # vert buffers
        self.vert_buffer, self.elem_buffer = GL.glGenBuffers(2)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_buffer)
        GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vert_array.nbytes,
                        self.vert_array, GL.GL_STATIC_DRAW)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.elem_buffer)
        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.elem_array.nbytes,
                        self.elem_array, GL.GL_STATIC_DRAW)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)
        self.vert_count = int(len(self.elem_array))
        self.pos_attrib = self.shader.get_attrib_location('vertPosition')
        GL.glEnableVertexAttribArray(self.pos_attrib)
        offset = ctypes.c_void_p(0)
        GL.glVertexAttribPointer(self.pos_attrib, self.vert_items,
                                 GL.GL_FLOAT, GL.GL_FALSE, 0, offset)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
        # vert colors
        self.color_buffer = GL.glGenBuffers(1)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.color_buffer)
        GL.glBufferData(GL.GL_ARRAY_BUFFER, self.color_array.nbytes,
                        self.color_array, GL.GL_STATIC_DRAW)
        self.color_attrib = self.shader.get_attrib_location('vertColor')
        GL.glEnableVertexAttribArray(self.color_attrib)
        GL.glVertexAttribPointer(self.color_attrib, 4,
                                 GL.GL_FLOAT, GL.GL_FALSE, 0, offset)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
        if self.app.use_vao:
            GL.glBindVertexArray(0)
        if self.log_create_destroy:
            self.app.log('created: %s' % self)

    def __str__(self):
        "for debug purposes, return a unique name"
        return self.unique_name

    def build_geo(self):
        """
        create self.vert_array, self.elem_array, self.color_array
        """
        pass

    def reset_loc(self):
        # subclass hook; no-op by default
        pass

    def update(self):
        """Track the attached game object's transform, if any."""
        if self.go:
            self.update_transform_from_object(self.go)

    def reset_size(self):
        """Recompute width/height via get_size()."""
        self.width, self.height = self.get_size()

    def update_transform_from_object(self, obj):
        # reuse TileRenderable's transform logic with this instance as self
        TileRenderable.update_transform_from_object(self, obj)

    def rebind_buffers(self):
        """Re-upload vert, element and color arrays to their GL buffers."""
        # resend verts
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_buffer)
        GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vert_array.nbytes,
                        self.vert_array, GL.GL_STATIC_DRAW)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.elem_buffer)
        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.elem_array.nbytes,
                        self.elem_array, GL.GL_STATIC_DRAW)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
        self.vert_count = int(len(self.elem_array))
        # resend color
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.color_buffer)
        GL.glBufferData(GL.GL_ARRAY_BUFFER, self.color_array.nbytes,
                        self.color_array, GL.GL_STATIC_DRAW)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)

    def get_projection_matrix(self):
        # identity by default; world-space subclasses return a camera matrix
        return np.eye(4, 4)

    def get_view_matrix(self):
        # identity by default; world-space subclasses return a camera matrix
        return np.eye(4, 4)

    def get_loc(self):
        """Return the renderable's (x, y, z) position."""
        return self.x, self.y, self.z

    def get_size(self):
        # overriden in subclasses that need specific width/height data
        return 1, 1

    def get_quad_size(self):
        """Quad size taken from quad_size_ref, or unit size when unset."""
        if self.quad_size_ref:
            return self.quad_size_ref.quad_width, self.quad_size_ref.quad_height
        else:
            return 1, 1

    def get_color(self):
        # default draw color (opaque white); subclasses override
        return (1, 1, 1, 1)

    def get_line_width(self):
        return self.line_width

    def destroy(self):
        """Free the GL objects owned by this renderable."""
        if self.app.use_vao:
            GL.glDeleteVertexArrays(1, [self.vao])
        GL.glDeleteBuffers(3, [self.vert_buffer, self.elem_buffer, self.color_buffer])
        if self.log_create_destroy:
            self.app.log('destroyed: %s' % self)

    def render(self):
        """Draw the line geometry with current uniforms and alpha blending."""
        if not self.visible:
            return
        GL.glUseProgram(self.shader.program)
        GL.glUniformMatrix4fv(self.proj_matrix_uniform, 1, GL.GL_FALSE, self.get_projection_matrix())
        GL.glUniformMatrix4fv(self.view_matrix_uniform, 1, GL.GL_FALSE, self.get_view_matrix())
        GL.glUniform3f(self.position_uniform, *self.get_loc())
        GL.glUniform3f(self.scale_uniform, self.scale_x, self.scale_y, self.scale_z)
        GL.glUniform2f(self.quad_size_uniform, *self.get_quad_size())
        GL.glUniform4f(self.color_uniform, *self.get_color())
        # VAO vs non-VAO paths
        if self.app.use_vao:
            GL.glBindVertexArray(self.vao)
        else:
            offset = ctypes.c_void_p(0)
            # attribs:
            # pos
            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_buffer)
            GL.glVertexAttribPointer(self.pos_attrib, self.vert_items,
                                     GL.GL_FLOAT, GL.GL_FALSE, 0, ctypes.c_void_p(0))
            GL.glEnableVertexAttribArray(self.pos_attrib)
            # color
            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.color_buffer)
            GL.glVertexAttribPointer(self.color_attrib, 4,
                                     GL.GL_FLOAT, GL.GL_FALSE, 0, offset)
            GL.glEnableVertexAttribArray(self.color_attrib)
            # bind elem array - see similar behavior in Cursor.render
            GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.elem_buffer)
        GL.glEnable(GL.GL_BLEND)
        GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
        # NOTE(review): presumably skipped on macOS because wide lines are
        # unsupported there in core profile — confirm intent.
        if platform.system() != 'Darwin':
            GL.glLineWidth(self.get_line_width())
        GL.glDrawElements(GL.GL_LINES, self.vert_count,
                          GL.GL_UNSIGNED_INT, None)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)
        GL.glDisable(GL.GL_BLEND)
        if self.app.use_vao:
            GL.glBindVertexArray(0)
        GL.glUseProgram(0)
# common data/code used by various boxes
BOX_VERTS = [(0, 0), (1, 0), (1, -1), (0, -1)]


def get_box_arrays(vert_list=None, color=(1, 1, 1, 1)):
    """Return (vert, elem, color) numpy arrays for a 4-vertex line box.

    :param vert_list: optional 4 vertices; falls back to BOX_VERTS.
    :param color: RGBA color applied to all four vertices.
    """
    source_verts = vert_list if vert_list else BOX_VERTS
    verts = np.array(source_verts, dtype=np.float32)
    # four edges: 0-1, 1-2, 2-3, 3-0
    elems = np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32)
    colors = np.array([color * 4], dtype=np.float32)
    return verts, elems, colors
class UIRenderableX(LineRenderable):
    """Red X used to denote the transparent color in various places."""

    color = (1, 0, 0, 1)
    line_width = 2

    def build_geo(self):
        """Two crossing diagonals spanning the unit quad."""
        corners = [(0, 0), (1, 1), (1, 0), (0, 1)]
        self.vert_array = np.array(corners, dtype=np.float32)
        self.elem_array = np.array([0, 1, 2, 3], dtype=np.uint32)
        self.color_array = np.array([self.color * 4], dtype=np.float32)
class SwatchSelectionBoxRenderable(LineRenderable):
    """Box used for UI selection highlights and the like."""

    color = (0.5, 0.5, 0.5, 1)
    line_width = 2

    def __init__(self, app, quad_size_ref):
        LineRenderable.__init__(self, app, quad_size_ref)
        # tile X and Y are tracked so the cursor can be moved around
        self.tile_x, self.tile_y = 0, 0

    def get_color(self):
        """Selection boxes always draw in the fixed class color."""
        return self.color

    def build_geo(self):
        """Reuse the shared unit box geometry in this renderable's color."""
        arrays = get_box_arrays(None, self.color)
        self.vert_array, self.elem_array, self.color_array = arrays
class WorldLineRenderable(LineRenderable):
    "any LineRenderable that draws in world, ie in 3D perspective"

    def get_projection_matrix(self):
        # use the camera's perspective projection instead of identity
        return self.app.camera.projection_matrix

    def get_view_matrix(self):
        # use the camera's view transform instead of identity
        return self.app.camera.view_matrix
class DebugLineRenderable(WorldLineRenderable):
    """
    renderable for drawing debug lines in the world.
    use set_lines and add_lines to replace and add to, respectively, the list
    of 3D vertex locations (and, optionally, colors).
    """
    color = (0.5, 0, 0, 1)
    vert_items = 3
    line_width = 3

    def set_lines(self, new_verts, new_colors=None):
        "replace current debug lines with new given lines"
        self.vert_array = np.array(new_verts, dtype=np.float32)
        elements = []
        # TODO: possible OB1 bug here, sometimes draws line to object origin(?)
        # builds a connected polyline: segment (i-1, i) for each vertex pair
        for i in range(1, len(new_verts)):
            elements += [i - 1, i]
        self.elem_array = np.array(elements, dtype=np.uint32)
        # default color repeated once per vertex when no colors are given
        self.color_array = np.array(new_colors or self.color * len(new_verts),
                                    dtype=np.float32)
        self.rebind_buffers()

    def set_color(self, new_color):
        "changes all debug lines to given color"
        self.color = new_color
        # one RGBA tuple per vertex (vert_array is a flat float list here)
        lines = int(len(self.vert_array) / self.vert_items)
        self.color_array = np.array(self.color * lines, dtype=np.float32)
        self.rebind_buffers()

    def get_quad_size(self):
        # debug lines are specified in world units; no quad scaling
        return 1, 1

    def add_lines(self, new_verts, new_colors=None):
        "add lines to the current ones"
        line_items = len(self.vert_array)
        # NOTE(review): 'lines' is computed but never used below
        lines = int(line_items / self.vert_items)
        # if new_verts is a list of tuples, unpack into flat list
        if type(new_verts[0]) is tuple:
            new_verts_unpacked = []
            for (x, y, z) in new_verts:
                new_verts_unpacked += [x, y, z]
            new_verts = new_verts_unpacked
        # grow vert buffer in place and append the new flat vertex data
        new_size = int(line_items + len(new_verts))
        self.vert_array.resize(new_size)
        self.vert_array[line_items:new_size] = new_verts
        # grow elem buffer
        old_elem_size = len(self.elem_array)
        new_elem_size = int(old_elem_size + len(new_verts) / self.vert_items)
        # TODO: "contiguous" parameter that joins new lines with previous
        self.elem_array.resize(new_elem_size)
        self.elem_array[old_elem_size:new_elem_size] = range(old_elem_size,
                                                             new_elem_size)
        # grow color buffer
        old_color_size = len(self.color_array)
        # 4 floats (RGBA) per new vertex
        new_color_size = int(old_color_size + len(new_verts) / self.vert_items * 4)
        self.color_array.resize(new_color_size)
        self.color_array[old_color_size:new_color_size] = new_colors or self.color * int(len(new_verts) / self.vert_items)
        self.rebind_buffers()

    def reset_lines(self):
        """Clear all debug lines (rebuilds empty geometry)."""
        self.build_geo()

    def build_geo(self):
        # start empty
        self.vert_array = np.array([], dtype=np.float32)
        self.elem_array = np.array([], dtype=np.uint32)
        self.color_array = np.array([], dtype=np.float32)

    def render(self):
        # only render if we have any data
        if len(self.vert_array) == 0:
            return
        WorldLineRenderable.render(self)
class OriginIndicatorRenderable(WorldLineRenderable):
    """Classic 3-axis gizmo showing location/rotation/scale."""

    red = (1.0, 0.1, 0.1, 1.0)
    green = (0.1, 1.0, 0.1, 1.0)
    blue = (0.1, 0.1, 1.0, 1.0)
    origin = (0, 0, 0)
    x_axis = (1, 0, 0)
    y_axis = (0, 1, 0)
    z_axis = (0, 0, 1)
    vert_items = 3
    line_width = 3
    use_art_offset = False

    def __init__(self, app, game_object):
        LineRenderable.__init__(self, app, None, game_object)

    def get_quad_size(self):
        """Axis gizmo always draws at unit quad size."""
        return 1, 1

    def get_size(self):
        """Mirror the attached object's scale."""
        return self.go.scale_x, self.go.scale_y

    def update_transform_from_object(self, obj):
        """Copy position and scale from the attached object."""
        self.x, self.y, self.z = obj.x, obj.y, obj.z
        self.scale_x = obj.scale_x
        self.scale_y = obj.scale_y
        # mirror horizontally when the object is flipped
        if obj.flip_x:
            self.scale_x = -self.scale_x
        self.scale_z = obj.scale_z

    def build_geo(self):
        """One segment per axis, colored red/green/blue for X/Y/Z."""
        segments = [self.origin, self.x_axis,
                    self.origin, self.y_axis,
                    self.origin, self.z_axis]
        self.vert_array = np.array(segments, dtype=np.float32)
        self.elem_array = np.arange(6, dtype=np.uint32)
        self.color_array = np.array([self.red, self.red,
                                     self.green, self.green,
                                     self.blue, self.blue], dtype=np.float32)
class BoundsIndicatorRenderable(WorldLineRenderable):
    """Outline box around a game object's art; pulses while selected."""

    color = (1, 1, 1, 0.5)
    line_width_active = 2
    line_width_inactive = 1

    def __init__(self, app, game_object):
        self.art = game_object.renderable.art
        LineRenderable.__init__(self, app, None, game_object)

    def set_art(self, new_art):
        """Point at a different art and resize the outline to match."""
        self.art = new_art
        self.reset_size()

    def get_size(self):
        """World-space size: art dimensions scaled by the object."""
        art = self.go.art
        width = (art.width * art.quad_width) * self.go.scale_x
        height = (art.height * art.quad_height) * self.go.scale_y
        return width, height

    def get_color(self):
        """Plain white when idle, pulsing brightness while selected."""
        if self.go not in self.app.gw.selected_objects:
            return (1, 1, 1, 1)
        pulse = 0.75 + (math.sin(self.app.get_elapsed_time() / 100) / 2)
        return (pulse, pulse, pulse, 1)

    def get_line_width(self):
        """Thicker outline while the object is selected."""
        if self.go in self.app.gw.selected_objects:
            return self.line_width_active
        return self.line_width_inactive

    def get_quad_size(self):
        if not self.go:
            return 1, 1
        return self.art.width * self.art.quad_width, self.art.height * self.art.quad_height

    def build_geo(self):
        """Reuse the shared unit box geometry."""
        self.vert_array, self.elem_array, self.color_array = get_box_arrays(None, self.color)
class CollisionRenderable(WorldLineRenderable):
    """Debug outline for a collision shape (green = dynamic, blue = static)."""

    dynamic_color = (0, 1, 0, 1)
    static_color = (0, 0, 1, 1)

    def __init__(self, shape):
        if shape.go.is_dynamic():
            self.color = self.dynamic_color
        else:
            self.color = self.static_color
        self.shape = shape
        WorldLineRenderable.__init__(self, shape.go.app, None, shape.go)

    def update(self):
        # follow the collision shape rather than the game object
        self.update_transform_from_object(self.shape)

    def update_transform_from_object(self, obj):
        # only X/Y are tracked; shapes carry no Z or scale
        self.x = obj.x
        self.y = obj.y
def get_circle_points(radius, steps=24):
    """Return points approximating a circle of the given radius.

    Starts at (radius, 0) and walks counter-clockwise in ``steps`` equal
    angular increments, returning to the start angle, so ``steps + 1``
    points are returned.
    """
    step_angle = math.radians(360 / steps)
    points = [(radius, 0)]
    angle = 0
    for _ in range(steps):
        angle += step_angle
        points.append((math.cos(angle) * radius, math.sin(angle) * radius))
    return points
class CircleCollisionRenderable(CollisionRenderable):
    """Debug outline for a circle collision shape."""
    line_width = 2
    segments = 24

    def get_quad_size(self):
        # quad scales unit-circle verts up to the shape's radius
        return self.shape.radius, self.shape.radius

    def get_size(self):
        """World-space bounding size: diameter scaled by the object."""
        w = h = self.shape.radius * 2
        w *= self.go.scale_x
        h *= self.go.scale_y
        return w, h

    def build_geo(self):
        """Build a unit-circle polyline as independent 2-vert segments."""
        verts, elements, colors = [], [], []
        angle = 0
        last_x, last_y = 1, 0
        i = 0
        # NOTE(review): i advances by 2 up to segments * 4, so this emits
        # segments * 2 segments — two full revolutions of the circle.
        # Possibly intentional overdraw; confirm before changing.
        while i < self.segments * 4:
            angle += math.radians(360 / self.segments)
            verts.append((last_x, last_y))
            x = math.cos(angle)
            y = math.sin(angle)
            verts.append((x, y))
            last_x, last_y = x, y
            elements.append((i, i+1))
            i += 2
            colors.append([self.color * 2])
        self.vert_array = np.array(verts, dtype=np.float32)
        self.elem_array = np.array(elements, dtype=np.uint32)
        self.color_array = np.array(colors, dtype=np.float32)
class BoxCollisionRenderable(CollisionRenderable):
    """Debug outline for an axis-aligned box collision shape."""

    line_width = 2

    def get_quad_size(self):
        # quad spans the full box extents
        return self.shape.halfwidth * 2, self.shape.halfheight * 2

    def get_size(self):
        """World-space size: box extents scaled by the object."""
        width = self.shape.halfwidth * 2 * self.go.scale_x
        height = self.shape.halfheight * 2 * self.go.scale_y
        return width, height

    def build_geo(self):
        """Unit box centered on the origin, in the shape's color."""
        centered_box = [(-0.5, 0.5), (0.5, 0.5), (0.5, -0.5), (-0.5, -0.5)]
        self.vert_array, self.elem_array, self.color_array = get_box_arrays(
            centered_box, self.color)
class TileBoxCollisionRenderable(BoxCollisionRenderable):
    "box for each tile in a CST_TILE object"
    line_width = 1

    def get_loc(self):
        """Return (x, y, z) with Z taken from the object's collision layer."""
        # draw at Z level of collision layer
        return self.x, self.y, self.go.get_layer_z(self.go.col_layer_name)
|
<reponame>scottwedge/OpenStack-Stein
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from senlin.common import consts
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import schema
from senlin.common import utils
from senlin.profiles import base
LOG = logging.getLogger(__name__)
class StackProfile(base.Profile):
"""Profile for an OpenStack Heat stack."""
VERSIONS = {
'1.0': [
{'status': consts.SUPPORTED, 'since': '2016.04'}
]
}
KEYS = (
CONTEXT, TEMPLATE, TEMPLATE_URL, PARAMETERS,
FILES, TIMEOUT, DISABLE_ROLLBACK, ENVIRONMENT,
) = (
'context', 'template', 'template_url', 'parameters',
'files', 'timeout', 'disable_rollback', 'environment',
)
properties_schema = {
CONTEXT: schema.Map(
_('A dictionary for specifying the customized context for '
'stack operations'),
default={},
),
TEMPLATE: schema.Map(
_('Heat stack template.'),
default={},
updatable=True,
),
TEMPLATE_URL: schema.String(
_('Heat stack template url.'),
default='',
updatable=True,
),
PARAMETERS: schema.Map(
_('Parameters to be passed to Heat for stack operations.'),
default={},
updatable=True,
),
FILES: schema.Map(
_('Contents of files referenced by the template, if any.'),
default={},
updatable=True,
),
TIMEOUT: schema.Integer(
_('A integer that specifies the number of minutes that a '
'stack operation times out.'),
updatable=True,
),
DISABLE_ROLLBACK: schema.Boolean(
_('A boolean specifying whether a stack operation can be '
'rolled back.'),
default=True,
updatable=True,
),
ENVIRONMENT: schema.Map(
_('A map that specifies the environment used for stack '
'operations.'),
default={},
updatable=True,
)
}
OP_NAMES = (
OP_ABANDON,
) = (
'abandon',
)
OPERATIONS = {
OP_ABANDON: schema.Map(
_('Abandon a heat stack node.'),
)
}
    def __init__(self, type_name, name, **kwargs):
        """Initialize the profile; no backing stack exists yet."""
        super(StackProfile, self).__init__(type_name, name, **kwargs)
        # ID of the backing Heat stack, populated once one is created.
        self.stack_id = None
def validate(self, validate_props=False):
"""Validate the schema and the data provided."""
# general validation
self.spec_data.validate()
self.properties.validate()
# validate template
template = self.properties[self.TEMPLATE]
template_url = self.properties[self.TEMPLATE_URL]
if not template and not template_url:
msg = _("Both template and template_url are not specified "
"for profile '%s'.") % self.name
raise exc.InvalidSpec(message=msg)
if validate_props:
self.do_validate(obj=self)
    def do_validate(self, obj):
        """Validate the stack template used by a node.

        :param obj: Node object to operate.
        :returns: True if validation succeeds.
        :raises: `InvalidSpec` exception is raised if template is invalid.
        """
        kwargs = {
            'stack_name': utils.random_name(),
            'template': self.properties[self.TEMPLATE],
            'template_url': self.properties[self.TEMPLATE_URL],
            'parameters': self.properties[self.PARAMETERS],
            'files': self.properties[self.FILES],
            'environment': self.properties[self.ENVIRONMENT],
            # preview=True: presumably asks Heat to validate the template
            # without actually creating a stack — confirm against the driver.
            'preview': True,
        }
        try:
            self.orchestration(obj).stack_create(**kwargs)
        except exc.InternalError as ex:
            # surface driver failures as a spec validation error
            msg = _('Failed in validating template: %s') % six.text_type(ex)
            raise exc.InvalidSpec(message=msg)
        return True
    def do_create(self, obj):
        """Create a heat stack using the given node object.

        :param obj: The node object to operate on.
        :returns: The UUID of the heat stack created.
        :raises: `EResourceCreation` if stack creation or the wait for
            CREATE_COMPLETE fails.
        """
        # Tag the stack so it can be traced back to its owning node (and,
        # when clustered, to the cluster and the node's index within it).
        tags = ["cluster_node_id=%s" % obj.id]
        if obj.cluster_id:
            tags.append('cluster_id=%s' % obj.cluster_id)
            tags.append('cluster_node_index=%s' % obj.index)
        kwargs = {
            'stack_name': obj.name + '-' + utils.random_name(8),
            'template': self.properties[self.TEMPLATE],
            'template_url': self.properties[self.TEMPLATE_URL],
            'timeout_mins': self.properties[self.TIMEOUT],
            'disable_rollback': self.properties[self.DISABLE_ROLLBACK],
            'parameters': self.properties[self.PARAMETERS],
            'files': self.properties[self.FILES],
            'environment': self.properties[self.ENVIRONMENT],
            'tags': ",".join(tags)
        }

        try:
            stack = self.orchestration(obj).stack_create(**kwargs)

            # Timeout = None means we will use the 'default_action_timeout'
            # It can be overridden by the TIMEOUT profile properties
            timeout = None
            if self.properties[self.TIMEOUT]:
                timeout = self.properties[self.TIMEOUT] * 60

            self.orchestration(obj).wait_for_stack(stack.id, 'CREATE_COMPLETE',
                                                   timeout=timeout)
            return stack.id
        except exc.InternalError as ex:
            raise exc.EResourceCreation(type='stack',
                                        message=six.text_type(ex))
    def do_delete(self, obj, **params):
        """Delete the physical stack behind the node object.

        :param obj: The node object to operate on.
        :param kwargs params: Optional keyword arguments for the delete
                              operation.
        :returns: This operation always returns True unless exception is
                  caught.
        :raises: `EResourceDeletion` if interaction with heat fails.
        """
        stack_id = obj.physical_id
        # No backing stack means there is nothing to delete.
        if not stack_id:
            return True

        ignore_missing = params.get('ignore_missing', True)
        try:
            self.orchestration(obj).stack_delete(stack_id, ignore_missing)
            self.orchestration(obj).wait_for_stack_delete(stack_id)
        except exc.InternalError as ex:
            raise exc.EResourceDeletion(type='stack', id=stack_id,
                                        message=six.text_type(ex))
        return True
    def do_update(self, obj, new_profile, **params):
        """Perform update on object.

        :param obj: the node object to operate on
        :param new_profile: the new profile used for updating
        :param params: other parameters for the update request.
        :returns: A boolean indicating whether the operation is successful.
        :raises: `EResourceUpdate` if the heat update call fails.
        """
        self.stack_id = obj.physical_id
        if not self.stack_id:
            return False

        if not self.validate_for_update(new_profile):
            return False

        # Diff each updatable property against the old profile; only the
        # properties that actually changed are sent to heat.
        fields = {}
        new_template = new_profile.properties[new_profile.TEMPLATE]
        if new_template != self.properties[self.TEMPLATE]:
            fields['template'] = new_template

        new_params = new_profile.properties[new_profile.PARAMETERS]
        if new_params != self.properties[self.PARAMETERS]:
            fields['parameters'] = new_params

        new_timeout = new_profile.properties[new_profile.TIMEOUT]
        if new_timeout != self.properties[self.TIMEOUT]:
            fields['timeout_mins'] = new_timeout

        new_dr = new_profile.properties[new_profile.DISABLE_ROLLBACK]
        if new_dr != self.properties[self.DISABLE_ROLLBACK]:
            fields['disable_rollback'] = new_dr

        new_files = new_profile.properties[new_profile.FILES]
        if new_files != self.properties[self.FILES]:
            fields['files'] = new_files

        new_environment = new_profile.properties[new_profile.ENVIRONMENT]
        if new_environment != self.properties[self.ENVIRONMENT]:
            fields['environment'] = new_environment

        # Nothing changed; report success without calling heat.
        if not fields:
            return True

        try:
            hc = self.orchestration(obj)

            # Timeout = None means we will use the 'default_action_timeout'
            # It can be overridden by the TIMEOUT profile properties
            # NOTE(review): the wait timeout is taken from the OLD profile,
            # not new_profile -- confirm this is intentional.
            timeout = None
            if self.properties[self.TIMEOUT]:
                timeout = self.properties[self.TIMEOUT] * 60

            hc.stack_update(self.stack_id, **fields)
            hc.wait_for_stack(self.stack_id, 'UPDATE_COMPLETE',
                              timeout=timeout)
        except exc.InternalError as ex:
            raise exc.EResourceUpdate(type='stack', id=self.stack_id,
                                      message=six.text_type(ex))

        return True
    def do_check(self, obj):
        """Check stack status.

        :param obj: Node object to operate.
        :returns: True if check succeeded, or False otherwise.
        :raises: `EResourceOperation` if the heat check call fails.
        """
        stack_id = obj.physical_id
        if stack_id is None:
            return False

        hc = self.orchestration(obj)
        try:
            # Timeout = None means we will use the 'default_action_timeout'
            # It can be overridden by the TIMEOUT profile properties
            timeout = None
            if self.properties[self.TIMEOUT]:
                timeout = self.properties[self.TIMEOUT] * 60
            hc.stack_check(stack_id)
            hc.wait_for_stack(stack_id, 'CHECK_COMPLETE', timeout=timeout)
        except exc.InternalError as ex:
            raise exc.EResourceOperation(op='checking', type='stack',
                                         id=stack_id,
                                         message=six.text_type(ex))

        return True
    def do_get_details(self, obj):
        """Return the details of the backing stack as a dict.

        :param obj: The node object to operate on.
        :returns: The stack's attributes as a dict, an empty dict when the
            node has no physical stack, or an error dict on heat failure.
        """
        if not obj.physical_id:
            return {}

        try:
            stack = self.orchestration(obj).stack_get(obj.physical_id)
            return stack.to_dict()
        except exc.InternalError as ex:
            # Errors are reported in-band rather than raised.
            return {
                'Error': {
                    'code': ex.code,
                    'message': six.text_type(ex)
                }
            }
    def do_adopt(self, obj, overrides=None, snapshot=False):
        """Adopt an existing stack node for management.

        :param obj: A node object for this operation. It could be a puppet
            node that provides only 'user', 'project' and 'physical_id'
            properties when doing a preview. It can be a real Node object for
            node adoption.
        :param overrides: A dict containing the properties that will be
            overridden when generating a profile for the stack.
        :param snapshot: A boolean flag indicating whether the profile should
            attempt a snapshot operation before adopting the stack. If set to
            True, the ID of the snapshot will be used as the image ID.
        :returns: A dict containing the spec created from the stack object or
            a dict containing error information if failure occurred.
        """
        driver = self.orchestration(obj)

        # TODO(Qiming): Add snapshot support
        # snapshot = driver.snapshot_create(...)

        # Pull everything needed to reconstruct the profile spec from heat.
        try:
            stack = driver.stack_get(obj.physical_id)
            tmpl = driver.stack_get_template(obj.physical_id)
            env = driver.stack_get_environment(obj.physical_id)
            files = driver.stack_get_files(obj.physical_id)
        except exc.InternalError as ex:
            return {'Error': {'code': ex.code, 'message': six.text_type(ex)}}

        spec = {
            self.ENVIRONMENT: env.to_dict(),
            self.FILES: files,
            self.TEMPLATE: tmpl.to_dict(),
            # Drop heat-internal parameters (those containing 'OS::').
            self.PARAMETERS: dict((k, v) for k, v in stack.parameters.items()
                                  if k.find('OS::', 0) < 0),
            self.TIMEOUT: stack.timeout_mins,
            self.DISABLE_ROLLBACK: stack.is_rollback_disabled
        }
        if overrides:
            spec.update(overrides)

        return spec
def _refresh_tags(self, current, node, add=False):
"""Refresh tag list.
:param current: Current list of tags.
:param node: The node object.
:param add: Flag indicating whether new tags are added.
:returns: (tags, updated) where tags contains a new list of tags and
updated indicates whether new tag list differs from the old
one.
"""
tags = []
for tag in current:
if tag.find('cluster_id=') == 0:
continue
elif tag.find('cluster_node_id=') == 0:
continue
elif tag.find('cluster_node_index=') == 0:
continue
if tag.strip() != "":
tags.append(tag.strip())
if add:
tags.append('cluster_id=' + node.cluster_id)
tags.append('cluster_node_id=' + node.id)
tags.append('cluster_node_index=%s' % node.index)
tag_str = ",".join(tags)
return (tag_str, tags != current)
    def do_join(self, obj, cluster_id):
        """Refresh the stack's tags when the node joins a cluster.

        :param obj: The node object joining the cluster.
        :param cluster_id: ID of the target cluster (identifiers are read
            from ``obj`` itself when tagging).
        :returns: True on success, False if the node has no stack or the
            heat update failed.
        """
        if not obj.physical_id:
            return False

        hc = self.orchestration(obj)
        try:
            stack = hc.stack_get(obj.physical_id)
            # add=True appends fresh cluster_* tags for the new membership.
            tags, updated = self._refresh_tags(stack.tags, obj, True)
            field = {'tags': tags}
            if updated:
                hc.stack_update(obj.physical_id, **field)
        except exc.InternalError as ex:
            # Best-effort: log and report failure instead of raising.
            LOG.error('Failed in updating stack tags: %s.', ex)
            return False

        return True
    def do_leave(self, obj):
        """Strip cluster-related tags when the node leaves its cluster.

        :param obj: The node object leaving the cluster.
        :returns: True on success, False if the node has no stack or the
            heat update failed.
        """
        if not obj.physical_id:
            return False

        hc = self.orchestration(obj)
        try:
            stack = hc.stack_get(obj.physical_id)
            # add=False removes the cluster_* tags without re-adding them.
            tags, updated = self._refresh_tags(stack.tags, obj, False)
            field = {'tags': tags}
            if updated:
                hc.stack_update(obj.physical_id, **field)
        except exc.InternalError as ex:
            # Best-effort: log and report failure instead of raising.
            LOG.error('Failed in updating stack tags: %s.', ex)
            return False

        return True
    def handle_abandon(self, obj, **options):
        """Handler for abandoning a heat stack node."""
        # Intentionally a no-op placeholder for the 'abandon' operation.
        pass
|
<filename>assets/src/ba_data/python/bastd/actor/onscreentimer.py
# Released under the MIT License. See LICENSE for details.
#
"""Defines Actor(s)."""
from __future__ import annotations
from typing import TYPE_CHECKING, overload
import ba
if TYPE_CHECKING:
from typing import Optional, Union, Any, Literal
class OnScreenTimer(ba.Actor):
    """A handy on-screen timer.

    category: Gameplay Classes

    Useful for time-based games where time increases.
    """

    def __init__(self) -> None:
        super().__init__()
        self._starttime_ms: Optional[int] = None
        # Text node anchored to the top-center of the screen.
        self.node = ba.newnode('text',
                               attrs={
                                   'v_attach': 'top',
                                   'h_attach': 'center',
                                   'h_align': 'center',
                                   'color': (1, 1, 0.5, 1),
                                   'flatness': 0.5,
                                   'shadow': 0.5,
                                   'position': (0, -70),
                                   'scale': 1.4,
                                   'text': ''
                               })
        # A timedisplay node renders a time span; its output feeds our text.
        self.inputnode = ba.newnode('timedisplay',
                                    attrs={
                                        'timemin': 0,
                                        'showsubseconds': True
                                    })
        self.inputnode.connectattr('output', self.node, 'text')

    def start(self) -> None:
        """Start the timer."""
        tval = ba.time(timeformat=ba.TimeFormat.MILLISECONDS)
        assert isinstance(tval, int)
        self._starttime_ms = tval
        self.inputnode.time1 = self._starttime_ms
        # Drive time2 from the activity clock so the display keeps counting.
        ba.getactivity().globalsnode.connectattr('time', self.inputnode,
                                                 'time2')

    def has_started(self) -> bool:
        """Return whether this timer has started yet."""
        return self._starttime_ms is not None

    def stop(self,
             endtime: Optional[Union[int, float]] = None,
             timeformat: ba.TimeFormat = ba.TimeFormat.SECONDS) -> None:
        """End the timer.

        If 'endtime' is not None, it is used when calculating
        the final display time; otherwise the current time is used.

        'timeformat' applies to endtime and can be SECONDS or MILLISECONDS
        """
        # NOTE: annotation was `Union[int, float] = None`; made the implicit
        # Optional explicit per PEP 484.
        if endtime is None:
            endtime = ba.time(timeformat=ba.TimeFormat.MILLISECONDS)
            timeformat = ba.TimeFormat.MILLISECONDS

        if self._starttime_ms is None:
            print('Warning: OnScreenTimer.stop() called without start() first')
        else:
            endtime_ms: int
            if timeformat is ba.TimeFormat.SECONDS:
                endtime_ms = int(endtime * 1000)
            elif timeformat is ba.TimeFormat.MILLISECONDS:
                assert isinstance(endtime, int)
                endtime_ms = endtime
            else:
                raise ValueError(f'invalid timeformat: {timeformat}')

            # Freeze the display at the elapsed duration.
            self.inputnode.timemax = endtime_ms - self._starttime_ms

    # Overloads so type checker knows our exact return type based in args.
    @overload
    def getstarttime(
        self,
        timeformat: Literal[ba.TimeFormat.SECONDS] = ba.TimeFormat.SECONDS
    ) -> float:
        ...

    @overload
    def getstarttime(self,
                     timeformat: Literal[ba.TimeFormat.MILLISECONDS]) -> int:
        ...

    def getstarttime(
        self,
        timeformat: ba.TimeFormat = ba.TimeFormat.SECONDS
    ) -> Union[int, float]:
        """Return the sim-time when start() was called.

        Time will be returned in seconds if timeformat is SECONDS or
        milliseconds if it is MILLISECONDS.
        """
        val_ms: Any
        if self._starttime_ms is None:
            print('WARNING: getstarttime() called on un-started timer')
            val_ms = ba.time(timeformat=ba.TimeFormat.MILLISECONDS)
        else:
            val_ms = self._starttime_ms
        assert isinstance(val_ms, int)
        if timeformat is ba.TimeFormat.SECONDS:
            return 0.001 * val_ms
        if timeformat is ba.TimeFormat.MILLISECONDS:
            return val_ms
        raise ValueError(f'invalid timeformat: {timeformat}')

    @property
    def starttime(self) -> float:
        """Shortcut for start time in seconds."""
        return self.getstarttime()

    def handlemessage(self, msg: Any) -> Any:
        # If we're asked to die, just kill our node/timer.
        if isinstance(msg, ba.DieMessage):
            if self.node:
                self.node.delete()
|
<reponame>Jagermeister/out_of_many_one
""" Constant SQL Statements """
### Document Link Raw
DOCUMENT_LINK_RAW_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS document_link_raw (
document_link_raw_key INTEGER PRIMARY KEY,
document_link_raw_hash TEXT,
name_first TEXT,
name_last TEXT,
filer_type TEXT,
report_type TEXT,
filed_date TEXT
);
'''
DOCUMENT_LINK_RAW_CREATE = '''
INSERT INTO document_link_raw (
document_link_raw_hash,
name_first,
name_last,
filer_type,
report_type,
filed_date
) VALUES (
?, ?, ?, ?, ?, ?
);
'''
DOCUMENT_LINK_RAWS_READ = '''
SELECT
document_link_raw_key,
document_link_raw_hash,
name_first,
name_last,
filer_type,
report_type,
filed_date
FROM document_link_raw AS R;
'''
DOCUMENT_LINK_RAWS_NOT_PARSED = '''
SELECT
document_link_raw_key,
name_first,
name_last,
filer_type,
report_type,
filed_date
FROM document_link_raw AS R
WHERE NOT EXISTS(
SELECT *
FROM document_link AS D
WHERE D.document_link_raw_key = R.document_link_raw_key
);
'''
### Filer
FILER_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS filer (
filer_key INTEGER PRIMARY KEY,
name_first TEXT NOT NULL,
name_last TEXT NOT NULL
);
'''
FILER_CREATE = '''
INSERT INTO filer (
name_first,
name_last
) VALUES (
?, ?
);
'''
FILERS_READ = '''
SELECT
F.filer_key,
F.name_first,
F.name_last
FROM filer AS F;
'''
### Filer Type
FILER_TYPE_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS filer_type (
filer_type_key INTEGER PRIMARY KEY,
filer_name TEXT NOT NULL,
is_senator INTEGER NOT NULL,
is_candidate INTEGER NOT NULL,
is_former_senator INTEGER NOT NULL
);
'''
FILER_TYPES_READ = '''
SELECT
FT.filer_type_key,
FT.filer_name,
FT.is_senator,
FT.is_candidate,
FT.is_former_senator
FROM filer_type AS FT;
'''
# Default rows for the filer_type lookup table.  Exactly one classification
# flag is set per row.
FILER_TYPE_DEFAULTS = [
    {'filer_name': 'Senator', 'is_senator': 1, 'is_candidate': 0, 'is_former_senator': 0},
    {'filer_name': 'Candidate', 'is_senator': 0, 'is_candidate': 1, 'is_former_senator': 0},
    # Restored from a broken '<NAME>' placeholder; the is_former_senator
    # flag identifies this row's intended name.
    {'filer_name': 'Former Senator', 'is_senator': 0, 'is_candidate': 0, 'is_former_senator': 1},
]
FILER_TYPE_POPULATE = '''
INSERT INTO filer_type (
filer_name,
is_senator,
is_candidate,
is_former_senator
)
SELECT
:filer_name, :is_senator, :is_candidate, :is_former_senator
WHERE NOT EXISTS (
SELECT *
FROM filer_type AS FT
WHERE FT.filer_name = :filer_name
AND FT.is_senator = :is_senator
AND FT.is_candidate = :is_candidate
AND FT.is_former_senator = :is_former_senator
);
'''
### Document Type
DOCUMENT_TYPE_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS document_type (
document_type_key INTEGER PRIMARY KEY,
document_type_name TEXT NOT NULL,
is_annual INTEGER NOT NULL,
is_blind_trust INTEGER NOT NULL,
is_due_date_extension INTEGER NOT NULL,
is_miscellaneous_information INTEGER NOT NULL,
is_periodic_transaction_report INTEGER NOT NULL,
is_unknown INTEGER NOT NULL
);
'''
# Default rows for the document_type lookup table.  Each named type sets
# exactly one classification flag; the flag order matches the table schema.
_DOCUMENT_TYPE_FLAGS = (
    'is_annual',
    'is_blind_trust',
    'is_due_date_extension',
    'is_miscellaneous_information',
    'is_periodic_transaction_report',
    'is_unknown',
)
_DOCUMENT_TYPE_NAMES = (
    'Annual',
    'Blind Trusts',
    'Due Date Extension',
    'Miscellaneous Information',
    'Periodic Transaction Report',
    'UNKNOWN',
)
DOCUMENT_TYPE_DEFAULTS = [
    dict(
        [('document_type_name', name)] +
        [(flag, 1 if col == row else 0)
         for col, flag in enumerate(_DOCUMENT_TYPE_FLAGS)]
    )
    for row, name in enumerate(_DOCUMENT_TYPE_NAMES)
]
DOCUMENT_TYPE_POPULATE = '''
INSERT INTO document_type (
document_type_name,
is_annual,
is_blind_trust,
is_due_date_extension,
is_miscellaneous_information,
is_periodic_transaction_report,
is_unknown
)
SELECT
:document_type_name,
:is_annual,
:is_blind_trust,
:is_due_date_extension,
:is_miscellaneous_information,
:is_periodic_transaction_report,
:is_unknown
WHERE NOT EXISTS (
SELECT *
FROM document_type AS DT
WHERE DT.document_type_name = :document_type_name
AND DT.is_annual = :is_annual
AND DT.is_blind_trust = :is_blind_trust
AND DT.is_due_date_extension = :is_due_date_extension
AND DT.is_miscellaneous_information = :is_miscellaneous_information
AND DT.is_periodic_transaction_report = :is_periodic_transaction_report
AND DT.is_unknown = :is_unknown
);
'''
DOCUMENT_TYPES_READ = '''
SELECT
DT.document_type_key,
DT.document_type_name,
DT.is_annual,
DT.is_blind_trust,
DT.is_due_date_extension,
DT.is_miscellaneous_information,
DT.is_periodic_transaction_report,
DT.is_unknown
FROM document_type AS DT;
'''
### Document Link
DOCUMENT_LINK_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS document_link (
document_link_key INTEGER PRIMARY KEY,
document_link_raw_key INTEGER NOT NULL,
filer_key INTEGER NOT NULL,
filer_type_key INTEGER NOT NULL,
document_type_key INTEGER NOT NULL,
is_paper INTEGER NOT NULL,
unique_id TEXT NOT NULL,
document_name TEXT,
document_date TEXT,
FOREIGN KEY(document_link_raw_key) REFERENCES document_link_raw(document_link_raw_key),
FOREIGN KEY(filer_key) REFERENCES filer(filer_key),
FOREIGN KEY(filer_type_key) REFERENCES filer_type(filer_type_key),
FOREIGN KEY(document_type_key) REFERENCES document_type(document_type_key)
);
'''
DOCUMENT_LINK_CREATE = '''
INSERT INTO document_link (
document_link_raw_key,
filer_key,
filer_type_key,
document_type_key,
is_paper,
unique_id,
document_name,
document_date
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?
);
'''
DOCUMENT_LINKS_ANNUAL_REPORT_GET = '''
SELECT
DL.document_link_key,
DL.unique_id
FROM document_link AS DL
JOIN document_type AS DT
ON DT.document_type_key = DL.document_type_key
AND DT.is_annual = 1
WHERE DL.is_paper = 0
AND NOT EXISTS (
SELECT *
FROM report_annual_raw AS R
WHERE R.document_link_key = DL.document_link_key
);
'''
### Annual Report Raw
REPORT_ANNUAL_RAW_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_raw (
report_annual_raw_key INTEGER PRIMARY KEY,
document_link_key INTEGER NOT NULL,
header TEXT,
part_one_charity TEXT,
part_two_earned_income TEXT,
part_three_assets TEXT,
part_four_a_ptr TEXT,
part_four_b_transactions TEXT,
part_five_gifts TEXT,
part_six_travel TEXT,
part_seven_liabilities TEXT,
part_eight_positions TEXT,
part_nine_agreements TEXT,
part_ten_compensation TEXT,
comments TEXT,
FOREIGN KEY(document_link_key) REFERENCES document_link(document_link_key)
);
'''
REPORT_ANNUAL_RAW_CREATE = '''
INSERT INTO report_annual_raw (
document_link_key,
header,
part_one_charity,
part_two_earned_income,
part_three_assets,
part_four_a_ptr,
part_four_b_transactions,
part_five_gifts,
part_six_travel,
part_seven_liabilities,
part_eight_positions,
part_nine_agreements,
part_ten_compensation,
comments
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
REPORT_ANNUALS_READ = '''
SELECT
R.report_annual_raw_key,
R.document_link_key,
R.header,
R.part_one_charity,
R.part_two_earned_income,
R.part_three_assets,
R.part_four_a_ptr,
R.part_four_b_transactions,
R.part_five_gifts,
R.part_six_travel,
R.part_seven_liabilities,
R.part_eight_positions,
R.part_nine_agreements,
R.part_ten_compensation,
R.comments
FROM report_annual_raw AS R
JOIN document_link AS D
ON D.document_link_key = R.document_link_key
JOIN document_type AS T
ON T.document_type_key = D.document_type_key
AND T.is_annual = 1
JOIN filer_type AS F
ON F.filer_type_key = D.filer_type_key
AND F.is_senator = 1
WHERE NOT EXISTS (
SELECT *
FROM report_annual AS A
WHERE A.report_annual_raw_key = R.report_annual_raw_key
);
'''
### Annual Report
REPORT_ANNUAL_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual (
report_annual_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
calendar_year INTEGER,
filer_name TEXT NOT NULL,
filed_date TEXT NOT NULL,
comment TEXT,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_CREATE = '''
INSERT INTO report_annual (
report_annual_raw_key,
calendar_year,
filer_name,
filed_date,
comment
) VALUES (
?, ?, ?, ?, ?
);
'''
### Annual Report Part One
REPORT_ANNUAL_CHARITY_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_charity (
report_annual_charity_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
event_date TEXT NOT NULL,
activity TEXT NOT NULL,
amount REAL NOT NULL,
paid_person TEXT NOT NULL,
paid_location TEXT NOT NULL,
payment_received_person TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_CHARITY_CREATE = '''
INSERT INTO report_annual_charity (
report_annual_raw_key,
event_id,
event_date,
activity,
amount,
paid_person,
paid_location,
payment_received_person
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Part Two
REPORT_ANNUAL_EARNED_INCOME_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_earned_income (
report_annual_earned_income_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
payment_received_person TEXT NOT NULL,
payment_type TEXT NOT NULL,
paid_person TEXT NOT NULL,
paid_location TEXT NOT NULL,
amount REAL NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_EARNED_INCOME_CREATE = '''
INSERT INTO report_annual_earned_income (
report_annual_raw_key,
event_id,
payment_received_person,
payment_type,
paid_person,
paid_location,
amount
) VALUES (
?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Part Three
REPORT_ANNUAL_ASSET_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_asset (
report_annual_asset_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id TEXT NOT NULL,
asset TEXT NOT NULL,
asset_type TEXT NOT NULL,
asset_subtype TEXT,
owner TEXT NOT NULL,
value TEXT NOT NULL,
income_type TEXT NOT NULL,
income TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_ASSET_CREATE = '''
INSERT INTO report_annual_asset (
report_annual_raw_key,
event_id,
asset,
asset_type,
asset_subtype,
owner,
value,
income_type,
income
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Four A
REPORT_ANNUAL_PTR_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_ptr (
report_annual_ptr_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
transaction_date TEXT NOT NULL,
owner TEXT NOT NULL,
ticker TEXT NOT NULL,
asset TEXT NOT NULL,
transaction_type TEXT NOT NULL,
amount TEXT NOT NULL,
comment TEXT,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_PTR_CREATE = '''
INSERT INTO report_annual_ptr (
report_annual_raw_key,
event_id,
transaction_date,
owner,
ticker,
asset,
transaction_type,
amount,
comment
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Four B
REPORT_ANNUAL_TRANSACTION_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_transaction (
report_annual_transaction_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
owner TEXT NOT NULL,
ticker TEXT NOT NULL,
asset TEXT NOT NULL,
transaction_type TEXT NOT NULL,
transaction_date TEXT NOT NULL,
amount TEXT NOT NULL,
comment TEXT,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_TRANSACTION_CREATE = '''
INSERT INTO report_annual_transaction (
report_annual_raw_key,
event_id,
owner,
ticker,
asset,
transaction_type,
transaction_date,
amount,
comment
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Five
REPORT_ANNUAL_GIFT_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_gift (
report_annual_gift_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
gift_date TEXT NOT NULL,
recipient TEXT NOT NULL,
gift TEXT NOT NULL,
value REAL NOT NULL,
from_person TEXT NOT NULL,
from_location TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_GIFT_CREATE = '''
INSERT INTO report_annual_gift (
report_annual_raw_key,
event_id,
gift_date,
recipient,
gift,
value,
from_person,
from_location
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Six
REPORT_ANNUAL_TRAVEL_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_travel (
report_annual_travel_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
travel_dates TEXT NOT NULL,
travelers TEXT NOT NULL,
travel_type TEXT NOT NULL,
itinerary TEXT NOT NULL,
reimbursed_for TEXT NOT NULL,
paid_person TEXT NOT NULL,
paid_location TEXT NOT NULL,
comment TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_TRAVEL_CREATE = '''
INSERT INTO report_annual_travel (
report_annual_raw_key,
event_id,
travel_dates,
travelers,
travel_type,
itinerary,
reimbursed_for,
paid_person,
paid_location,
comment
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Seven
REPORT_ANNUAL_LIABILITY_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_liability (
report_annual_liability_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
year_incurred INTEGER NOT NULL,
debtor TEXT NOT NULL,
liability_type TEXT NOT NULL,
points TEXT NOT NULL,
term_rate TEXT NOT NULL,
amount TEXT NOT NULL,
creditor_name TEXT NOT NULL,
creditor_location TEXT NOT NULL,
comments TEXT,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_LIABILITY_CREATE = '''
INSERT INTO report_annual_liability (
report_annual_raw_key,
event_id,
year_incurred,
debtor,
liability_type,
points,
term_rate,
amount,
creditor_name,
creditor_location,
comments
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Eight
REPORT_ANNUAL_POSITION_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_position (
report_annual_position_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
position_dates TEXT NOT NULL,
position TEXT NOT NULL,
entity_name TEXT NOT NULL,
entity_location TEXT NOT NULL,
entity_type TEXT NOT NULL,
comment TEXT,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_POSITION_CREATE = '''
INSERT INTO report_annual_position (
report_annual_raw_key,
event_id,
position_dates,
position,
entity_name,
entity_location,
entity_type,
comment
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Nine
REPORT_ANNUAL_AGREEMENT_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_agreement (
report_annual_agreement_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
agreement_date TEXT NOT NULL,
party_name TEXT NOT NULL,
party_location TEXT NOT NULL,
agreement_type TEXT NOT NULL,
status_and_terms TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_AGREEMENT_CREATE = '''
INSERT INTO report_annual_agreement (
report_annual_raw_key,
event_id,
agreement_date,
party_name,
party_location,
agreement_type,
status_and_terms
) VALUES (
?, ?, ?, ?, ?, ?, ?
);
'''
### Annual Report Ten
REPORT_ANNUAL_COMPENSATION_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_compensation (
report_annual_compensation_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
event_id INTEGER NOT NULL,
source_name TEXT NOT NULL,
source_location TEXT NOT NULL,
duties TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_COMPENSATION_CREATE = '''
INSERT INTO report_annual_compensation (
report_annual_raw_key,
event_id,
source_name,
source_location,
duties
) VALUES (
?, ?, ?, ?, ?
);
'''
### Annual Report Attachment
REPORT_ANNUAL_ATTACHMENT_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS report_annual_attachment (
report_annual_attachment_key INTEGER PRIMARY KEY,
report_annual_raw_key INTEGER NOT NULL,
link TEXT NOT NULL,
attachment_name TEXT NOT NULL,
attached_date TEXT NOT NULL,
FOREIGN KEY(report_annual_raw_key) REFERENCES report_annual_raw(report_annual_raw_key)
);
'''
REPORT_ANNUAL_ATTACHMENT_CREATE = '''
INSERT INTO report_annual_attachment (
report_annual_raw_key,
link,
attachment_name,
attached_date
) VALUES (
?, ?, ?, ?
);
'''
### Normalized Tables to reduce data duplication and enhance reporting
### Dollar Value
DOLLAR_VALUE_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS dollar_value (
dollar_value_key INTEGER PRIMARY KEY,
value_name TEXT NOT NULL,
value_minimum INTEGER,
value_maximum INTEGER
);
'''
# Default dollar-value ranges for the dollar_value lookup table.  Each
# (name, minimum, maximum) tuple becomes one row dict.
_DOLLAR_VALUE_RANGES = (
    ('UNKNOWN', 0, 0),
    ('None (or less than $1,001)', 0, 1000),
    ('$1,001 - $15,000', 1001, 15000),
    ('$15,001 - $50,000', 15001, 50000),
    ('$50,001 - $100,000', 50001, 100000),
    ('$100,001 - $250,000', 100001, 250000),
    ('$250,001 - $500,000', 250001, 500000),
    ('$500,001 - $1,000,000', 500001, 1000000),
    ('$1,000,001 - $5,000,000', 1000001, 5000000),
    ('$5,000,001 - $25,000,000', 5000001, 25000000),
    ('$25,000,001 - $50,000,000', 25000001, 50000000),
)
DOLLAR_VALUE_DEFAULTS = [
    {'value_name': name, 'value_minimum': low, 'value_maximum': high}
    for name, low, high in _DOLLAR_VALUE_RANGES
]
DOLLAR_VALUE_POPULATE = '''
INSERT INTO dollar_value (
value_name,
value_minimum,
value_maximum
)
SELECT
:value_name, :value_minimum, :value_maximum
WHERE NOT EXISTS (
SELECT *
FROM dollar_value AS DV
WHERE DV.value_name = :value_name
AND DV.value_minimum = :value_minimum
AND DV.value_maximum = :value_maximum
);
'''
DOLLAR_VALUES_READ = '''
SELECT
DL.dollar_value_key,
DL.value_name,
DL.value_minimum,
DL.value_maximum
FROM dollar_value AS DL;
'''
### Asset Owner
ASSET_OWNER_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS asset_owner (
asset_owner_key INTEGER PRIMARY KEY,
owner_name TEXT NOT NULL,
is_self INTEGER NOT NULL,
is_spouse INTEGER NOT NULL,
is_joint INTEGER NOT NULL,
is_child INTEGER NOT NULL
);
'''
# Default rows for the asset_owner lookup table.  Each named owner sets the
# single matching flag; 'UNKNOWN' sets none.
ASSET_OWNER_DEFAULTS = [
    {
        'owner_name': name,
        'is_self': 1 if name == 'Self' else 0,
        'is_spouse': 1 if name == 'Spouse' else 0,
        'is_joint': 1 if name == 'Joint' else 0,
        'is_child': 1 if name == 'Child' else 0,
    }
    for name in ('UNKNOWN', 'Self', 'Spouse', 'Joint', 'Child')
]
ASSET_OWNER_POPULATE = '''
INSERT INTO asset_owner (
owner_name,
is_self,
is_spouse,
is_joint,
is_child
)
SELECT
:owner_name, :is_self, :is_spouse, :is_joint, :is_child
WHERE NOT EXISTS (
SELECT *
FROM asset_owner AS AO
WHERE AO.owner_name = :owner_name
AND AO.is_self = :is_self
AND AO.is_spouse = :is_spouse
AND AO.is_joint = :is_joint
AND AO.is_child = :is_child
);
'''
ASSET_OWNERS_READ = '''
SELECT
AO.asset_owner_key,
AO.owner_name,
AO.is_self,
AO.is_spouse,
AO.is_joint,
AO.is_child
FROM asset_owner AS AO;
'''
### Asset Type
ASSET_TYPE_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS asset_type (
asset_type_key INTEGER PRIMARY KEY,
type_name TEXT NOT NULL
);
'''
# Default rows for the asset_type lookup table: one dict per type name.
ASSET_TYPE_DEFAULTS = [
    {'type_name': asset_type}
    for asset_type in (
        'UNKNOWN',
        'Accounts Receivable',
        'American Depository Receipt',
        'Annuity',
        'Bank Deposit',
        'Brokerage/Managed Account',
        'Business Entity',
        'Common Trust Fund of a Bank',
        'Corporate Securities',
        'Deferred Compensation',
        'Education Savings Plans',
        'Equity Index-Linked Note',
        'Farm',
        'Government Securities',
        'Intellectual Property',
        'Investment Fund',
        'Life Insurance',
        'Mutual Funds',
        'Other Securities',
        'Personal Property',
        'Real Estate',
        'Retirement Plans',
        'Trust',
        'UGMA/UTMA',
    )
]
ASSET_TYPE_POPULATE = '''
INSERT INTO asset_type (
type_name
)
SELECT
:type_name
WHERE NOT EXISTS (
SELECT *
FROM asset_type AS AT
WHERE AT.type_name = :type_name
);
'''
ASSET_TYPES_READ = '''
SELECT
AT.asset_type_key,
AT.type_name
FROM asset_type AS AT;
'''
### Transaction Type
TRANSACTION_TYPE_TABLE_CREATE = '''
CREATE TABLE IF NOT EXISTS transaction_type (
transaction_type_key INTEGER PRIMARY KEY,
type_name TEXT NOT NULL,
is_sale INTEGER NOT NULL,
is_purchase INTEGER NOT NULL,
is_exchange INTEGER NOT NULL
);
'''
TRANSACTION_TYPE_DEFAULTS = [
{'type_name': 'UNKNOWN', 'is_sale': 0, 'is_purchase': 0, 'is_exchange': 0},
{'type_name': 'Sale', 'is_sale': 1, 'is_purchase': 0, 'is_exchange': 0},
{'type_name': 'Purchase', 'is_sale': 0, 'is_purchase': 1, 'is_exchange': 0},
{'type_name': 'Exchange', 'is_sale': 0, 'is_purchase': 0, 'is_exchange': 1},
]
TRANSACTION_TYPE_POPULATE = '''
INSERT INTO transaction_type (
type_name,
is_sale,
is_purchase,
is_exchange
)
SELECT
:type_name, :is_sale, :is_purchase, :is_exchange
WHERE NOT EXISTS (
SELECT *
FROM transaction_type AS TT
WHERE TT.type_name = :type_name
AND TT.is_sale = :is_sale
AND TT.is_purchase = :is_purchase
AND TT.is_exchange = :is_exchange
);
'''
TRANSACTION_TYPES_READ = '''
SELECT
TT.transaction_type_key,
TT.type_name,
TT.is_sale,
TT.is_purchase,
TT.is_exchange
FROM transaction_type AS TT;
'''
### Index and table creation
# Secondary indexes on the foreign-key columns used when joining the parsed
# report tables back to their raw/source rows. IF NOT EXISTS makes re-running
# the script safe.
TABLE_INDEXES_CREATION = """
CREATE INDEX IF NOT EXISTS 'document_link_document_type_key' ON 'document_link'('document_type_key');
CREATE INDEX IF NOT EXISTS 'document_link_filer_type_key' ON 'document_link'('filer_type_key');
CREATE INDEX IF NOT EXISTS 'document_link_filer_key' ON 'document_link'('filer_key');
CREATE INDEX IF NOT EXISTS 'document_link_document_link_raw_key' ON 'document_link'('document_link_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_report_annual_raw_key' ON 'report_annual'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_raw_document_link_key' ON 'report_annual_raw'('document_link_key');
CREATE INDEX IF NOT EXISTS 'report_annual_agreement_report_annual_raw_key' ON 'report_annual_agreement'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_asset_report_annual_raw_key' ON 'report_annual_asset'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_attachment_report_annual_raw_key' ON 'report_annual_attachment'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_charity_report_annual_raw_key' ON 'report_annual_charity'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_compensation_report_annual_raw_key' ON 'report_annual_compensation'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_earned_income_report_annual_raw_key' ON 'report_annual_earned_income'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_gift_report_annual_raw_key' ON 'report_annual_gift'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_liability_report_annual_raw_key' ON 'report_annual_liability'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_position_report_annual_raw_key' ON 'report_annual_position'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_ptr_report_annual_raw_key' ON 'report_annual_ptr'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_transaction_report_annual_raw_key' ON 'report_annual_transaction'('report_annual_raw_key');
CREATE INDEX IF NOT EXISTS 'report_annual_travel_report_annual_raw_key' ON 'report_annual_travel'('report_annual_raw_key');
"""
# Drops (SQLite has no TRUNCATE statement) all parsed report tables so they
# can be rebuilt from the *_raw tables. IF EXISTS makes the script safe to run
# against a database where some of these tables were never created, matching
# the IF NOT EXISTS convention used by the CREATE statements above.
TABLES_PARSED_TRUNCATE = """
DROP TABLE IF EXISTS report_annual;
DROP TABLE IF EXISTS report_annual_agreement;
DROP TABLE IF EXISTS report_annual_asset;
DROP TABLE IF EXISTS report_annual_attachment;
DROP TABLE IF EXISTS report_annual_charity;
DROP TABLE IF EXISTS report_annual_compensation;
DROP TABLE IF EXISTS report_annual_earned_income;
DROP TABLE IF EXISTS report_annual_gift;
DROP TABLE IF EXISTS report_annual_liability;
DROP TABLE IF EXISTS report_annual_position;
DROP TABLE IF EXISTS report_annual_ptr;
DROP TABLE IF EXISTS report_annual_transaction;
DROP TABLE IF EXISTS report_annual_travel;
"""
# All CREATE TABLE statements, executed in order at start-up. The referenced
# *_TABLE_CREATE constants are defined earlier in this module; every statement
# uses IF NOT EXISTS, so the order only matters for readability.
TABLES_CREATION = [
    REPORT_ANNUAL_TABLE_CREATE,
    REPORT_ANNUAL_RAW_TABLE_CREATE,
    REPORT_ANNUAL_CHARITY_TABLE_CREATE,
    REPORT_ANNUAL_EARNED_INCOME_TABLE_CREATE,
    REPORT_ANNUAL_ASSET_TABLE_CREATE,
    REPORT_ANNUAL_PTR_TABLE_CREATE,
    REPORT_ANNUAL_TRANSACTION_TABLE_CREATE,
    REPORT_ANNUAL_GIFT_TABLE_CREATE,
    REPORT_ANNUAL_TRAVEL_TABLE_CREATE,
    REPORT_ANNUAL_LIABILITY_TABLE_CREATE,
    REPORT_ANNUAL_POSITION_TABLE_CREATE,
    REPORT_ANNUAL_AGREEMENT_TABLE_CREATE,
    REPORT_ANNUAL_COMPENSATION_TABLE_CREATE,
    REPORT_ANNUAL_ATTACHMENT_TABLE_CREATE,
    DOCUMENT_LINK_TABLE_CREATE,
    DOCUMENT_TYPE_TABLE_CREATE,
    FILER_TABLE_CREATE,
    FILER_TYPE_TABLE_CREATE,
    DOCUMENT_LINK_RAW_TABLE_CREATE
]
# Names of every table managed by this module. NOTE(review): the order is an
# interleave of two alphabetical runs (likely a two-column listing pasted in);
# left untouched in case a consumer depends on the current ordering -- confirm
# before sorting.
TABLE_NAMES = [
    'document_link',
    'report_annual_compensation',
    'document_link_raw',
    'report_annual_earned_income',
    'document_type',
    'report_annual_gift',
    'filer',
    'report_annual_liability',
    'filer_type',
    'report_annual_position',
    'report_annual',
    'report_annual_ptr',
    'report_annual_agreement',
    'report_annual_raw',
    'report_annual_asset',
    'report_annual_transaction',
    'report_annual_attachment',
    'report_annual_travel',
    'report_annual_charity',
]
|
from collections import OrderedDict
import Pyro4
from pocs.camera import create_cameras_from_config as create_local_cameras
from pocs.utils import error
from pocs.utils import logger as logger_module
from huntsman.pocs.camera.pyro import Camera as PyroCamera
from huntsman.pocs.utils import load_config
def list_distributed_cameras(ns_host=None, logger=None):
    """Detect distributed cameras.

    Looks for a Pyro name server and queries it for the list of registered cameras.

    Args:
        ns_host (str, optional): hostname or IP address of the name server host. If not
            given will attempt to locate the name server via UDP network broadcast.
        logger (logging.Logger, optional): logger to use for messages, if not given will
            use the root logger.

    Returns:
        dict: Dictionary of detected distributed camera name, URI pairs
    """
    logger = logger or logger_module.get_root_logger()
    try:
        # Get a proxy for the name server (will raise NamingError if not found)
        with Pyro4.locateNS(host=ns_host) as name_server:
            # Find all the registered POCS cameras
            found = name_server.list(metadata_all={'POCS', 'Camera'})
    except Pyro4.errors.NamingError as err:
        logger.warning("Couldn't connect to Pyro name server: {}".format(err))
        return OrderedDict()
    # Deterministic, name-sorted ordering.
    camera_uris = OrderedDict(sorted(found.items(), key=lambda item: item[0]))
    if camera_uris:
        logger.debug("Found {} distributed cameras on name server".format(len(camera_uris)))
    else:
        logger.warning("Found name server but no distributed cameras")
    return camera_uris
def create_cameras_from_config(config=None, logger=None, **kwargs):
    """Create camera object(s) based on the config.

    Creates a camera for each camera item listed in the config. Ensures the
    appropriate camera module is loaded.

    Args:
        config (dict, optional): POCS configuration; loaded with ``load_config``
            when not supplied.
        logger (logging.Logger, optional): logger for messages; defaults to the
            root logger.
        **kwargs (dict): Can pass a `cameras` object that overrides the info in
            the configuration file. Can also pass `auto_detect`(bool) to try and
            automatically discover the ports, and `distributed_cameras` (bool)
            to enable discovery of networked (Pyro) cameras.

    Returns:
        OrderedDict: An ordered dictionary of created camera objects, with the
            camera name as key and camera instance as value. Returns an empty
            OrderedDict if there is no camera configuration items.

    Raises:
        error.CameraNotFound: Raised if camera cannot be found at specified port or if
            auto_detect=True and no cameras are found.
        error.PanError: Description
    """
    if not logger:
        logger = logger_module.get_root_logger()

    if not config:
        config = load_config(**kwargs)

    # Helper method to first check kwargs then config (kwargs win).
    def kwargs_or_config(item, default=None):
        return kwargs.get(item, config.get(item, default))

    a_simulator = 'camera' in kwargs_or_config('simulator', default=list())
    logger.debug("Camera simulator: {}".format(a_simulator))

    camera_info = kwargs_or_config('cameras', default=dict())
    if not camera_info and not a_simulator:
        logger.info('No camera information in config.')
        return OrderedDict()

    # kwargs take precedence over the 'cameras' config section here.
    distributed_cameras = kwargs.get('distributed_cameras',
                                     camera_info.get('distributed_cameras', False))

    try:
        cameras = create_local_cameras(config=config, logger=logger, **kwargs)
    except (error.PanError, KeyError, error.CameraNotFound):
        # Best effort: continue with no local cameras; distributed cameras
        # (if enabled) may still be found below.
        logger.debug("No local cameras")
        cameras = OrderedDict()

    if not a_simulator and distributed_cameras:
        logger.debug("Creating distributed cameras")
        cameras.update(create_distributed_cameras(camera_info, logger=logger))

    if len(cameras) == 0:
        raise error.CameraNotFound(
            msg="No cameras available. Exiting.", exit=True)

    # Find primary camera; if several cameras are flagged, the last one wins.
    primary_camera = None
    for camera in cameras.values():
        if camera.is_primary:
            primary_camera = camera

    # If no camera was specified as primary use the first
    if primary_camera is None:
        camera_names = sorted(cameras.keys())
        primary_camera = cameras[camera_names[0]]
        primary_camera.is_primary = True

    # NOTE(review): brace-style lazy args ("{}", x) only render if the logger
    # is loguru-like; a stdlib logger would drop the argument -- confirm which
    # logger get_root_logger() returns.
    logger.debug("Primary camera: {}", primary_camera)
    logger.debug("{} cameras created", len(cameras))

    return cameras
def create_distributed_cameras(camera_info, logger=None):
    """Create distributed camera object(s) based on detected cameras and config

    Creates a `pocs.camera.pyro.Camera` object for each distributed camera detected.

    Args:
        camera_info: 'cameras' section from POCS config
        logger (logging.Logger, optional): logger to use for messages, if not given will
            use the root logger.

    Returns:
        OrderedDict: An ordered dictionary of created camera objects, with the
            camera name as key and camera instance as value. Returns an empty
            OrderedDict if no distributed cameras are found.
    """
    logger = logger or logger_module.get_root_logger()
    # Ask the name server which cameras are registered.
    uris = list_distributed_cameras(ns_host=camera_info.get('name_server_host', None),
                                    logger=logger)
    primary_id = camera_info.get('primary', '')
    cameras = OrderedDict()
    # TODO: do this in parallel because initialising cameras can take a while.
    for name, uri in uris.items():
        logger.debug('Creating camera: {}'.format(name))
        camera = PyroCamera(port=name, uri=uri)
        # Flag the camera nominated as primary (matched by uid or name).
        if primary_id in (camera.uid, camera.name):
            camera.is_primary = True
        logger.debug(f"Camera created: {camera}")
        cameras[name] = camera
    return cameras
|
<reponame>buddwm/hubble
# -*- coding: utf-8 -*-
'''
Module for returning various status data about a minion.
These data can be useful for compiling into stats later.
'''
# Import python libs
import datetime
import logging
import os
import re
import time
import hubblestack.utils.files
import hubblestack.utils.path
import hubblestack.utils.platform
from hubblestack.exceptions import CommandExecutionError
# Module-level logger. Use the dotted module path (__name__), not the file
# path: the original defined `log` twice (once with __file__, immediately
# shadowed below), and __file__ produces logger names that do not nest under
# the package hierarchy.
log = logging.getLogger(__name__)

# Loader name for this execution module.
__virtualname__ = 'status'
__opts__ = {}

# Don't shadow built-in's.
__func_alias__ = {
    'time_': 'time'
}
def __virtual__():
    '''
    Load this module everywhere except Windows, where none of the functions
    are supported; returns the virtual name or a (False, reason) pair.
    '''
    if not hubblestack.utils.platform.is_windows():
        return __virtualname__
    return False, 'Windows platform is not supported by this module'
def pid(sig):
    '''
    Return the matching PIDs as a newline-separated string, or an empty
    string when nothing matches.

    Pass a signature to use to find the process via ps. Note you can pass
    a Python-compatible regular expression to return all pids of
    processes matching the regexp.

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.pid <sig>
    '''
    ps_cmd = __grains__['ps']
    ps_output = __mods__['cmd.run_stdout'](ps_cmd, python_shell=True)
    # Skip the ps line for this very invocation, keep field 2 (the PID)
    # of every line matching the signature.
    matching_pids = [
        line.split()[1]
        for line in ps_output.splitlines()
        if 'status.pid' not in line and re.search(sig, line)
    ]
    return '\n'.join(matching_pids)
def uptime():
    '''
    Return the uptime for this system.

    Returns a dict with ``seconds``, ``since_iso``, ``since_t``, ``days``,
    ``time`` and (when ``who`` is available) ``users``; on unsupported
    platforms the raw output of the ``uptime`` command is returned instead.

    .. versionchanged:: 2015.8.9
        The uptime function was changed to return a dictionary of easy-to-read
        key/value pairs containing uptime information, instead of the output
        from a ``cmd.run`` call.

    .. versionchanged:: 2016.11.0
        Support for OpenBSD, FreeBSD, NetBSD, MacOS, and Solaris

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.uptime
    '''
    curr_seconds = time.time()

    # Get uptime in seconds, one strategy per platform.
    if hubblestack.utils.platform.is_linux():
        # /proc/uptime: the first field is the uptime in fractional seconds.
        ut_path = "/proc/uptime"
        if not os.path.exists(ut_path):
            raise CommandExecutionError("File {ut_path} was not found.".format(ut_path=ut_path))
        with hubblestack.utils.files.fopen(ut_path) as rfh:
            seconds = int(float(rfh.read().split()[0]))
    elif hubblestack.utils.platform.is_sunos():
        # note: some flavors/versions report the host uptime inside a zone
        # https://support.oracle.com/epmos/faces/BugDisplay?id=15611584
        res = __mods__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time')
        if res['retcode'] > 0:
            raise CommandExecutionError('The boot_time kstat was not found.')
        seconds = int(curr_seconds - int(res['stdout'].split()[-1]))
    elif hubblestack.utils.platform.is_openbsd() or hubblestack.utils.platform.is_netbsd():
        # kern.boottime is a plain epoch timestamp on these platforms.
        bt_data = __mods__['sysctl.get']('kern.boottime')
        if not bt_data:
            raise CommandExecutionError('Cannot find kern.boottime system parameter')
        seconds = int(curr_seconds - int(bt_data))
    elif hubblestack.utils.platform.is_freebsd() or hubblestack.utils.platform.is_darwin():
        # format: { sec = 1477761334, usec = 664698 } Sat Oct 29 17:15:34 2016
        bt_data = __mods__['sysctl.get']('kern.boottime')
        if not bt_data:
            raise CommandExecutionError('Cannot find kern.boottime system parameter')
        data = bt_data.split("{")[-1].split("}")[0].strip().replace(' ', '')
        # NOTE(review): the local name `uptime` shadows this function within
        # the branch; harmless today, but worth renaming.
        uptime = dict([(k, int(v, )) for k, v in [p.strip().split('=') for p in data.split(',')]])
        seconds = int(curr_seconds - uptime['sec'])
    elif hubblestack.utils.platform.is_aix():
        seconds = _get_boot_time_aix()
    else:
        # Unsupported platform: fall back to the raw `uptime` command output.
        return __mods__['cmd.run']('uptime')

    # Setup datetime and timedelta objects
    boot_time = datetime.datetime.utcfromtimestamp(curr_seconds - seconds)
    curr_time = datetime.datetime.utcfromtimestamp(curr_seconds)
    up_time = curr_time - boot_time

    # Construct return information
    ut_ret = {
        'seconds': seconds,
        'since_iso': boot_time.isoformat(),
        'since_t': int(curr_seconds - seconds),
        'days': up_time.days,
        # hours:minutes within the current day
        'time': '{0}:{1}'.format(up_time.seconds // 3600, up_time.seconds % 3600 // 60),
    }

    if hubblestack.utils.path.which('who'):
        who_cmd = 'who' if hubblestack.utils.platform.is_openbsd() else 'who -s'  # OpenBSD does not support -s
        ut_ret['users'] = len(__mods__['cmd.run'](who_cmd).split(os.linesep))

    return ut_ret
def _get_boot_time_aix():
    '''
    Return the number of seconds since boot time on AIX.

    Parses the elapsed time of PID 1 as reported by ``ps -o etime= -p 1``,
    which is formatted ``[dd-]hh:mm:ss`` (e.g. ``7-20:46:46``) or, for short
    uptimes, just ``mm:ss``. The original implementation assumed the ``dd-``
    prefix was always present and crashed with IndexError when it was not.

    Raises:
        CommandExecutionError: if ``ps`` fails for PID 1.
    '''
    res = __mods__['cmd.run_all']('ps -o etime= -p 1')
    if res['retcode'] > 0:
        raise CommandExecutionError('Unable to find boot_time for pid 1.')
    return _etime_to_seconds(res['stdout'].strip())


def _etime_to_seconds(etime):
    '''
    Convert a ``ps`` etime string (``[dd-]hh:mm:ss``, ``mm:ss`` or ``ss``)
    into a number of seconds.
    '''
    days = 0
    if '-' in etime:
        day_part, etime = etime.split('-', 1)
        days = _number(day_part)
    fields = [_number(part) for part in etime.split(':')]
    # Pad missing leading components (hours, then minutes) with zero.
    while len(fields) < 3:
        fields.insert(0, 0)
    hours, minutes, seconds = fields
    return days * 86400 + hours * 3600 + minutes * 60 + seconds
def _number(text):
'''
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
'''
if text.isdigit():
return int(text)
try:
return float(text)
except ValueError:
return text
def time_(format='%A, %d. %B %Y %I:%M%p'):
    '''
    .. versionadded:: 2016.3.0

    Return the current time on the minion, formatted with ``strftime``
    according to the format parameter.

    Default date format: Monday, 27. July 2015 07:55AM

    CLI Example:

    .. code-block:: bash

        salt '*' status.time

        salt '*' status.time '%s'
    '''
    return datetime.datetime.today().strftime(format)
|
import os
import pytest
from ci_output_parser.log_file_parsers.log_file_parser import LogFileParser
@pytest.fixture
def mock_file_write_functions(mocker):
    # Stub out LogFileParser's two file-output methods so tests exercise the
    # parsing logic without touching the filesystem; both stubs report success.
    mocker.patch.object(LogFileParser, 'output_lint_lines_to_file', return_value=True)
    mocker.patch.object(LogFileParser, 'output_lint_lines_to_json_file', return_value=True)
@pytest.fixture
def dummy_data_directory_path():
    """Absolute path of the ``dummy_data`` directory next to this test module."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "dummy_data")
@pytest.fixture
def invalid_file_path(dummy_data_directory_path):
    """Path of a docker log file with unparseable content."""
    return os.path.join(dummy_data_directory_path, "docker",
                        "bb_docker_invalid_dummy_log_text.txt")


@pytest.fixture
def empty_file_path(dummy_data_directory_path):
    """Path of an empty docker log file."""
    return os.path.join(dummy_data_directory_path, "docker",
                        "bb_docker_empty_log_text.txt")
@pytest.fixture
def docker_valid_file_path(dummy_data_directory_path):
    """Mapping of docker fixture names to valid dummy log file paths."""
    docker_dir = os.path.join(dummy_data_directory_path, "docker")
    return {
        "docker_valid_path": os.path.join(docker_dir, "bb_docker_dummy_log_text.txt"),
        "docker_clean_valid_path": os.path.join(docker_dir, "docker_clean_log_text.txt"),
    }
@pytest.fixture
def cmake_valid_file_path(dummy_data_directory_path):
    """Mapping with the cmake-error docker dummy log file path."""
    docker_dir = os.path.join(dummy_data_directory_path, "docker")
    return {
        "docker_cmake_valid_path": os.path.join(
            docker_dir, "docker_cmake_source_error_log_text.txt"),
    }
@pytest.fixture
def android_valid_file_path(dummy_data_directory_path):
    """Mapping of android fixture names to dummy log file paths."""
    android_dir = os.path.join(dummy_data_directory_path, "android")
    # (dict key, file name) pairs; insertion order matters to consumers
    # iterating the mapping.
    entries = (
        ("android_pass_lint_valid_path", "android_dummy_pass_log_text.txt"),
        ("android_fail_lint_valid_path", "android_dummy_fail_log_text.txt"),
        ("android_clean_lint_valid_path", "android_dummy_clean_log_text.txt"),
        ("android_checkstyle_lint_valid_path", "android_dummy_checkstyle_log_text.txt"),
        ("android_checkstyle_clean_lint_valid_path", "android_dummy_checkstyle_clean_log_text.txt"),
        ("android_app_lint_fail_lint_valid_path", "android_dummy_app_lint_fail_log_text.txt"),
        ("android_build_lint_valid_path", "android_dummy_build_log_text.txt"),
        ("android_clean_build_lint_valid_path", "android_dummy_clean_build_log_text.txt"),
        ("android_warning_build_lint_valid_path", "android_dummy_warning_build_log_text.txt"),
        ("android_javac_lint_valid_path", "android_dummy_javac_log_text.txt"),
    )
    return {key: os.path.join(android_dir, file_name) for key, file_name in entries}
@pytest.fixture
def pre_commit_valid_file_path(dummy_data_directory_path):
    """Mapping of pre-commit fixture names to dummy log file paths."""
    pre_commit_dir = os.path.join(dummy_data_directory_path, "pre_commit")
    return {
        "pre_commit_lint_valid_path": os.path.join(
            pre_commit_dir, "pre_commit_dummy_log_text.txt"),
        "pre_commit_lint_clean_valid_path": os.path.join(
            pre_commit_dir, "pre_commit_dummy_clean_log_text.txt"),
    }
@pytest.fixture
def vale_valid_file_path(dummy_data_directory_path):
    """Mapping of vale fixture names to dummy log file paths."""
    vale_dir = os.path.join(dummy_data_directory_path, "vale")
    return {
        "vale_lint_clean_valid_path": os.path.join(vale_dir, "vale_dummy_clean_log_text.txt"),
        "vale_lint_valid_path": os.path.join(vale_dir, "vale_dummy_log_text.txt"),
    }


@pytest.fixture
def junit_valid_file_path(dummy_data_directory_path):
    """Mapping of junit fixture names to dummy log file paths."""
    junit_dir = os.path.join(dummy_data_directory_path, "junit")
    return {
        "junit_lint_clean_valid_path": os.path.join(junit_dir, "junit_dummy_clean_log_text.txt"),
        "junit_lint_valid_path": os.path.join(junit_dir, "junit_dummy_log_text.txt"),
    }


@pytest.fixture
def gtest_valid_file_path(dummy_data_directory_path):
    """Mapping of gtest fixture names to dummy log file paths."""
    gtest_dir = os.path.join(dummy_data_directory_path, "gtest")
    return {
        "gtest_lint_clean_valid_path": os.path.join(gtest_dir, "gtest_dummy_clean_log_text.txt"),
        "gtest_lint_valid_path": os.path.join(gtest_dir, "gtest_dummy_log_text.txt"),
    }
@pytest.fixture
def firebase_valid_file_path(dummy_data_directory_path):
    """Mapping of firebase fixture names to dummy log file paths."""
    firebase_dir = os.path.join(dummy_data_directory_path, "firebase")
    # (dict key, file name) pairs; insertion order matters to consumers
    # iterating the mapping.
    entries = (
        ("firebase_lint_clean_valid_path", "firebase_dummy_clean_log_text.txt"),
        ("firebase_lint_valid_path", "firebase_dummy_log_text.txt"),
        ("firebase_test_lint_valid_path", "firebase_dummy_test_log_text.txt"),
        ("firebase_lint_test_clean_valid_path", "firebase_dummy_test_clean_log_text.txt"),
        ("firebase_single_test_lint_valid_path", "firebase_dummy_single_test_log_text.txt"),
        ("firebase_test_crash_lint_valid_path", "firebase_dummy_test_crash_log_text.txt"),
        ("firebase_test_crash_second_lint_valid_path", "firebase_dummy_test_crash_second_log_text.txt"),
        ("firebase_test_timeout_lint_valid_path", "firebase_dummy_test_timeout_log_text.txt"),
    )
    return {key: os.path.join(firebase_dir, file_name) for key, file_name in entries}
|
# Import matplotlib and pandas toolkit
# Python SQL toolkit and Object Relational Mapper
# Flask for serving the JSON API
from matplotlib import style
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify

# Create engine against the SQLite climate database.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)

# NOTE(review): bare expression whose result is discarded (left over from
# notebook exploration); it only lists the mapped class names.
Base.classes.keys()

# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station

# Create our session (link) from Python to the DB
session = Session(engine)

# Create an app, being sure to pass __name__
app = Flask(__name__)
#create available routes
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end"
)
# create precipitation route
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all percipitarion values"""
# Query all precipitations
results = session.query(Measurement.date, Measurement.prcp).all()
# close session
session.close()
#create empty dictionary and run loop to place values in dict
all_precipitations = {}
for date, prcp in results:
all_precipitations[date] = prcp
#return json for all precipitations
return jsonify(all_precipitations)
#create stations route
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
#query all stations id's
stations = session.query(Station.station).all()
# close session
session.close()
# return json for all station id's
return jsonify(stations)
# create route for temps
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
#find previous year
one_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
#query temps
stations = session.query(Measurement.tobs)
# filter for most active station, filter by date of previous year and jigher, and order by dates
main_stations = stations.filter(Measurement.station == 'USC00519281').filter(Measurement.date >= one_year).order_by(Measurement.date).all()
#close session
session.close()
# return json for temps of previous year
return jsonify(main_stations)
# create a starting route
@app.route("/api/v1.0/<start>")
def calc_temps(start):
# Create our session (link) from Python to the DB
session = Session(engine)
# query min temp, max temp, avg temp and create start point
start_date=session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).all()
# close session
session.close()
#return json for start date
return jsonify(start_date)
# create a ending route
@app.route("/api/v1.0/<start>/<end>")
def calc_temps2(start, end):
# Create our session (link) from Python to the DB
session = Session(engine)
# query min temp, max temp, avg temp and create start point and end point
end_date = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()
# close session
session.close()
# return json for end date
return jsonify(end_date)
# run app
if __name__ == '__main__':
app.run(debug=True)
|
"""
This package implements pytorch functions for Fourier-based convolutions.
While this may not be relevant for GPU-implementations, convolutions in the spatial domain are slow on CPUs. Hence, this function should be useful for memory-intensive models that need to be run on the CPU or CPU-based computations involving convolutions in general.
.. todo::
Create a CUDA version of these convolutions functions. There is already a CUDA based FFT implementation available which could be built upon. Alternatively, spatial smoothing may be sufficiently fast on the GPU.
"""
from __future__ import print_function
from __future__ import absolute_import
# TODO
from builtins import range
from builtins import object
import torch
from torch.autograd import Function
import numpy as np
from torch.autograd import gradcheck
from .data_wrapper import USE_CUDA, FFTVal,AdaptVal, MyTensor
# if USE_CUDA:
# import pytorch_fft.fft as fft
from . import utils
def _symmetrize_filter_center_at_zero_1D(filter):
    # Mirror the upper half of the axis onto the lower half so that
    # filter[i] == filter[-i] with the filter center at index 0.
    sz = filter.shape
    if sz[0] % 2 == 0:
        # symmetrize if it is even
        filter[1:sz[0] // 2] = filter[sz[0]:sz[0] // 2:-1]
    else:
        # symmetrize if it is odd
        filter[1:sz[0] // 2 + 1] = filter[sz[0]:sz[0] // 2:-1]

def _symmetrize_filter_center_at_zero_2D(filter):
    # Mirror along each axis in turn (see the 1D helper for the pattern).
    sz = filter.shape
    if sz[0] % 2 == 0:
        # symmetrize if it is even
        filter[1:sz[0] // 2,:] = filter[sz[0]:sz[0] // 2:-1,:]
    else:
        # symmetrize if it is odd
        filter[1:sz[0] // 2 + 1,:] = filter[sz[0]:sz[0] // 2:-1,:]
    if sz[1] % 2 == 0:
        # symmetrize if it is even
        filter[:,1:sz[1] // 2] = filter[:,sz[1]:sz[1] // 2:-1]
    else:
        # symmetrize if it is odd
        filter[:,1:sz[1] // 2 + 1] = filter[:,sz[1]:sz[1] // 2:-1]

def _symmetrize_filter_center_at_zero_3D(filter):
    # Mirror along each of the three axes in turn.
    sz = filter.shape
    if sz[0] % 2 == 0:
        # symmetrize if it is even
        filter[1:sz[0] // 2,:,:] = filter[sz[0]:sz[0] // 2:-1,:,:]
    else:
        # symmetrize if it is odd
        filter[1:sz[0] // 2 + 1,:,:] = filter[sz[0]:sz[0] // 2:-1,:,:]
    if sz[1] % 2 == 0:
        # symmetrize if it is even
        filter[:,1:sz[1] // 2,:] = filter[:,sz[1]:sz[1] // 2:-1,:]
    else:
        # symmetrize if it is odd
        filter[:,1:sz[1] // 2 + 1,:] = filter[:,sz[1]:sz[1] // 2:-1,:]
    if sz[2] % 2 == 0:
        # symmetrize if it is even
        filter[:,:,1:sz[2] // 2] = filter[:,:,sz[2]:sz[2] // 2:-1]
    else:
        # symmetrize if it is odd
        filter[:,:,1:sz[2] // 2 + 1] = filter[:,:,sz[2]:sz[2] // 2:-1]

def symmetrize_filter_center_at_zero(filter, renormalize=False):
    """
    Symmetrizes filter. The assumption is that the filter is already in the format for input to an FFT.
    I.e., that it has been transformed so that the center of the pixel is at zero.

    :param filter: Input filter (in spatial domain). Will be symmetrized (i.e., will change its value)
    :param renormalize: (bool) if true will normalize so that the sum is one
    :return: n/a (returns via call by reference)
    """
    dim = len(filter.shape)
    if dim == 1:
        _symmetrize_filter_center_at_zero_1D(filter)
    elif dim == 2:
        _symmetrize_filter_center_at_zero_2D(filter)
    elif dim == 3:
        _symmetrize_filter_center_at_zero_3D(filter)
    else:
        raise ValueError('Only implemented for dimensions 1,2, and 3 so far')
    if renormalize:
        # Divide IN PLACE so the caller's array is actually normalized; the
        # previous `filter = filter / filter.sum()` rebound the local name and
        # silently discarded the result, making renormalize=True a no-op.
        # (Assumes a float-typed tensor/array, as is the case for smoothing
        # filters; integer dtypes cannot be divided in place.)
        filter /= filter.sum()
def are_indices_close(loc):
    """
    Checks whether a set of index locations (as produced by np.where) are
    roughly close to each other.

    :param loc: Index locations as outputted by np.where
    :return: True if, along every dimension, all indices lie within a small
        distance of the maximum index; False otherwise

    .. todo::
        There should be a better check for closeness of points. The implemented one is very crude.
    """
    # crude criterion: per dimension, every index must be within distance 2 of the maximum
    return all((abs(cloc - cloc.max())).max() <= 2 for cloc in loc)
def create_complex_fourier_filter(spatial_filter, sz, enforceMaxSymmetry=True, maxIndex=None, renormalize=False):
    """
    Creates a filter in the Fourier domain given a spatial array defining the filter

    :param spatial_filter: Array defining the filter.
    :param sz: Desired size of the filter in the Fourier domain.
    :param enforceMaxSymmetry: If set to *True* (default) forces the filter to be real and hence forces the filter
        in the spatial domain to be symmetric
    :param maxIndex: specifies the index of the maximum which will be used to enforceMaxSymmetry. If it is not
        defined, the maximum is simply computed
    :param renormalize: (bool) if true, the filter is renormalized to sum to one (useful for Gaussians for example);
        only takes effect when enforceMaxSymmetry is True
    :return: Returns the complex coefficients for the filter in the Fourier domain and the maxIndex
    :raises ValueError: if the maximum is not unique (so max symmetry cannot be enforced)
    """
    # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
    sz = np.array(sz)
    if enforceMaxSymmetry:
        if maxIndex is None:
            maxIndex = np.unravel_index(np.argmax(spatial_filter), spatial_filter.shape)
        maxValue = spatial_filter[maxIndex]
        loc = np.where(spatial_filter == maxValue)
        nrOfMaxValues = len(loc[0])
        if nrOfMaxValues > 1:
            # multiple maxima are tolerated only if they are clustered together
            if not are_indices_close(loc):
                raise ValueError('Cannot enforce max symmetry as maximum is not unique')
        # shift the filter so its maximum sits at index zero (FFT center convention)
        spatial_filter_max_at_zero = np.roll(spatial_filter, -np.array(maxIndex),
                                             list(range(len(spatial_filter.shape))))
        symmetrize_filter_center_at_zero(spatial_filter_max_at_zero, renormalize=renormalize)
        # the filter is now symmetric and the FT of a symmetric kernel has to be real,
        # so only the real part is kept
        if USE_CUDA:
            f_filter = create_cuda_filter(spatial_filter_max_at_zero, sz)
            ret_filter = f_filter[..., 0]  # only the real part
        else:
            f_filter = create_numpy_filter(spatial_filter_max_at_zero, sz)
            ret_filter = f_filter.real
        return ret_filter, maxIndex
    else:
        if USE_CUDA:
            # bug fix: create_cuda_filter requires the target size as its second argument;
            # it was previously called without sz, which raised a TypeError at runtime
            return create_cuda_filter(spatial_filter, sz), maxIndex
        else:
            return create_numpy_filter(spatial_filter, sz), maxIndex
def create_cuda_filter(spatial_filter, sz):
    """
    Creates the CUDA version of a Fourier filter. A leading singleton dimension is
    added to the output for computational convenience. The output is not the full
    complex result of shape (*, 2), where * is the shape of the input; instead the
    last signal dimension is halved to size floor(Nd/2)+1 (one-sided rfft output).

    :param spatial_filter: N1 x ... x Nd array, no batch dimension, no channel dimension
    :param sz: [N1, ..., Nd]
    :return: filter of size [1, N1, ..., N(d-1), floor(Nd/2)+1, 2]
    """
    filter_on_gpu = torch.from_numpy(spatial_filter).float().cuda()
    # prepend a singleton batch dimension
    filter_on_gpu = filter_on_gpu.unsqueeze(0)
    return torch.rfft(filter_on_gpu, len(sz))
def create_numpy_filter(spatial_filter, sz):
    """
    Creates the numpy (cpu) version of a Fourier filter by zero-padding the
    spatial filter to size sz and taking its n-dimensional FFT.

    :param spatial_filter: filter in the spatial domain
    :param sz: desired size of the transform
    :return: complex Fourier coefficients of the filter
    """
    # todo: maybe check if we can use rfft's here for better performance
    fourier_coefficients = np.fft.fftn(spatial_filter, s=sz)
    return fourier_coefficients
def sel_fftn(dim):
    """
    Selects the gpu or cpu version of the forward FFT.

    :param dim: spatial dimension of the data (1, 2, or 3 on cpu; any on gpu, but >3 is untested)
    :return: function pointer to the FFT implementation
    :raises ValueError: on cpu when dim is not 1, 2, or 3
    """
    if USE_CUDA:
        if dim not in [1, 2, 3]:
            print('Warning, fft more than 3d is supported but not tested')
        # bug fix: f was previously only assigned for dim in [1,2,3]; for higher
        # dimensions the function printed the warning and then raised an
        # UnboundLocalError at the return statement
        f = torch.rfft
    else:
        if dim == 1:
            f = np.fft.fft
        elif dim == 2:
            f = np.fft.fft2
        elif dim == 3:
            f = np.fft.fftn
        else:
            # bug fix: message previously referred to "ifft" in the fft selector
            raise ValueError('Only 1d-3d cpu fft supported')
    return f
def sel_ifftn(dim):
    """
    Selects the gpu or cpu version of the inverse FFT.

    :param dim: spatial dimension of the data (1, 2, or 3 on cpu; any on gpu, but >3 is untested)
    :return: function pointer to the inverse FFT implementation
    :raises ValueError: on cpu when dim is not 1, 2, or 3
    """
    if USE_CUDA:
        if dim not in [1, 2, 3]:
            print('Warning, fft more than 3d is supported but not tested')
        # bug fix: f was previously only assigned for dim in [1,2,3]; for higher
        # dimensions the function printed the warning and then raised a
        # NameError/UnboundLocalError at the return statement
        f = torch.irfft
    else:
        if dim == 1:
            f = np.fft.ifft
        elif dim == 2:
            f = np.fft.ifft2
        elif dim == 3:
            f = np.fft.ifftn
        else:
            raise ValueError('Only 1d-3d cpu ifft supported')
    return f
class FourierConvolution(Function):
    """
    pyTorch function to compute convolutions in the Fourier domain: f = g*h
    """

    def __init__(self, complex_fourier_filter):
        """
        Constructor for the Fourier-based convolution

        :param complex_fourier_filter: Filter in the Fourier domain as created by *createComplexFourierFilter*
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierConvolution, self).__init__()
        self.complex_fourier_filter = complex_fourier_filter
        if USE_CUDA:
            # cuda filters carry a leading singleton dimension (added in create_cuda_filter),
            # so the spatial dimension is one less than the tensor rank
            self.dim = complex_fourier_filter.dim() -1
        else:
            self.dim = len(complex_fourier_filter.shape)
        # backend- and dimension-specific forward/inverse FFT implementations
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)
        """The filter in the Fourier domain"""

    def forward(self, input):
        """
        Performs the Fourier-based filtering.

        The 3d cpu fft is not applied via a single fftn call: to avoid fusing the batch and
        channel dimensions into the transform, the 3d case is calculated in a loop. 1d/2d cpu
        work directly because fft and fft2 are built in; similarly the 1d/2d/3d gpu ffts are
        built in. In the gpu implementation rfft is used for efficiency, which means the filter
        should be symmetric (i.e. real in the Fourier domain):
        (input_real+input_img*i)(filter_real+filter_img*i) =
        (input_real*filter_real-input_img*filter_img) + (input_img*filter_real+input_real*filter_img)i;
        with filter_img = 0 this reduces to input_real*filter_real + (input_img*filter_real)i, i.e. ac + bci.

        :param input: Image
        :return: Filtered-image
        """
        if USE_CUDA:
            input = FFTVal(input,ini=1)
            f_input = self.fftn(input,self.dim,onesided=True)
            # the stored filter is real-valued in the Fourier domain; broadcast it over the input
            f_filter_real = self.complex_fourier_filter[0]
            f_filter_real=f_filter_real.expand_as(f_input[...,0])
            # duplicate along the complex axis so both real and imaginary parts are scaled
            f_filter_real = torch.stack((f_filter_real,f_filter_real),-1)
            f_conv = f_input * f_filter_real
            dim_input = len(input.shape)
            dim_input_batch = dim_input-self.dim
            # signal_sizes restores the original spatial size from the one-sided rfft output
            conv_ouput_real = self.ifftn(f_conv, self.dim,onesided=True,signal_sizes=input.shape[dim_input_batch::])
            result = conv_ouput_real
            return FFTVal(result, ini=-1)
        else:
            if self.dim <3:
                conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) * self.complex_fourier_filter)
                result = conv_output.real # should in principle be real
            elif self.dim==3:
                # loop over batch and channel so the 3d fftn only sees the spatial axes
                result = np.zeros(input.shape)
                for batch in range(input.size()[0]):
                    for ch in range(input.size()[1]):
                        conv_output = self.ifftn(self.fftn(input[batch,ch].detach().cpu().numpy()) * self.complex_fourier_filter)
                        result[batch,ch] = conv_output.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(result)
        # print( 'max(imag) = ' + str( (abs( conv_output.imag )).max() ) )
        # print( 'max(real) = ' + str( (abs( conv_output.real )).max() ) )

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient.

        The 3d cpu ifft is not applied via a single ifftn call: to avoid fusing the batch and
        channel dimensions, the 3d case is calculated in a loop. In the gpu implementation
        irfft is used for efficiency, which means the filter should be symmetric.

        :param grad_output: Gradient output of previous layer
        :return: Gradient including the Fourier-based convolution
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # input_imag =0, then get ac + bci
        if USE_CUDA:
            grad_output = FFTVal(grad_output, ini=1)
            #print grad_output.view(-1,1).sum()
            f_go = self.fftn(grad_output,self.dim,onesided=True)
            # same real-filter multiplication as in forward (the filter is its own conjugate)
            f_filter_real = self.complex_fourier_filter[0]
            f_filter_real = f_filter_real.expand_as(f_go[..., 0])
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_go * f_filter_real
            dim_input = len(grad_output.shape)
            dim_input_batch = dim_input - self.dim
            grad_input = self.ifftn(f_conv,self.dim,onesided=True,signal_sizes=grad_output.shape[dim_input_batch::])
            # print(grad_input)
            # print((grad_input[0,0,12:15]))
            return FFTVal(grad_input, ini=-1)
        else:
            # if self.needs_input_grad[0]:
            numpy_go = grad_output.detach().cpu().numpy()
            # we use the conjugate because the assumption was that the spatial filter is real
            # The following two lines should be correct
            if self.dim < 3:
                grad_input_c = (self.ifftn(np.conjugate(self.complex_fourier_filter) * self.fftn(numpy_go)))
                grad_input = grad_input_c.real
            elif self.dim == 3:
                grad_input = np.zeros(numpy_go.shape)
                assert grad_output.dim() == 5 # to ensure the behavior correct, we avoid more than 3 dimension fftn method
                for batch in range(grad_output.size()[0]):
                    for ch in range(grad_output.size()[1]):
                        grad_input_c = (self.ifftn(np.conjugate(self.complex_fourier_filter) *self.fftn(numpy_go[batch,ch])))
                        grad_input[batch,ch] = grad_input_c.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            # print(grad_input)
            # print((grad_input[0,0,12:15]))
            return torch.FloatTensor(grad_input)
        # print( 'grad max(imag) = ' + str( (abs( grad_input_c.imag )).max() ) )
        # print( 'grad max(real) = ' + str( (abs( grad_input_c.real )).max() ) )
class InverseFourierConvolution(Function):
    """
    pyTorch function to compute convolutions in the Fourier domain: f = g*h
    But uses the inverse of the smoothing filter
    """

    def __init__(self, complex_fourier_filter):
        """
        Constructor for the Fourier-based convolution (WARNING: EXPERIMENTAL)

        :param complex_fourier_filter: Filter in the Fourier domain as created by *createComplexFourierFilter*
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(InverseFourierConvolution, self).__init__()
        # the Fourier-domain filter whose (regularized) inverse is applied
        self.complex_fourier_filter = complex_fourier_filter
        if USE_CUDA:
            # cuda filters carry a leading singleton dimension (added in create_cuda_filter)
            self.dim = complex_fourier_filter.dim() - 1
        else:
            self.dim = len(complex_fourier_filter.shape)
        # backend- and dimension-specific forward/inverse FFT implementations
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)
        # regularizing weight added to the filter before division
        self.alpha = 0.1

    def set_alpha(self, alpha):
        """
        Sets the regularizing weight

        :param alpha: regularizing weight
        """
        self.alpha = alpha

    def get_alpha(self):
        """
        Returns the regularizing weight

        :return: regularizing weight
        """
        return self.alpha

    def forward(self, input):
        """
        Performs the Fourier-based filtering with the regularized inverse filter.

        :param input: Image
        :return: Filtered-image
        """
        # do the filtering in the Fourier domain
        # (a+bi)/(c) = (a/c) + (b/c)i
        if USE_CUDA:
            input = FFTVal(input, ini=1)
            f_input = self.fftn(input, self.dim, onesided=True)
            # bug fix: use an out-of-place addition; the previous in-place
            # `f_filter_real += self.alpha` permanently modified the cached filter,
            # accumulating alpha on every forward/backward call
            f_filter_real = self.complex_fourier_filter[0] + self.alpha
            f_filter_real = f_filter_real.expand_as(f_input[..., 0])
            # duplicate along the complex axis so both real and imaginary parts are divided
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_input / f_filter_real
            dim_input = len(input.shape)
            dim_input_batch = dim_input - self.dim
            # signal_sizes restores the original spatial size from the one-sided rfft output
            conv_ouput_real = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=input.shape[dim_input_batch::])
            return FFTVal(conv_ouput_real, ini=-1)
        else:
            if self.dim < 3:
                conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) / (self.alpha + self.complex_fourier_filter))
                result = conv_output.real  # should in principle be real
            elif self.dim == 3:
                # loop over batch and channel so the 3d fftn only sees the spatial axes
                result = np.zeros(input.shape)
                for batch in range(input.size()[0]):
                    for ch in range(input.size()[1]):
                        conv_output = self.ifftn(
                            self.fftn(input[batch, ch].detach().cpu().numpy()) / (self.alpha + self.complex_fourier_filter))
                        result[batch, ch] = conv_output.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(result)

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient

        :param grad_output: Gradient output of previous layer
        :return: Gradient including the Fourier-based convolution
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = None
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # if self.needs_input_grad[0]:
        if USE_CUDA:
            grad_output = FFTVal(grad_output, ini=1)
            f_go = self.fftn(grad_output, self.dim, onesided=True)
            # bug fix: out-of-place addition, see forward (the in-place += mutated the cached filter)
            f_filter_real = self.complex_fourier_filter[0] + self.alpha
            f_filter_real = f_filter_real.expand_as(f_go[..., 0])
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_go / f_filter_real
            dim_input = len(grad_output.shape)
            dim_input_batch = dim_input - self.dim
            # bug fix: slice from dim_input_batch as in forward (and as in FourierConvolution);
            # slicing from dim_input produced an empty signal_sizes tuple
            grad_input = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=grad_output.shape[dim_input_batch::])
            return FFTVal(grad_input, ini=-1)
        else:
            # if self.needs_input_grad[0]:
            numpy_go = grad_output.detach().cpu().numpy()
            # we use the conjugate because the assumption was that the spatial filter is real
            if self.dim < 3:
                grad_input_c = self.ifftn(self.fftn(numpy_go) / (self.alpha + np.conjugate(self.complex_fourier_filter)))
                grad_input = grad_input_c.real
            elif self.dim == 3:
                grad_input = np.zeros(numpy_go.shape)
                for batch in range(grad_output.size()[0]):
                    for ch in range(grad_output.size()[1]):
                        grad_input_c = self.ifftn(
                            self.fftn(numpy_go[batch, ch]) / (self.alpha + np.conjugate(self.complex_fourier_filter)))
                        grad_input[batch, ch] = grad_input_c.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(grad_input)
def fourier_convolution(input, complex_fourier_filter):
    """
    Convenience function for Fourier-based convolutions. Make sure to use this one (instead of directly
    using the class FourierConvolution). This will assure that each call generates its own instance
    and hence autograd will work properly

    :param input: Input image
    :param complex_fourier_filter: Filter in Fourier domain as generated by *createComplexFourierFilter*
    :return: the convolved image
    """
    # a fresh Function instance is created per call so autograd bookkeeping stays correct;
    # calling the instance invokes forward() and returns its result
    convolver = FourierConvolution(complex_fourier_filter)
    return convolver(input)
def inverse_fourier_convolution(input, complex_fourier_filter):
    """
    Convenience function for filtering with the (regularized) inverse filter.

    :param input: Input image
    :param complex_fourier_filter: Filter in Fourier domain as generated by *createComplexFourierFilter*
    :return: the image filtered with the inverse filter
    """
    # fresh Function instance per call, mirroring fourier_convolution
    deconvolver = InverseFourierConvolution(complex_fourier_filter)
    return deconvolver(input)
class GaussianFourierFilterGenerator(object):
    """
    Creates and caches complex Gaussian Fourier filters (and their x**2-weighted
    variants) for a fixed image size and spacing. Multiple standard deviations can
    be cached simultaneously in "slots" (multi-Gaussian support).
    """

    def __init__(self, sz, spacing, nr_of_slots=1):
        """
        :param sz: image size
        :param spacing: image spacing (numpy array, one entry per dimension)
        :param nr_of_slots: number of slots to hold Gaussians (to be able to support multi-Gaussian);
            typically should be set to the number of total desired Gaussians (so that none of
            them need to be recomputed)
        """
        self.sz = sz
        # image size
        self.spacing = spacing
        # image spacing
        self.volumeElement = self.spacing.prod()
        # volume of pixel/voxel
        self.dim = len(spacing)
        # dimension
        self.nr_of_slots = nr_of_slots
        # Gaussians are centered at zero
        self.mus = np.zeros(self.dim)
        # TODO: storing the identity map may be a little wasteful
        self.centered_id = utils.centered_identity_map(self.sz, self.spacing)
        # per-slot caches; index i of every list below refers to the same cached Gaussian
        self.complex_gaussian_fourier_filters = [None] * self.nr_of_slots
        self.max_indices = [None] * self.nr_of_slots
        self.sigmas_complex_gaussian_fourier_filters = [None] * self.nr_of_slots
        self.complex_gaussian_fourier_xsqr_filters = [None] * self.nr_of_slots
        self.sigmas_complex_gaussian_fourier_xsqr_filters = [None] * self.nr_of_slots
        # bug fix: keep the python-float sigma values slot-aligned (fixed-size list)
        # instead of an append-only list; appending broke the value->slot index lookup
        # in get_gaussian_filters as soon as a slot was overwritten
        self.sigmas_complex_gaussian_fourier_filters_np = [None] * self.nr_of_slots

    def get_number_of_slots(self):
        """Returns the number of filter slots available for caching."""
        return self.nr_of_slots

    def get_number_of_currently_stored_gaussians(self):
        """Returns how many slots currently hold a computed Gaussian filter."""
        nr_of_gaussians = 0
        for s in self.sigmas_complex_gaussian_fourier_filters:
            if s is not None:
                nr_of_gaussians += 1
        return nr_of_gaussians

    def get_dimension(self):
        """Returns the spatial dimension of the filters."""
        return self.dim

    def _compute_complex_gaussian_fourier_filter(self, sigma):
        # builds a normalized isotropic Gaussian in the spatial domain and converts it
        # to a (real) Fourier filter with enforced max symmetry
        stds = sigma.detach().cpu().numpy() * np.ones(self.dim)
        gaussian_spatial_filter = utils.compute_normalized_gaussian(self.centered_id, self.mus, stds)
        complex_gaussian_fourier_filter, max_index = create_complex_fourier_filter(gaussian_spatial_filter, self.sz, True)
        return complex_gaussian_fourier_filter, max_index

    def _compute_complex_gaussian_fourier_xsqr_filter(self, sigma, max_index=None):
        # the xsqr filter reuses the max index of the plain Gaussian filter,
        # so the plain filter must have been generated first
        if max_index is None:
            raise ValueError('A Gaussian filter needs to be generated / requested *before* any other filter')
        # TODO: maybe compute this jointly with the gaussian filter itself to avoid computing the spatial filter twice
        stds = sigma.detach().cpu().numpy() * np.ones(self.dim)
        gaussian_spatial_filter = utils.compute_normalized_gaussian(self.centered_id, self.mus, stds)
        # weight the Gaussian with |x|^2 (used for the gradient w.r.t. sigma)
        gaussian_spatial_xsqr_filter = gaussian_spatial_filter * (self.centered_id ** 2).sum(axis=0)
        complex_gaussian_fourier_xsqr_filter, max_index = create_complex_fourier_filter(gaussian_spatial_xsqr_filter, self.sz, True, max_index)
        return complex_gaussian_fourier_xsqr_filter, max_index

    def _find_closest_sigma_index(self, sigma, available_sigmas):
        """
        For a given sigma, finds the closest one in a list of available sigmas

        - If a sigma is already computed it finds its index
        - If the sigma has not been computed it finds the next empty slot (None)
        - If no empty slots are available it replaces the closest

        :param sigma: sigma value to look up
        :param available_sigmas: a list of sigmas that have already been computed (or None if they have not)
        :return: returns the index for the closest sigma among the available_sigmas
        """
        closest_i = None
        same_i = None
        empty_slot_i = None
        current_dist_sqr = None
        for i, s in enumerate(available_sigmas):
            if s is not None:
                # keep track of the one with the closest distance
                new_dist_sqr = (s - sigma) ** 2
                if current_dist_sqr is None:
                    current_dist_sqr = new_dist_sqr
                    closest_i = i
                else:
                    if new_dist_sqr < current_dist_sqr:
                        current_dist_sqr = new_dist_sqr
                        closest_i = i
                # also check if this is the same
                # if it is, record the first occurrence
                if torch.isclose(sigma, s):
                    if same_i is None:
                        same_i = i
            else:
                # found an empty slot, record it if it is the first one that was found
                if empty_slot_i is None:
                    empty_slot_i = i
        if same_i is not None:
            # we found the same; i.e., already computed
            return same_i
        elif empty_slot_i is not None:
            # it was not already computed, but we found an empty slot to put it in
            return empty_slot_i
        elif closest_i is not None:
            # no empty slot, so just overwrite the closest one if there is one
            return closest_i
        else:
            # nothing has been computed yet, so return the 0 index (this should never execute,
            # as it should be taken care of by the empty slot)
            return 0

    def get_gaussian_xsqr_filters(self, sigmas):
        """
        Returns complex Gaussian Fourier filter multiplied with x**2 with standard deviation sigma.
        Only recomputes the filter if sigma has changed.

        :param sigmas: standard deviation of the filter as a list
        :return: Returns the complex Gaussian Fourier filters as a list (in the same order as requested)
        """
        current_complex_gaussian_fourier_xsqr_filters = []
        # only recompute the ones that need to be recomputed
        for sigma in sigmas:
            # now find the index that corresponds to this sigma
            i = self._find_closest_sigma_index(sigma, self.sigmas_complex_gaussian_fourier_xsqr_filters)
            if self.sigmas_complex_gaussian_fourier_xsqr_filters[i] is None:
                need_to_recompute = True
            elif self.complex_gaussian_fourier_xsqr_filters[i] is None:
                need_to_recompute = True
            elif torch.isclose(sigma, self.sigmas_complex_gaussian_fourier_xsqr_filters[i]):
                need_to_recompute = False
            else:
                need_to_recompute = True
            if need_to_recompute:
                # format a python float (formatting a tensor with {:.2f} is version-dependent)
                print('INFO: Recomputing gaussian xsqr filter for sigma={:.2f}'.format(float(sigma)))
                self.sigmas_complex_gaussian_fourier_xsqr_filters[i] = sigma  # .clone()
                self.complex_gaussian_fourier_xsqr_filters[i], _ = self._compute_complex_gaussian_fourier_xsqr_filter(sigma, self.max_indices[i])
            current_complex_gaussian_fourier_xsqr_filters.append(self.complex_gaussian_fourier_xsqr_filters[i])
        return current_complex_gaussian_fourier_xsqr_filters

    def get_gaussian_filters(self, sigmas):
        """
        Returns a complex Gaussian Fourier filter with standard deviation sigma.
        Only recomputes the filter if sigma has changed.

        :param sigmas: standard deviations of the filters as a list
        :return: Returns the complex Gaussian Fourier filters as a list (in the same order as requested)
        """
        current_complex_gaussian_fourier_filters = []
        # only recompute the ones that need to be recomputed
        for sigma in sigmas:
            # fast path: look the value up among the cached python floats (slot-aligned)
            sigma_value = sigma.item()
            if sigma_value in self.sigmas_complex_gaussian_fourier_filters_np:
                i = self.sigmas_complex_gaussian_fourier_filters_np.index(sigma_value)
            else:
                i = self._find_closest_sigma_index(sigma, self.sigmas_complex_gaussian_fourier_filters)
            if self.sigmas_complex_gaussian_fourier_filters[i] is None:
                need_to_recompute = True
            elif self.complex_gaussian_fourier_filters[i] is None:
                need_to_recompute = True
            elif torch.isclose(sigma, self.sigmas_complex_gaussian_fourier_filters[i]):
                need_to_recompute = False
            else:
                need_to_recompute = True
            if need_to_recompute:  # todo not comment this warning
                print('INFO: Recomputing gaussian filter for sigma={:.2f}'.format(sigma_value))
                self.sigmas_complex_gaussian_fourier_filters[i] = sigma  # .clone()
                # bug fix: store the float value in the slot-aligned position (was: append),
                # keeping the value->slot lookup above consistent when slots are overwritten
                self.sigmas_complex_gaussian_fourier_filters_np[i] = sigma_value
                self.complex_gaussian_fourier_filters[i], self.max_indices[i] = self._compute_complex_gaussian_fourier_filter(sigma)
            current_complex_gaussian_fourier_filters.append(self.complex_gaussian_fourier_filters[i])
        return current_complex_gaussian_fourier_filters
class FourierGaussianConvolution(Function):
    """
    pyTorch function to compute Gaussian convolutions in the Fourier domain: f = g*h.
    Also allows to differentiate through the Gaussian standard deviation.
    """

    def __init__(self, gaussian_fourier_filter_generator):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: generator that creates (and caches) the Gaussian Fourier filters
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierGaussianConvolution, self).__init__()
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        self.dim = self.gaussian_fourier_filter_generator.get_dimension()
        # backend- and dimension-specific forward/inverse FFT implementations
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)

    def _compute_convolution_CUDA(self,input,complex_fourier_filter):
        # Fourier-domain multiplication with a real-valued filter (gpu/rfft backend)
        input = FFTVal(input, ini=1)
        f_input = self.fftn(input, self.dim, onesided=True)
        f_filter_real = complex_fourier_filter[0]
        f_filter_real = f_filter_real.expand_as(f_input[..., 0])
        # duplicate along the complex axis so both real and imaginary parts are scaled
        f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
        f_conv = f_input * f_filter_real
        dim_input = len(input.shape)
        dim_input_batch = dim_input - self.dim
        # signal_sizes restores the original spatial size from the one-sided rfft output
        conv_ouput_real = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=input.shape[dim_input_batch::])
        result = conv_ouput_real
        return FFTVal(result, ini=-1)

    def _compute_convolution_CPU(self,input,complex_fourier_filter):
        # Fourier-domain multiplication on the cpu (numpy backend)
        if self.dim < 3:
            conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) * complex_fourier_filter)
            result = conv_output.real # should in principle be real
        elif self.dim == 3:
            # loop over batch and channel so the 3d fftn only sees the spatial axes
            result = np.zeros(input.shape)
            for batch in range(input.size()[0]):
                for ch in range(input.size()[1]):
                    conv_output = self.ifftn(self.fftn(input[batch, ch].detach().cpu().numpy()) * complex_fourier_filter)
                    result[batch, ch] = conv_output.real
        else:
            raise ValueError("cpu fft smooth should be 1d-3d")
        return torch.FloatTensor(result)
        # print( 'max(imag) = ' + str( (abs( conv_output.imag )).max() ) )
        # print( 'max(real) = ' + str( (abs( conv_output.real )).max() ) )

    def _compute_input_gradient_CUDA(self,grad_output,complex_fourier_filter):
        # gradient w.r.t. the input: same real-filter multiplication in the Fourier domain
        grad_output = FFTVal(grad_output, ini=1)
        # print grad_output.view(-1,1).sum()
        f_go = self.fftn(grad_output, self.dim, onesided=True)
        f_filter_real = complex_fourier_filter[0]
        f_filter_real = f_filter_real.expand_as(f_go[..., 0])
        f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
        f_conv = f_go * f_filter_real
        dim_input = len(grad_output.shape)
        dim_input_batch = dim_input - self.dim
        grad_input = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=grad_output.shape[dim_input_batch::])
        return FFTVal(grad_input, ini=-1)

    def _compute_input_gradient_CPU(self,grad_output,complex_fourier_filter):
        numpy_go = grad_output.detach().cpu().numpy()
        # we use the conjugate because the assumption was that the spatial filter is real
        if self.dim < 3:
            grad_input_c = (self.ifftn(np.conjugate(complex_fourier_filter) * self.fftn(numpy_go)))
            grad_input = grad_input_c.real
        elif self.dim == 3:
            grad_input = np.zeros(numpy_go.shape)
            assert grad_output.dim() == 5 # to ensure the behavior correct, we avoid more than 3 dimension fftn method
            for batch in range(grad_output.size()[0]):
                for ch in range(grad_output.size()[1]):
                    grad_input_c = (
                        self.ifftn(np.conjugate(complex_fourier_filter) * self.fftn(numpy_go[batch, ch])))
                    grad_input[batch, ch] = grad_input_c.real
        else:
            raise ValueError("cpu fft smooth should be 1d-3d")
        return torch.FloatTensor(grad_input)

    def _compute_sigma_gradient_CUDA(self,input,sigma,grad_output,complex_fourier_filter,complex_fourier_xsqr_filter):
        # gradient w.r.t. sigma, expressed via the plain and the x**2-weighted Gaussian filters
        convolved_input = self._compute_convolution_CUDA(input, complex_fourier_filter)
        # NOTE(review): grad_output is converted to a numpy array while convolved_input is the
        # torch tensor returned by _compute_convolution_CUDA — verify this mixed numpy/tensor
        # arithmetic is intended (same pattern in the CPU variant below)
        grad_sigma = -1. / sigma * self.dim * (grad_output.detach().cpu().numpy() * convolved_input).sum()
        convolved_input_xsqr = self._compute_convolution_CUDA(input, complex_fourier_xsqr_filter)
        grad_sigma += 1. / (sigma ** 3) * (grad_output.detach().cpu().numpy() * convolved_input_xsqr).sum()
        return grad_sigma

    # TODO: gradient appears to be incorrect
    def _compute_sigma_gradient_CPU(self,input,sigma,grad_output,complex_fourier_filter,complex_fourier_xsqr_filter):
        # cpu variant of the sigma gradient (see CUDA version and TODO above)
        convolved_input = self._compute_convolution_CPU(input,complex_fourier_filter)
        grad_sigma = -1./sigma*self.dim*(grad_output.detach().cpu().numpy()*convolved_input).sum()
        convolved_input_xsqr = self._compute_convolution_CPU(input,complex_fourier_xsqr_filter)
        grad_sigma += 1./(sigma**3)*(grad_output.detach().cpu().numpy()*convolved_input_xsqr).sum()
        return grad_sigma
class FourierSingleGaussianConvolution(FourierGaussianConvolution):
    """
    pyTorch function to compute Gaussian convolutions in the Fourier domain: f = g*h.
    Also allows to differentiate through the Gaussian standard deviation.
    """

    def __init__(self, gaussian_fourier_filter_generator, compute_std_gradient):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: generator which creates (and caches) the Gaussian Fourier filter
        :param compute_std_gradient: if True computes the gradient with respect to the std, otherwise set to 0
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierSingleGaussianConvolution, self).__init__(gaussian_fourier_filter_generator)
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # filters, input, and sigma are cached in forward so backward can reuse them
        self.complex_fourier_filter = None
        self.complex_fourier_xsqr_filter = None
        self.input = None
        self.sigma = None
        self.compute_std_gradient = compute_std_gradient

    def forward(self, input, sigma):
        """
        Performs the Fourier-based filtering.

        the 3d cpu fft is not implemented in fftn, to avoid fusing with batch and channel, here 3d is calcuated in loop
        1d 2d cpu works well because fft and fft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
        in gpu implementation, the rfft is used for efficiency, which means the filter should be symmetric

        :param input: Image
        :param sigma: standard deviation of the Gaussian filter
        :return: Filtered-image
        """
        self.input = input
        self.sigma = sigma
        # NOTE(review): the filter generator iterates over sigma, so sigma is presumably a
        # 1-element tensor rather than a 0-dim scalar — confirm against callers
        self.complex_fourier_filter = self.gaussian_fourier_filter_generator.get_gaussian_filters(self.sigma)[0]
        self.complex_fourier_xsqr_filter = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(self.sigma)[0]
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # filter_imag =0, then get ac + bci
        if USE_CUDA:
            return self._compute_convolution_CUDA(input,self.complex_fourier_filter)
        else:
            return self._compute_convolution_CPU(input,self.complex_fourier_filter)

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient.

        the 3d cpu ifft is not implemented in ifftn, to avoid fusing with batch and channel, here 3d is calcuated in loop
        1d 2d cpu works well because ifft and ifft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
        in gpu implementation, the irfft is used for efficiency, which means the filter should be symmetric

        :param grad_output: Gradient output of previous layer
        :return: Gradients with respect to the input image and with respect to sigma
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = grad_sigma = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        # first compute the gradient with respect to the input
        if self.needs_input_grad[0]:
            # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
            # input_imag =0, then get ac + bci
            if USE_CUDA:
                grad_input = self._compute_input_gradient_CUDA(grad_output,self.complex_fourier_filter)
            else:
                grad_input = self._compute_input_gradient_CPU(grad_output,self.complex_fourier_filter)
        # now compute the gradient with respect to the standard deviation of the filter
        if self.compute_std_gradient:
            if self.needs_input_grad[1]:
                if USE_CUDA:
                    grad_sigma = self._compute_sigma_gradient_CUDA(self.input,self.sigma,grad_output,self.complex_fourier_filter,self.complex_fourier_xsqr_filter)
                else:
                    grad_sigma = self._compute_sigma_gradient_CPU(self.input,self.sigma,grad_output,self.complex_fourier_filter,self.complex_fourier_xsqr_filter)
        else:
            # std gradient disabled: return zeros of matching shape instead
            grad_sigma = torch.zeros_like(self.sigma)
        # now return the computed gradients
        return grad_input, grad_sigma
def fourier_single_gaussian_convolution(input, gaussian_fourier_filter_generator,sigma,compute_std_gradient):
    """
    Convenience function for Fourier-based Gaussian convolutions. Make sure to use this one (instead of directly
    using the class FourierGaussianConvolution). This will assure that each call generates its own instance
    and hence autograd will work properly

    :param input: Input image
    :param gaussian_fourier_filter_generator: generator which will create Gaussian Fourier filter (and caches them)
    :param sigma: standard deviation for the Gaussian filter
    :param compute_std_gradient: if set to True computes the gradient otherwise sets it to 0
    :return: the smoothed image
    """
    # instantiate a fresh Function object per call so autograd works properly;
    # calling the instance invokes forward() and returns its result
    convolution_fn = FourierSingleGaussianConvolution(gaussian_fourier_filter_generator, compute_std_gradient)
    return convolution_fn(input, sigma)
class FourierMultiGaussianConvolution(FourierGaussianConvolution):
"""
pyTorch function to compute multi Gaussian convolutions in the Fourier domain: f = g*h.
Also allows to differentiate through the Gaussian standard deviation.
"""
    def __init__(self, gaussian_fourier_filter_generator,compute_std_gradients,compute_weight_gradients):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: class instance that creates and caches the Gaussian filters
        :param compute_std_gradients: if set to True the gradients for std are computed, otherwise they are filled w/ zero
        :param compute_weight_gradients: if set to True the gradients for weights are computed, otherwise they are filled w/ zero
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierMultiGaussianConvolution, self).__init__(gaussian_fourier_filter_generator)
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # filters, input, weights, and sigmas are cached in forward so backward can reuse them
        self.complex_fourier_filters = None
        self.complex_fourier_xsqr_filters = None
        self.input = None
        self.weights = None
        self.sigmas = None
        self.nr_of_gaussians = None
        self.compute_std_gradients = compute_std_gradients
        self.compute_weight_gradients = compute_weight_gradients
def forward(self, input, sigmas, weights):
"""
Performs the Fourier-based filtering
the 3d cpu fft is not implemented in fftn, to avoid fusing with batch and channel, here 3d is calcuated in loop
1d 2d cpu works well because fft and fft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
in gpu implementation, the rfft is used for efficiency, which means the filter should be symmetric
:param input: Image
:return: Filtered-image
"""
self.input = input
self.sigmas = sigmas
self.weights = weights
self.nr_of_gaussians = len(self.sigmas)
nr_of_weights = len(self.weights)
assert(self.nr_of_gaussians==nr_of_weights)
self.complex_fourier_filters = self.gaussian_fourier_filter_generator.get_gaussian_filters(self.sigmas)
self.complex_fourier_xsqr_filters = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(self.sigmas)
# (a+bi)(c+di) = (ac-bd) + (bc+ad)i
# filter_imag =0, then get ac + bci
ret = torch.zeros_like(input)
for i in range(self.nr_of_gaussians):
if USE_CUDA:
ret += self.weights[i]*self._compute_convolution_CUDA(input,self.complex_fourier_filters[i])
else:
ret+= self.weights[i]*self._compute_convolution_CPU(input,self.complex_fourier_filters[i])
return ret
def _compute_input_gradient_CUDA_multi_gaussian(self,grad_output,complex_fourier_filters):
grad_input = torch.zeros_like(self.input)
for i in range(self.nr_of_gaussians):
grad_input += self.weights[i]*self._compute_input_gradient_CUDA(grad_output, complex_fourier_filters[i])
return grad_input
def _compute_input_gradient_CPU_multi_gaussian(self,grad_output,complex_fourier_filters):
grad_input = torch.zeros_like(self.input)
for i in range(self.nr_of_gaussians):
grad_input += self.weights[i] * self._compute_input_gradient_CPU(grad_output,complex_fourier_filters[i])
return grad_input
def _compute_sigmas_gradient_CUDA_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
grad_sigmas = torch.zeros_like(sigmas)
for i in range(self.nr_of_gaussians):
grad_sigmas[i] = self.weights[i] * self._compute_sigma_gradient_CUDA(input,sigmas[i],grad_output,
complex_fourier_filters[i],
complex_fourier_xsqr_filters[i])
return grad_sigmas
def _compute_sigmas_gradient_CPU_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
grad_sigmas = torch.zeros_like(sigmas)
for i in range(self.nr_of_gaussians):
grad_sigmas[i] = self.weights[i] * self._compute_sigma_gradient_CPU(input,sigmas[i],grad_output,
complex_fourier_filters[i],
complex_fourier_xsqr_filters[i])
return grad_sigmas
def _compute_weights_gradient_CUDA_multi_gaussian(self,input,weights,grad_output,complex_fourier_filters):
grad_weights = torch.zeros_like(weights)
for i in range(self.nr_of_gaussians):
grad_weights[i] = (grad_output*self._compute_convolution_CUDA(input,complex_fourier_filters[i])).sum()
return grad_weights
def _compute_weights_gradient_CPU_multi_gaussian(self,input,weights,grad_output,complex_fourier_filters):
grad_weights = torch.zeros_like(weights)
for i in range(self.nr_of_gaussians):
grad_weights[i] = (grad_output * self._compute_convolution_CPU(input, complex_fourier_filters[i])).sum()
return grad_weights
# This function has only a single output, so it gets only one gradient
def backward(self, grad_output):
"""
Computes the gradient
the 3d cpu ifft is not implemented in ifftn, to avoid fusing with batch and channel, here 3d is calcuated in loop
1d 2d cpu works well because ifft and ifft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
in gpu implementation, the irfft is used for efficiency, which means the filter should be symmetric
:param grad_output: Gradient output of previous layer
:return: Gradient including the Fourier-based convolution
"""
# Initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
grad_input = grad_sigmas = grad_weights = None
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
# first compute the gradient with respect to the input
if self.needs_input_grad[0]:
# (a+bi)(c+di) = (ac-bd) + (bc+ad)i
# input_imag =0, then get ac + bci
if USE_CUDA:
grad_input = self._compute_input_gradient_CUDA_multi_gaussian(grad_output,self.complex_fourier_filters)
else:
grad_input = self._compute_input_gradient_CPU_multi_gaussian(grad_output,self.complex_fourier_filters)
# now compute the gradient with respect to the standard deviation of the filter
if self.needs_input_grad[1]:
if self.compute_std_gradients:
if USE_CUDA:
grad_sigmas = self._compute_sigmas_gradient_CUDA_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
else:
grad_sigmas = self._compute_sigmas_gradient_CPU_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
else:
grad_sigmas = torch.zeros_like(self.sigmas)
if self.needs_input_grad[2]:
if self.compute_weight_gradients:
if USE_CUDA:
grad_weights = self._compute_weights_gradient_CUDA_multi_gaussian(self.input,self.weights,grad_output,self.complex_fourier_filters)
else:
grad_weights = self._compute_weights_gradient_CPU_multi_gaussian(self.input,self.weights,grad_output,self.complex_fourier_filters)
else:
grad_weights = torch.zeros_like(self.weights)
# now return the computed gradients
#print('gsigmas: min=' + str(grad_sigmas.min()) + '; max=' + str(grad_sigmas.max()))
#print('gweight: min=' + str(grad_weights.min()) + '; max=' + str(grad_weights.max()))
#print( 'gsigmas = ' + str( grad_sigmas))
#print( 'gweight = ' + str( grad_weights))
return grad_input, grad_sigmas, grad_weights
def fourier_multi_gaussian_convolution(input, gaussian_fourier_filter_generator,sigma,weights,compute_std_gradients=True,compute_weight_gradients=True):
    """
    Convenience wrapper for Fourier-based multi-Gaussian convolutions.

    Always use this wrapper instead of instantiating
    FourierMultiGaussianConvolution directly: a brand-new Function object is
    built on every call, so the autograd state cached on the instance is never
    shared between invocations.

    :param input: Input image
    :param gaussian_fourier_filter_generator: generator which will create Gaussian Fourier filters (and caches them)
    :param sigma: standard deviations for the Gaussian filters (need to be positive)
    :param weights: weights for the multi-Gaussian kernel (need to sum up to one and need to be positive)
    :param compute_std_gradients: if set to True computes the gradients with respect to the standard deviations
    :param compute_weight_gradients: if set to True then gradients for the weights are computed, otherwise they are replaced w/ zero
    :return: the filtered image
    """
    # Instantiate first (constructor arguments), then invoke __call__ which
    # routes through forward() and registers the op with autograd.
    conv_fn = FourierMultiGaussianConvolution(gaussian_fourier_filter_generator,
                                              compute_std_gradients,
                                              compute_weight_gradients)
    return conv_fn(input, sigma, weights)
class FourierSetOfGaussianConvolutions(FourierGaussianConvolution):
    """
    pyTorch function to compute a set of Gaussian convolutions (as in the multi-Gaussian) in the Fourier domain: f = g*h.
    Also allows to differentiate through the standard deviations. The output is not a smoothed field, but the
    set of all of them (stacked along a new leading dimension). This can then be fed into a subsequent
    neural network for further processing.

    NOTE(review): old-style (instance-based) autograd Function -- forward()
    caches state on ``self`` for backward(), so use the convenience wrapper
    ``fourier_set_of_gaussian_convolutions`` to get a fresh instance per call.
    """

    def __init__(self, gaussian_fourier_filter_generator,compute_std_gradients):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: class instance that creates and caches the Gaussian filters
        :param compute_std_gradients: if set to True the gradients for the stds are computed, otherwise they are filled w/ zero
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierSetOfGaussianConvolutions, self).__init__(gaussian_fourier_filter_generator)
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # populated by forward(), consumed by backward()
        self.complex_fourier_filters = None
        self.complex_fourier_xsqr_filters = None
        self.input = None
        self.sigmas = None
        self.nr_of_gaussians = None
        self.compute_std_gradients = compute_std_gradients

    def forward(self, input, sigmas):
        """
        Performs the Fourier-based filtering; one convolution per sigma, all
        results stacked along a new leading dimension.

        the 3d cpu fft is not implemented in fftn, to avoid fusing with batch and channel, here 3d is calculated in loop
        1d 2d cpu works well because fft and fft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
        in gpu implementation, the rfft is used for efficiency, which means the filter should be symmetric

        :param input: Image
        :param sigmas: standard deviations of the Gaussians
        :return: tensor of shape [nr_of_gaussians] + input.size() holding all filtered images
        """
        self.input = input
        self.sigmas = sigmas
        self.nr_of_gaussians = len(self.sigmas)
        self.complex_fourier_filters = self.gaussian_fourier_filter_generator.get_gaussian_filters(self.sigmas)
        if self.compute_std_gradients:
            # xsqr filters are only needed for the sigma gradients
            self.complex_fourier_xsqr_filters = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(self.sigmas)
        # TODO check if the xsqr should be put into an if statement here

        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # filter_imag =0, then get ac + bci

        # allocate output with an extra leading "which Gaussian" dimension
        sz = input.size()
        new_sz = [self.nr_of_gaussians] + list(sz)
        ret = AdaptVal(MyTensor(*new_sz))

        for i in range(self.nr_of_gaussians):
            if USE_CUDA:
                ret[i,...] = self._compute_convolution_CUDA(input,self.complex_fourier_filters[i])
            else:
                ret[i,...] = self._compute_convolution_CPU(input,self.complex_fourier_filters[i])

        return ret

    def _compute_input_gradient_CUDA_multi_gaussian(self,grad_output,complex_fourier_filters):
        # sum the contributions of every output slice i back onto the single input (CUDA path)
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self._compute_input_gradient_CUDA(grad_output[i,...], complex_fourier_filters[i])
        return grad_input

    def _compute_input_gradient_CPU_multi_gaussian(self,grad_output,complex_fourier_filters):
        # sum the contributions of every output slice i back onto the single input (CPU path)
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self._compute_input_gradient_CPU(grad_output[i,...],complex_fourier_filters[i])
        return grad_input

    def _compute_sigmas_gradient_CUDA_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        # per-sigma gradient; each sigma only influences its own output slice (CUDA path)
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self._compute_sigma_gradient_CUDA(input,sigmas[i],grad_output[i,...],
                                                               complex_fourier_filters[i],
                                                               complex_fourier_xsqr_filters[i])
        return grad_sigmas

    def _compute_sigmas_gradient_CPU_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        # per-sigma gradient; each sigma only influences its own output slice (CPU path)
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self._compute_sigma_gradient_CPU(input,sigmas[i],grad_output[i,...],
                                                              complex_fourier_filters[i],
                                                              complex_fourier_xsqr_filters[i])
        return grad_sigmas

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient

        the 3d cpu ifft is not implemented in ifftn, to avoid fusing with batch and channel, here 3d is calcuated in loop
        1d 2d cpu works well because ifft and ifft2 is inbuilt, similarly , 1d 2d 3d gpu fft also is inbuilt
        in gpu implementation, the irfft is used for efficiency, which means the filter should be symmetric

        :param grad_output: Gradient output of previous layer; has the extra leading per-Gaussian dimension
        :return: tuple (grad_input, grad_sigmas), matching the two arguments of forward()
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = grad_sigmas = None

        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.

        # first compute the gradient with respect to the input
        if self.needs_input_grad[0]:
            # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
            # input_imag =0, then get ac + bci
            if USE_CUDA:
                grad_input = self._compute_input_gradient_CUDA_multi_gaussian(grad_output,self.complex_fourier_filters)
            else:
                grad_input = self._compute_input_gradient_CPU_multi_gaussian(grad_output,self.complex_fourier_filters)

        # now compute the gradient with respect to the standard deviation of the filter
        if self.needs_input_grad[1]:
            if self.compute_std_gradients:
                if USE_CUDA:
                    grad_sigmas = self._compute_sigmas_gradient_CUDA_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
                else:
                    grad_sigmas = self._compute_sigmas_gradient_CPU_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
            else:
                # gradients requested but disabled via the constructor flag:
                # return zeros instead of None so downstream code sees a tensor
                grad_sigmas = torch.zeros_like(self.sigmas)

        # now return the computed gradients
        return grad_input, grad_sigmas
def fourier_set_of_gaussian_convolutions(input, gaussian_fourier_filter_generator,sigma,compute_std_gradients=False):
    """
    Convenience wrapper for computing a set of Fourier-based Gaussian convolutions.

    Always use this wrapper instead of instantiating
    FourierSetOfGaussianConvolutions directly: a brand-new Function object is
    built on every call, so autograd state cached on the instance is never
    shared between invocations.

    :param input: Input image
    :param gaussian_fourier_filter_generator: generator which will create Gaussian Fourier filters (and caches them)
    :param sigma: standard deviations for the Gaussian filters (need to be positive)
    :param compute_std_gradients: if set to True then gradients for the standard deviations are computed, otherwise they are replaced w/ zero
    :return: stack of smoothed images, one slice per Gaussian
    """
    # Instantiate first (constructor arguments), then invoke __call__ which
    # routes through forward() and registers the op with autograd.
    conv_fn = FourierSetOfGaussianConvolutions(gaussian_fourier_filter_generator,
                                               compute_std_gradients)
    return conv_fn(input, sigma)
def check_fourier_conv():
    """
    Convenience function to check the analytical gradient of FourierConvolution
    against pytorch's numerical gradcheck, printing the result.

    :return: None; prints True if analytical and numerical gradient agree

    .. todo::
        The current check seems to fail in pyTorch. However, the gradient appears
        to be correct. Potentially an issue with the numerical gradient approximation.
    """
    # gradcheck takes a tuple of tensors as input, checks if the gradient
    # evaluated with these tensors is close enough to a numerical
    # approximation, and returns True if so.
    # TODO: Seems to fail at the moment, check why if there are issues with the gradient
    sz = np.array([20, 20], dtype='int64')
    # BUG FIX: removed the unused local `f` (a uniform filter was computed
    # but never used -- only the Gaussian `g` below feeds the filter).
    dim = len(sz)
    mus = np.zeros(dim)
    stds = np.ones(dim)
    spacing = np.ones(dim)
    # build a Gaussian test filter on a centered identity map
    centered_id = utils.centered_identity_map(sz, spacing)
    g = 100 * utils.compute_normalized_gaussian(centered_id, mus, stds)
    FFilter, _ = create_complex_fourier_filter(g, sz)
    input = AdaptVal(torch.randn([1, 1] + list(sz)))
    input.requires_grad = True
    test = gradcheck(FourierConvolution(FFilter), input, eps=1e-6, atol=1e-4)
    print(test)
def check_run_forward_and_backward():
    """
    Convenience function that exercises FourierConvolution once forward and
    once backward.

    :return: None; prints the gradient with respect to the input
    """
    shape = [20, 20]
    kernel = 1 / 400. * np.ones(shape)
    fourier_filter, _ = create_complex_fourier_filter(kernel, shape, False)
    image = torch.randn(shape).float()
    image.requires_grad = True
    result = FourierConvolution(fourier_filter)(image)
    # back-propagate an arbitrary upstream gradient through the convolution
    result.backward(torch.randn(shape).float())
    print(image.grad)
# ---- end of file (dataset separator artifact "|" commented out) ----
# <gh_stars>1-10  -- repository metadata artifact, not Python code
# Copyright (c) 2021 slbotzone <https://t.me/slbotzone>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import aiohttp
import asyncio
import json
import sys
import time
from youtubesearchpython import SearchVideos
from pyrogram import filters, Client
from sample_config import Config
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
ContentTooShortError,
DownloadError,
ExtractorError,
GeoRestrictedError,
MaxDownloadsReached,
PostProcessingError,
UnavailableVideoError,
XAttrMetadataError,
)
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, InlineQuery, InputTextMessageContent
# Pyrogram client for the song-downloader bot; API credentials and the bot
# token come from sample_config.Config.
Jebot = Client(
    "Song Downloader",
    api_id=Config.APP_ID,
    api_hash=Config.API_HASH,
    bot_token=Config.TG_BOT_TOKEN,
)
#For private messages
#Ignore commands
#No bots also allowed
@Jebot.on_message(filters.private & ~filters.bot & ~filters.command("help") & ~filters.command("start") & ~filters.command("s"))
async def song(client, message):
    """
    Private-chat handler: treat any incoming text as a song query, find the
    best YouTube match, download it as a 320 kbps mp3 via youtube_dl and
    reply with the audio file.
    """
    #ImJanindu #JEBotZ
    cap = "@fastsongdownloderslbzbot"
    url = message.text
    rkp = await message.reply("🌟 Processing...")
    search = SearchVideos(url, offset=1, mode="json", max_results=1)
    test = search.result()
    p = json.loads(test)
    q = p.get("search_result")
    try:
        url = q[0]["link"]
    except BaseException:
        return await rkp.edit("🌟Failed to find that song.")
    # renamed from `type`, which shadowed the builtin and broke the generic
    # exception handler below
    media_type = "audio"
    if media_type == "audio":
        opts = {
            "format": "bestaudio",
            "addmetadata": True,
            "key": "FFmpegMetadata",
            "writethumbnail": True,
            "prefer_ffmpeg": True,
            "geo_bypass": True,
            "nocheckcertificate": True,
            "postprocessors": [
                {
                    "key": "FFmpegExtractAudio",
                    "preferredcodec": "mp3",
                    "preferredquality": "320",
                }
            ],
            "outtmpl": "%(id)s.mp3",
            "quiet": True,
            "logtostderr": False,
        }
    song = True
    try:
        await rkp.edit("📥 Downloading...")
        with YoutubeDL(opts) as rip:
            rip_data = rip.extract_info(url)
    except DownloadError as DE:
        await rkp.edit(f"`{str(DE)}`")
        return
    except ContentTooShortError:
        await rkp.edit("`📥 The download content was too short.`")
        return
    except GeoRestrictedError:
        await rkp.edit(
            "`Video is not available from your geographic location due to geographic restrictions imposed by a website.`"
        )
        return
    except MaxDownloadsReached:
        await rkp.edit("`Max-downloads limit has been reached.`")
        return
    except PostProcessingError:
        await rkp.edit("`There was an error during post processing.`")
        return
    except UnavailableVideoError:
        await rkp.edit("`Media is not available in the requested format.`")
        return
    except XAttrMetadataError as XAME:
        await rkp.edit(f"`{XAME.code}: {XAME.msg}\n{XAME.reason}`")
        return
    except ExtractorError:
        await rkp.edit("`There was an error during info extraction.`")
        return
    except Exception as e:
        # BUG FIX: original used f"{str(type(e)): {str(e)}}" -- the message
        # landed in the format-spec slot, and `type` was shadowed by a local
        # string, so this line itself crashed.
        await rkp.edit(f"{e.__class__.__name__}: {e}")
        return
    if song:
        await rkp.edit(" 📤 Uploading...") #ImJanindu
        lol = "./thumb.jpg"  # NOTE(review): assumes thumb.jpg exists in the cwd -- confirm
        lel = await message.reply_audio(
            f"{rip_data['id']}.mp3",
            duration=int(rip_data["duration"]),
            title=str(rip_data["title"]),
            # BUG FIX: key was "📤uploader" (stray emoji) -> KeyError at runtime
            performer=str(rip_data["uploader"]),
            thumb=lol,
            caption=cap) #JEBotZ
        await rkp.delete()
    # best-effort cleanup of downloaded artifacts
    os.system("rm -rf *.mp3")
    os.system("rm -rf *.webp")
@Jebot.on_message(filters.command("song") & ~filters.edited & filters.group)
async def song(client, message):
    """
    Group-chat handler for ``/song <name>``: searches YouTube for the query,
    downloads the best audio as a 320 kbps mp3 via youtube_dl and replies
    with the audio file.
    """
    cap = "@fastsongdownloderslbzbot"
    rkp = await message.reply("Processing...")
    # BUG FIX: the original did split(None, 1)[1] unconditionally, which
    # raises IndexError when /song is sent without a query, and the
    # empty-query branch was missing a `return`, so execution fell through.
    parts = message.text.split(None, 1)
    if len(parts) < 2 or not parts[1]:
        return await rkp.edit("**🤦What's the song you want?**\nUsage`/song <song name>`")
    url = parts[1]
    search = SearchVideos(url, offset=1, mode="json", max_results=1)
    test = search.result()
    p = json.loads(test)
    q = p.get("search_result")
    try:
        url = q[0]["link"]
    except BaseException:
        return await rkp.edit("Failed to find that song.")
    # renamed from `type`, which shadowed the builtin and broke the generic
    # exception handler below
    media_type = "audio"
    if media_type == "audio":
        opts = {
            "format": "bestaudio",
            "addmetadata": True,
            "key": "FFmpegMetadata",
            "writethumbnail": True,
            "prefer_ffmpeg": True,
            "geo_bypass": True,
            "nocheckcertificate": True,
            "postprocessors": [
                {
                    "key": "FFmpegExtractAudio",
                    "preferredcodec": "mp3",
                    "preferredquality": "320",
                }
            ],
            "outtmpl": "%(id)s.mp3",
            "quiet": True,
            "logtostderr": False,
        }
    song = True
    try:
        await rkp.edit("Downloading...")
        with YoutubeDL(opts) as rip:
            rip_data = rip.extract_info(url)
    except DownloadError as DE:
        await rkp.edit(f"`{str(DE)}`")
        return
    except ContentTooShortError:
        await rkp.edit("`The download content was too short.`")
        return
    except GeoRestrictedError:
        await rkp.edit(
            "`Video is not available from your geographic location due to geographic restrictions imposed by a website.`"
        )
        return
    except MaxDownloadsReached:
        await rkp.edit("`Max-downloads limit has been reached.`")
        return
    except PostProcessingError:
        await rkp.edit("`There was an error during post processing.`")
        return
    except UnavailableVideoError:
        await rkp.edit("`Media is not available in the requested format.`")
        return
    except XAttrMetadataError as XAME:
        await rkp.edit(f"`{XAME.code}: {XAME.msg}\n{XAME.reason}`")
        return
    except ExtractorError:
        await rkp.edit("`There was an error during info extraction.`")
        return
    except Exception as e:
        # BUG FIX: original used f"{str(type(e)): {str(e)}}" -- the message
        # landed in the format-spec slot, and `type` was shadowed by a local
        # string, so this line itself crashed.
        await rkp.edit(f"{e.__class__.__name__}: {e}")
        return
    if song:
        await rkp.edit("Uploading...") #ImJanindu
        lol = "./thumb.jpg"  # NOTE(review): assumes thumb.jpg exists in the cwd -- confirm
        lel = await message.reply_audio(
            f"{rip_data['id']}.mp3",
            duration=int(rip_data["duration"]),
            title=str(rip_data["title"]),
            performer=str(rip_data["uploader"]),
            thumb=lol,
            caption=cap) #JEBotZ
        await rkp.delete()
    # best-effort cleanup of downloaded artifacts
    os.system("rm -rf *.mp3")
    os.system("rm -rf *.webp")
@Jebot.on_message(filters.command("start"))
async def start(client, message):
    """
    ``/start`` handler: greet the user with Help / tutorial inline buttons in
    private chat, or post a short "online" notice in groups.
    """
    if message.chat.type == 'private':
        # BUG FIX: the original had a stray `sticker("CAACAg...")` call as a
        # bare positional argument inside send_message -- a SyntaxError that
        # prevented the module from even importing. send_message takes no
        # sticker; use Jebot.send_sticker separately if a sticker is wanted.
        await Jebot.send_message(
            chat_id=message.chat.id,
            text="""<b>👋 Hey There, I'm a Song Downloader Bot. A bot by 👨💻 @slbotzone.
Hit help button to find out more about how to use me</b>""",
            reply_markup=InlineKeyboardMarkup(
                [[
                    InlineKeyboardButton(
                        " 💫 Help 💫", callback_data="help"),
                    InlineKeyboardButton(
                        "🔥 guid for create this bot🔥 ", url="https://www.youtube.com/channel/UCvYfJcTr8RY72dIapzMqFQA?sub_confirmation=1")
                ]]
            ),
            disable_web_page_preview=True,
            parse_mode="html",
            reply_to_message_id=message.message_id
        )
    else:
        await Jebot.send_message(
            chat_id=message.chat.id,
            text="""<b> 👋Song Downloader Is Online.\n\n</b>""",
            reply_markup=InlineKeyboardMarkup(
                [[
                    InlineKeyboardButton(
                        "Help", callback_data="help")
                ]]
            ),
            disable_web_page_preview=True,
            parse_mode="html",
            reply_to_message_id=message.message_id
        )
@Jebot.on_message(filters.command("help"))
async def help(client, message):
    """``/help`` handler: reply with usage instructions, tailored to chat type."""
    # pick the text first, then send once -- same output as the original
    # duplicated send_message calls
    if message.chat.type == 'private':
        help_text = """<b>Send a song name to download song
@slbotzone</b>"""
    else:
        help_text = "<b>Song Downloader Help.\n\nSyntax: `/song guleba`</b>"
    await Jebot.send_message(
        chat_id=message.chat.id,
        text=help_text,
        reply_to_message_id=message.message_id
    )
@Jebot.on_callback_query()
async def button(Jebot, update):
    """Dispatch inline-button callbacks; currently only the "help" button."""
    if "help" in update.data:
        # replace the pressed message with the help text
        await update.message.delete()
        await help(Jebot, update.message)
# Announce startup on the console and hand control to pyrogram's event loop
# (Jebot.run() blocks until the process is stopped).
print(
    """
Bot Started!
Join @slbotzone
"""
)

Jebot.run()
# ---- end of file (dataset separator artifact "|" commented out) ----
# <filename>AnalyzeResults.py  -- metadata artifact marking the next file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 15:49:01 2020
Analyze results
@author: <NAME>, <NAME>, <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib, glob
import seaborn as sns
from numpy.linalg import norm
from calcFreeEnergy import ZPE_calc, enthalpy_entropy, au_to_wvno
from calcFreeEnergy import free_energy_vtst
import numpy as np
from matplotlib.lines import Line2D
from scipy.io import loadmat
import ZCT
T_tunnel = [300] # ZCT Temperature, K.
def get_eigenvalue_matrix(df, tag="All"):
    """
    Collect per-row eigenvalue lists and energies from a results DataFrame.

    :param df: DataFrame with columns 'Eigenvalues' (iterable per row),
        'Energy' (scalar per row) and 'tag'; the index marks the position
        along the reaction path (0 at the transition state).
    :param tag: which rows to keep:
        - "R", "P" or "T": rows whose ``tag`` column equals this value
        - "All": every row
        - "Rhalf": rows with index <= 0 (reactant half)
        - "Phalf": rows with index >= 0 (product half)
        Any other value yields two empty lists (as in the original code).
    :return: tuple ``(evals, energies)`` of parallel lists
    """
    # BUG FIX (maintainability): the original duplicated the same copy loop
    # four times, once per tag branch; the selection logic is now a single
    # predicate and the extraction loop exists once.
    def _keep(index, row):
        if tag in ("R", "P", "T"):
            return row.tag == tag
        if tag == "All":
            return True
        if tag == "Rhalf":
            return index <= 0
        if tag == "Phalf":
            return index >= 0
        return False

    evals = []
    energies = []
    for index, row in df.iterrows():
        if _keep(index, row):
            # copy the eigenvalues into a plain list (a magnitude cutoff
            # filter used to live here -- see the commented-out original)
            evals.append([x for x in row.Eigenvalues])
            energies.append(row.Energy)
    return evals, energies
def count_acc(dfcol, acc):
    """Return how many values in *dfcol* have absolute value <= *acc*."""
    return sum(1 for val in dfcol if abs(val) <= acc)
# ---- Load optimization results and the matching reference DataFrame ----
# results.mat is produced by the upstream (MATLAB) optimization; 'system'
# names the chemical system and selects the pickled reference matrices.
x = loadmat("results.mat")
query_system = x["system"][0]
filename_true = glob.glob("MatrixPKLs/*" + query_system + "*.pkl")
alldf_zct = pd.read_pickle(filename_true[0])
mirror = False  # true if using half of a symmetric reaction, e.g. Sn2
if "Sn2" in filename_true[0]:
    mirror = True
if mirror:
    # Mirror the half-path about s=0 to reconstruct the full symmetric path:
    # negate index and arc length s, drop the duplicated center point.
    df_copy = alldf_zct.copy()
    df_copy = df_copy.drop(0)  # Try 0 first. If error, use 1
    df_copy.index = -1 * df_copy.index
    df_copy["s"] = -1 * df_copy["s"]
    df_copy = df_copy.sort_index(ascending=True)
    alldf_zct = df_copy.append(alldf_zct)
# arc-length coordinate along the reaction path
s = alldf_zct["s"]
s_list = []
for i in s.index:
    s_list.append(s[i])
evals = alldf_zct["Eigenvalues"]
Vmep_zct = alldf_zct["Energy"]
index_zct = alldf_zct.index
# reference zero-curvature tunneling correction from the true data
ZCT_true, E0_true, VAG_true, SAG_true, V_aG_true = ZCT.zct(
    T_tunnel, evals, Vmep_zct, s, "calc", "calc", "calc"
)
print("True ZCT:", ZCT_true)
# temperatures (K) at which thermochemistry is evaluated below
Trange = [100, 200, 500, 1000]
font = {"family": "sans-serif", "weight": "normal", "size": 15}
matplotlib.rc("font", **font)
system = query_system
accuracy = 2.39  # kcal/mol = 10kJ/mol - chemical accuracy
# index corresponding to starting pt/reactant. For half reactions, it is -1, for
# full reactions it is 0
indreact = -1
# Hessian-eigenvalue matrices: initial guess, recovered (VMC), and reference
Xinit = x["Xinit_c"]
Xfinal = x["Xvmc2"]  # Xvmc2 for QS based matrix
Xtrue = x["Xtrue"]
Energies = list(alldf_zct["Energy"])
ntrials = 1
# single-trial results table; later sections append thermo columns to it
alldf = pd.DataFrame(
    {
        "Trial": [0],
        "Energies": [Energies],
        "Xfinal": [Xfinal],
        "Xtrue": [Xtrue],
        "Xinit": [Xinit],
    }
)
#######
# THERMO
#######
# ZPE
ZPE_true = []
for column in Xtrue.T:
cm = np.array([au_to_wvno(x) for x in column]) # if abs(x)>cutoff])
zpe = ZPE_calc(cm)
ZPE_true.append(zpe)
ZPE_true = np.array(ZPE_true)
# Free Energy
G_true = []
cm_true = []
for index, column in enumerate(Xtrue.T):
cm = np.array([au_to_wvno(x) for x in column]) # if abs(x)>cutoff])
freeEgyTemp = []
for T in Trange:
H, S = enthalpy_entropy(cm, T)
# print(index,T,Energies)
# freeEgyTemp.append(free_energy(Energies[index],H,S,T))
freeEgyTemp.append(free_energy_vtst(Energies[index], cm, T))
G_true.append(freeEgyTemp)
cm_true.append(cm)
G_true = np.array(G_true).T
transpose = True
thermodict = []
error_init = []
error_final = []
all_colerr_init = []
all_colerr_vmc = []
ZPE_all = []
labels = []
for Tindex, T in enumerate(Trange):
label = "G, True, " + str(T) + "K"
labels.append(label)
G_true_dict = {
labels[0]: G_true[0, :],
labels[1]: G_true[1, :],
labels[2]: G_true[2, :],
labels[3]: G_true[3, :],
}
G_true_df = pd.DataFrame(G_true_dict)
for index, row in alldf.iterrows():
Xfinal = row["Xfinal"]
Xinit = row["Xinit"]
# Matrix errors
materror_init = norm(np.subtract(Xinit, Xtrue)) / norm(Xtrue)
materror_final = norm(np.subtract(Xfinal, Xtrue)) / norm(Xtrue)
error_init.append(materror_init)
error_final.append(materror_final)
ZPE_final = []
for column in Xfinal.T:
cm = np.array([au_to_wvno(x) for x in column]) # if abs(x)>cutoff])
zpe = ZPE_calc(cm)
ZPE_final.append(zpe)
ZPE_error = np.subtract(ZPE_final, ZPE_true)
ZPE_all.append(ZPE_final)
G_final = []
# Free energy, VMC
i = 0
for subindex, column in enumerate(Xfinal.T):
cm = np.array([au_to_wvno(x) for x in column]) # if abs(x)>cutoff])
freeEgyTemp = []
for T in Trange:
H, S = enthalpy_entropy(cm, T)
# freeEgyTemp.append(free_energy(Energies[subindex],H,S,T))
# freeEgyTemp.append(free_energy_vtst(row['Energies'][subindex],cm,T,cm_true[i],True))
freeEgyTemp.append(free_energy_vtst(row["Energies"][subindex], cm, T))
G_final.append(freeEgyTemp)
i = i + 1
G_final = np.array(G_final).T
G_error = np.subtract(G_final, G_true)
labels = []
error_labels = []
for Tindex, T in enumerate(Trange):
label = "G, VMC, " + str(T) + "K"
error_label = "G error, " + str(T) + "K"
labels.append(label)
error_labels.append(error_label)
# alldf.loc[index,label]= DeltG_final[Tindex,:]
thermodict.append(
{
"Trial": index,
"ZPE, VMC": ZPE_final,
"ZPE error": ZPE_error,
labels[0]: G_final[0, :],
labels[1]: G_final[1, :],
labels[2]: G_final[2, :],
labels[3]: G_final[3, :],
error_labels[0]: G_error[0, :],
error_labels[1]: G_error[1, :],
error_labels[2]: G_error[2, :],
error_labels[3]: G_error[3, :],
}
)
# Column errors
colerr_init = []
colerr_vmc = []
errortol = 1.0e-3 # 1e-3
if transpose:
for j in range(Xtrue.shape[1]):
colerr_init.append(norm(Xinit[:, j] - Xtrue[:, j]) / norm(Xtrue[:, j]))
colerr_vmc.append(norm(Xfinal[:, j] - Xtrue[:, j]) / norm(Xtrue[:, j]))
else:
for j in range(Xtrue.shape[0]):
colerr_init.append(norm(Xinit[j, :] - Xtrue[j, :]) / norm(Xtrue[j, :]))
colerr_vmc.append(norm(Xfinal[j, :] - Xtrue[j, :]) / norm(Xtrue[j, :]))
colerr_init.sort()
colerr_init = colerr_init[::-1]
count_init = sum(map(lambda x: x <= errortol, colerr_init))
colerr_vmc.sort()
colerr_vmc = colerr_vmc[::-1]
count_vmc = sum(map(lambda x: x <= errortol, colerr_vmc))
all_colerr_init.append(colerr_init)
all_colerr_vmc.append(colerr_vmc)
# raise Exception("This is just to get out, comment me when unneeded")
thermodf = pd.DataFrame(thermodict)
for column in thermodf.columns.values:
if column != "Trial":
alldf[column] = thermodf[column].values
for index, row in alldf.iterrows():
maxindzpe = abs(row["ZPE error"]).argmax()
alldf.loc[index, "ZPE error, max"] = row["ZPE error"][maxindzpe]
alldf.loc[index, "ZPE error, mean"] = row["ZPE error"].mean()
for Tindex, T in enumerate(Trange):
stringlabmax = "G error, max, " + str(T) + "K"
stringlabelmean = "G error, mean, " + str(T) + "K"
maxindg = abs(row[error_labels[Tindex]]).argmax()
alldf.loc[index, stringlabmax] = row[error_labels[Tindex]][maxindg]
alldf.loc[index, stringlabelmean] = row[error_labels[Tindex]].mean()
# Error vs. Column plot
if transpose:
cols = np.arange(Xtrue.shape[1])
else:
cols = np.arange(Xtrue.shape[0])
fig, ax = plt.subplots(figsize=(8, 4), dpi=200)
ax.set_yscale("log")
ax.set_ylim([1.0e-8, 1.0e2])
for i in range(ntrials):
ax.plot(
cols, all_colerr_init[i], linestyle="solid", color="r", label="Initial", lw=1.75
)
ax.plot(
cols, all_colerr_vmc[i], linestyle="dashed", color="b", label="HVMC", lw=1.75
)
if i == 0:
ax.legend()
ax.set_xlabel("Columns")
ax.set_ylabel("Error")
ax.grid()
# anntext = r'$\rho$ = '+str(density)+', '+str(ntrials)+' trials'
anntext = str(ntrials) + " trials"
ax.annotate(anntext, (0, 1.0e1), fontsize=13)
# Custom the inside plot: options are: “scatter” | “reg” | “resid” | “kde” | “hex”
# Joint scatter of per-trial *maximum* ZPE error vs. maximum G error at 1000 K.
errors = sns.jointplot(
    x=alldf["ZPE error, max"],
    y=alldf["G error, max, 1000K"],
    kind="scatter",
    color="m",
    s=100,
)
errors.set_axis_labels("Max. deviation, ZPE", r"Max. deviation, $G_{vib}$, 1000K")
# errors.set_axis_labels("ZPE",r"$G_{vib}$, 1000K")
plt.tight_layout()
plt.savefig("Figures/" + "MaxDeviation_" + system)
# plt.figure()
# Same plot for the per-trial *mean* errors.
errors = sns.jointplot(
    x=alldf["ZPE error, mean"],
    y=alldf["G error, mean, 1000K"],
    kind="scatter",
    color="olive",
    s=100,
)
errors.set_axis_labels("Mean error, ZPE", r"Mean error, $G_{vib}$, 1000K")
# errors.set_axis_labels("ZPE",r"$G_{vib}$, 1000K")
plt.tight_layout()
# Count success
# count_acc tallies how many trials fall within `accuracy` (chemical accuracy).
GaccFreqMax = count_acc(
    alldf["G error, max, 1000K"], accuracy
)  # (abs(alldf['G error, max, 1000K'])<=accuracy).describe()['freq']
ZPEaccFreqMax = count_acc(alldf["ZPE error, max"], accuracy)
GaccFreqMean = count_acc(alldf["G error, mean, 1000K"], accuracy)
ZPEaccFreqMean = count_acc(alldf["ZPE error, mean"], accuracy)
plt.savefig("Figures/" + "MeanError_" + system)
print("#Trials with maximum error within chemical accuracy: ")
print("ZPE: ", ZPEaccFreqMax)
print("Delta G, 1000K: ", GaccFreqMax)
print("#Trials with mean error within chemical accuracy: ")
print("ZPE: ", ZPEaccFreqMean)
print("Delta G, 1000K: ", GaccFreqMean)
print("Average initial error (%):", "{:.2f}".format(np.mean(error_init) * 100.0))
print(
    "Average VMC error (%):",
    "{:.2f} +- {:.2f}".format(
        np.mean(error_final) * 100.0, np.std(error_final) * 100.0
    ),
)
# Parity plots, model predictions
# Compare predicted Delta G (relative to the reactant index `indreact`)
# against the reference values from G_true.
count_deltaG = 0
Trueval = G_true[-1, :] - G_true[-1, indreact]
Predval = []
theta = []
G_mean = []
G_max = []
count_G_mean = 0
count_G_max = 0
for index, row in alldf.iterrows():
    # Per-trial Delta G referenced to the reactant column.
    deltG = [g - row["G, VMC, 1000K"][indreact] for g in row["G, VMC, 1000K"]]
    col = deltG - Trueval
    Predval.append(deltG)
    theta.append(col)
Predval = np.array(Predval)
fig, ax = plt.subplots(figsize=(5, 5))
for index, row in enumerate(Predval):
    ax.scatter(Trueval, row)  # , c=theta[index])
    for i in range(len(row)):
        if abs(Trueval[i] - row[i]) > accuracy:
            count_deltaG += 1
    diff_G = abs(Trueval - row)
    G_mean.append(np.mean(diff_G))
    G_max.append(max(diff_G))
for i in range(ntrials):
    # NOTE(review): the next line is a no-op (column accessed, result discarded)
    # -- looks like leftover debugging; confirm before removing.
    alldf["G error, max, 1000K"]
    if G_mean[i] > accuracy:
        count_G_mean += 1
for i in range(len(G_max)):
    if G_max[i] > accuracy:
        count_G_max += 1
print("DeltaG_mean - within chemical accuracy (%)", (GaccFreqMean / ntrials) * 100)
print("DeltaG_max - within chemical accuracy (%)", (GaccFreqMax / ntrials) * 100)
# Square parity plot with a shaded +/- accuracy band around y = x.
xlow = min(Trueval) - 2
xhigh = max(Trueval) + 5
shade = np.arange(xlow, xhigh, step=0.5)
ax.set_xlim([xlow, xhigh])
ax.set_ylim([xlow, xhigh])
ax.grid()
ax.set_xlabel("Target (kcal/mol)")
ax.set_ylabel("Prediction (kcal/mol)")
ax.set_title(r"Model prediction: $\Delta G_{vib}$, 1000K")
l = Line2D([xlow, xhigh], [xlow, xhigh], color="k")
llower = Line2D(
    [xlow, xhigh], [xlow - accuracy, xhigh - accuracy], color="grey", lw=0.5, alpha=0.2
)
lupper = Line2D(
    [xlow, xhigh], [xlow + accuracy, xhigh + accuracy], color="grey", lw=0.5, alpha=0.2
)
ax.add_line(l)
ax.add_line(llower)
ax.add_line(lupper)
ax.fill_between(shade, shade - accuracy, shade + accuracy, color="grey", alpha=0.2)
plt.savefig("Figures/" + "ParityPlot_" + system)
# Per-trial ZPE error statistics against the reference ZPE_true.
ZPE_mean = []
ZPE_max = []
count_ZPE = 0
count_ZPE_max = 0
count_ZPE_mean = 0
for i in range(ntrials):
    difference = abs(ZPE_all[i] - ZPE_true)
    ZPE_mean.append(np.mean(difference))
    ZPE_max.append(max(difference))
    # Count individual points (not trials) outside chemical accuracy.
    for j in range(len(ZPE_true)):
        if abs(ZPE_all[i][j] - ZPE_true[j]) > accuracy:
            count_ZPE += 1
# print('ZPE - within chemical accuracy (%)',(1-count_ZPE/(ntrials*len(ZPE_true)))*100)
for i in range(len(ZPE_mean)):
    if ZPE_mean[i] > accuracy:
        count_ZPE_mean += 1
for i in range(len(ZPE_max)):
    if ZPE_max[i] > accuracy:
        count_ZPE_max += 1
print("ZPE_mean - within chemical accuracy (%)", (ZPEaccFreqMean / ntrials) * 100)
print("ZPE_max - within chemical accuracy (%)", (ZPEaccFreqMax / ntrials) * 100)
# ZCT Calculations
# Zero-curvature tunneling corrections per trial, optionally mirroring the
# eigenvalue/ZPE profiles about their first point first.
if mirror:
    for i in range(ntrials):
        # NOTE(review): `copy` aliases ZPE_all[i]; remove() therefore mutates the
        # stored list before the concatenation below -- confirm this mirroring
        # (drop first point, reverse, prepend) is the intended construction.
        copy = ZPE_all[i]
        copy.remove(copy[0])
        copy = copy[::-1]
        ZPE_all[i] = copy + ZPE_all[i]
ZCT_err = []
ZCT_all = []
E0_list = []
VAG_list = []
SAG_list = []
# repeats_array = np.tile(index_zct, (len(alldf),1))
# alldf.insert(2,'index_zct',repeats_array)
for index, row in alldf.iterrows():
    # Build one record per ZCT grid point: (index, MEP energy, eigenvalues).
    dict_zct = []
    if mirror:
        evals_trial = row["Xfinal"].T
        evals_trial = evals_trial.tolist()
        evals_copy = evals_trial.copy()
        evals_trial.remove(evals_trial[0])
        evals_trial = evals_trial[::-1]
        evals_mirrored = np.array(evals_trial + evals_copy)
    for i in range(len(np.array(index_zct))):
        if mirror:
            MEP = np.array(Vmep_zct)
            dict_zct.append(
                {
                    "index_zct": np.array(index_zct)[i],
                    "Vmep": MEP[i],
                    "evals": evals_mirrored[i],
                }
            )
        else:
            dict_zct.append(
                {
                    "index_zct": np.array(index_zct)[i],
                    "Vmep": row["Energies"][i],
                    "evals": row["Xfinal"].T[i],
                }
            )
    df_zct = pd.DataFrame.from_dict(dict_zct)
    df_zct = df_zct.set_index("index_zct")
    # use True E0 and VAG
    ZCT_trial, E0_trial, VAG_trial, SAG_trial, V_aG_trial = ZCT.zct(
        T_tunnel, df_zct["evals"], df_zct["Vmep"], s, E0_true, VAG_true, SAG_true
    )
    # use calculated (Trial) data
    # ZCT_trial,E0_trial,VAG_trial,SAG_trial=ZCT.zct(T_tunnel,index_zct,df_zct['evals'],df_zct['Vmep'],s_list,s,'calc','calc','calc')
    ZCT_all.append(ZCT_trial)
    # Relative ZCT error in percent against the reference value.
    ZCT_err.append(abs(ZCT_trial - ZCT_true) / ZCT_true * 100)
    E0_list.append(E0_trial)
    VAG_list.append(VAG_trial)
    SAG_list.append(SAG_trial)
    print("Trial #", index, "\nZCT:", ZCT_trial)
|
<reponame>Melca-G/Aeolus<gh_stars>0
import sys
import ConfigParser
from os.path import expanduser
# Set system path
# Read package locations from the per-user STVTools.ini and extend sys.path
# so the STV master/secondary packages become importable.
home = expanduser("~")
# NOTE(review): this handle is never used or closed; config.read() below
# reopens the file itself -- confirm the open() can be dropped.
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
from pyrevit.framework import List
from pyrevit import revit, DB, forms
import re, clr, os, threading
import EwrQcUtils
import xlsxwriter
clr.AddReference('RevitAPI')
clr.AddReference("System")
from Autodesk.Revit.DB import FilteredElementCollector, Transaction, ImportInstance, \
OpenOptions,WorksetConfiguration, WorksetConfigurationOption, DetachFromCentralOption,\
ModelPathUtils, SaveAsOptions, WorksharingSaveAsOptions
from System.Collections.Generic import List
from Autodesk.Revit.UI.Events import DialogBoxShowingEventArgs
from Autodesk.Revit.UI import UIApplication
from Autodesk.Revit.ApplicationServices import Application
clr.AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
clr.AddReference('RevitAPIUI')
# Collect Save location and Rvt Files
# pick_file with multi_file=True presumably returns a list of selected
# paths (verify against pyrevit.forms); pick_folder returns the output dir.
collectorFiles = forms.pick_file(file_ext='rvt', multi_file=True, unc_paths=False)
destinationFolder = forms.pick_folder()
def RVTFileCollector(dir):
    """Return the names of all .rvt files directly inside *dir*.

    dir -- directory path to scan (parameter name kept for compatibility,
           even though it shadows the builtin ``dir``).
    """
    # os.listdir order is preserved; str() kept for IronPython string parity.
    return [str(name) for name in os.listdir(dir) if name.endswith(".rvt")]
def OpenFiles(oFile, app, audit):
    """Open a Revit model detached from central with all worksets closed.

    oFile -- full path to the .rvt model file
    app   -- Autodesk.Revit.ApplicationServices.Application instance
    audit -- truthy to audit the model while opening

    Returns the opened Document.
    """
    openOpt = OpenOptions()
    # Direct assignment replaces the redundant `if audit == True: ... else: ...`.
    openOpt.Audit = bool(audit)
    openOpt.DetachFromCentralOption = DetachFromCentralOption.DetachAndPreserveWorksets
    wsopt = WorksetConfiguration(WorksetConfigurationOption.CloseAllWorksets)
    # wsopt.Open(worksetList)
    openOpt.SetOpenWorksetsConfiguration(wsopt)
    modelPath = ModelPathUtils.ConvertUserVisiblePathToModelPath(oFile)
    currentdoc = app.OpenDocumentFile(modelPath, openOpt)
    try:
        # Best-effort: dismiss any dialog Revit raises while opening (result 1 = OK).
        DialogBoxShowingEventArgs.OverrideResult(1)
    except:
        pass
    return currentdoc
# Main
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
__doc__ = 'Report the Model Element Quality Check outcome in an Excel file according to PA standard.'
uiapp = UIApplication(doc.Application)
application = uiapp.Application
# --- QC check wrappers -------------------------------------------------------
# Each wrapper runs one EwrQcUtils check against the opened document and writes
# the collected rows into a dedicated sheet of the module-level `excelFile`
# workbook (bound in the main loop below), starting at row 1, column 0.
def DimensionProcessing(openedDoc):
    collectorDim = EwrQcUtils.DimensionsCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'DIMENSIONS', 1, 0, collectorDim)
def SettingsProcessing(openedDoc):
    collectorSettings = EwrQcUtils.SettingsCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'SETTINGS', 1, 0, collectorSettings)
def ViewsProcessing(openedDoc):
    collectorView = EwrQcUtils.ViewsCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'VIEWS', 1, 0, collectorView)
def FamiliesProcessing(openedDoc):
    collectorFamily = EwrQcUtils.FamilyNameCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'FAMILY NAME', 1, 0, collectorFamily)
def LinksProcessing(openedDoc):
    collectorLink = EwrQcUtils.LinkCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'LINKS', 1, 0, collectorLink)
def TitleBlocksProcessing(openedDoc):
    collectorTitleBlock = EwrQcUtils.TitleBlockCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'TITLE BLOCK', 1, 0, collectorTitleBlock)
def SheetsProcessing(openedDoc):
    collectorSheet = EwrQcUtils.SheetsCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'SHEETS', 1, 0, collectorSheet)
def TextsProcessing(openedDoc):
    collectorText = EwrQcUtils.TextCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'TEXT STYLE', 1, 0, collectorText)
def PositionProcessing(openedDoc):
    collectorPosition = EwrQcUtils.PositionCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'PROJECT INFO', 1, 0, collectorPosition)
def CateinWorksetsProcessing(openedDoc):
    collectorCateinWorkset = EwrQcUtils.CateinWorksetCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'CATEGORIES IN WORKSETS', 1, 0, collectorCateinWorkset)
def LevelsProcessing(openedDoc):
    collectorLevels = EwrQcUtils.LevelCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'LEVEL', 1, 0, collectorLevels)
def SheetElementsProcessing(openedDoc):
    collectorSheetElements = EwrQcUtils.SheetElementCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'SHEET ELEMENT', 1, 0, collectorSheetElements)
def LinesProcessing(openedDoc):
    collectorLines = EwrQcUtils.LineCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'LINE', 1, 0, collectorLines)
def FilledRegionsProcessing(openedDoc):
    collectorFilledRegion = EwrQcUtils.FilledRegionCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'FILLED REGIONS', 1, 0, collectorFilledRegion)
def AnnotationsProcessing(openedDoc):
    collectorAnnotationSymbol = EwrQcUtils.AnnotationSymbolCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'ANNOTATION SYMBOLS', 1, 0, collectorAnnotationSymbol)
def CadImportsProcessing(openedDoc):
    collectorCADImports = EwrQcUtils.CadImportsCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'CAD LINKS AND IMPORTS', 1, 0, collectorCADImports)
def WorksetsProcessing(openedDoc):
    collectorWorkset = EwrQcUtils.WorksetCheck(openedDoc)
    EwrQcUtils.ExcelWriter(excelFile, 'WORKSETS', 1, 0, collectorWorkset)
# Transaction: run every QC check on each selected model and write one Excel
# report per model into the chosen destination folder.
if len(collectorFiles) > 0:
    t = Transaction(doc, 'Check QAQC Elements')
    t.Start()
    for aDoc in collectorFiles:
        openedDoc = OpenFiles(aDoc, application, audit = False)
        print(str(openedDoc.Title) + ' Opened')
        # Report named after the model title minus the '_detached' suffix
        # Revit appends when a workshared model is opened detached.
        rawTitle = re.split('detached', openedDoc.Title)[0]
        title = rawTitle[0:len(rawTitle) - 1]
        fileName = destinationFolder + '\\' + title + '.xlsx'
        # Define and open the Excel workbook for this model.
        excelFile = EwrQcUtils.ExcelOpener(fileName)
        # Blank intro sheet so the workbook always opens on an empty first page.
        EwrQcUtils.ExcelWriter(excelFile, 'INTRO', 1, 0, [])
        # BUG FIX: the original wrapped each call in
        #   threading.Thread(name=..., target=SomeProcessing(openedDoc))
        # which *invokes* the check immediately (the call is evaluated to
        # produce `target`) and never starts the threads. The checks therefore
        # always ran sequentially; plain calls keep that exact behavior and
        # drop the misleading wrappers (sharing one xlsxwriter workbook across
        # real threads would not be safe anyway). The unused
        # WorksharingSaveAsOptions instance was removed as well.
        DimensionProcessing(openedDoc)
        SettingsProcessing(openedDoc)
        ViewsProcessing(openedDoc)
        FamiliesProcessing(openedDoc)
        LinksProcessing(openedDoc)
        TitleBlocksProcessing(openedDoc)
        SheetsProcessing(openedDoc)
        TextsProcessing(openedDoc)
        PositionProcessing(openedDoc)
        CateinWorksetsProcessing(openedDoc)
        LevelsProcessing(openedDoc)
        SheetElementsProcessing(openedDoc)
        LinesProcessing(openedDoc)
        FilledRegionsProcessing(openedDoc)
        AnnotationsProcessing(openedDoc)
        CadImportsProcessing(openedDoc)
        WorksetsProcessing(openedDoc)
        # Close Excel and the detached Revit document (False = do not save).
        excelFile.close()
        openedDoc.Close(False)
        print('File Saved: ' + fileName)
        # EwrQaUtils.FormattingLine(wb['LINE'])
    t.Commit()
else:
    forms.alert('No File is selected', title='', sub_msg=None, expanded=None, footer='', ok=True, cancel=False, yes=False,
                no=False, retry=False, warn_icon=True, options=None, exitscript=False)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: tester
# language: python
# name: tester
# ---
# %% [markdown]
# This notebook contains examples for the `SMILESTokenizer` class to illustrate its usage.
# %% [markdown]
# # Imports
# %%
# %load_ext autoreload
# %autoreload 2
import sys
#sys.path.append("..")
import pickle
import matplotlib.pyplot as plt
from pysmilesutils.tokenize import *
from pysmilesutils.analyze import analyze_smiles_tokens
# %% [markdown]
# # Basic Usage
# %% [markdown]
# ## `SMILESTokenizer`
# %% [markdown]
# The easiest way to initialize the `SMILESTokenizer` is to call the constructor with a list of tokens, used when parsing smiles, and a list of SMILES which are used to create the vocabulary.
# %%
smiles = ['ClCCCN1CCCC1', 'CI.Oc1ncccc1Br']
tokenizer = SMILESTokenizer(tokens=["Br", "Cl"], smiles=smiles)
print("Encoded SMILES:", tokenizer(smiles))
print("Vocabulary dictionary:", tokenizer.vocabulary)
# %% [markdown]
# The vocabulary will change based on the SMILES supplied since the tokenizer treats all single characters as tokens, except for the ones supplied in the token list.
#
# It is possible to add more tokens to an already initialized tokenizer, but this will require an update to the vocabulary. Which can be made by supplying a list of SMILES.
# %%
tokenizer.add_tokens(["CCCC", "CCC", "CC"], smiles=smiles)
print("Encoded SMILES:", tokenizer(smiles))
print("Vocabulary dictionary:", tokenizer.vocabulary)
# %% [markdown]
# In this instance we added different numbers of carbons as separate tokens. Note that the tokenizer prioritizes the first tokens in the list when parsing. Thus, if we had reversed the list, `["CC", "CCC", "CCCC"]`, the token `"CCCC"` would never have been considered a token, since it is treated as two pairs of `"CC"`.
# %% [markdown]
# Another way to accomplish this is to use regular expressions.
# %%
regex_tokens = ["C+","c+"]
regex_tokenizer = SMILESTokenizer(
tokens=["Br", "Cl"],
regex_token_patterns=regex_tokens,
smiles=smiles
)
print("Encoded SMILES:", regex_tokenizer(smiles))
print("Vocabulary dictionary:", regex_tokenizer.vocabulary)
# %% [markdown]
# Here we have included two regular expression tokens: 1 or more `"C"`, and 1 or more `"c"`. Note that these are present in the vocabulary as tokens, not regular expressions.
# %% [markdown]
# ## `SMILESAtomTokenizer`
# %% [markdown]
# There also exists a pre-built extension of the `SMILESTokenizer` called `SMILESAtomTokenizer`. This tokenizer treats all atoms as tokens.
# %%
smiles=['CI.Oc1ncccc1Br', 'COC(=O)Cc1c(C)nn(Cc2ccc(C=O)cc2)c1C.[Mg+]Cc1ccccc1']
atom_tokenizer = SMILESAtomTokenizer(smiles=smiles)
print("Encoded SMILES:", atom_tokenizer(smiles))
print("Vocabulary dictionary:", atom_tokenizer.vocabulary)
# %% [markdown]
# Note that both `Mg` and `Br` are part of the vocabulary.
# %% [markdown]
# # Useful Functions
# %% [markdown]
# ## Tokenizer Vocabulary
# %% [markdown]
# We can also manually update a vocabulary for a tokenizer, for example if we have another set of smiles. This is accomplished using the function `create_vocabulary_from_smiles`.
# %%
smiles = ['ClCCCN1CCCC1', 'CI.Oc1ncccc1Br']
tokenizer = SMILESTokenizer(smiles=smiles)
print(tokenizer.vocabulary)
new_smiles = ['N#Cc1ccnc(CO)c1', 'O=C(CCCl)c1ccccc1']
tokenizer.create_vocabulary_from_smiles(new_smiles)
print(tokenizer.vocabulary)
# %% [markdown]
# Note here that the two vocabularies are different.
# %% [markdown]
# ## Token Statistics
# %% [markdown]
# There also exists a function in `pysmilesutils.analyze` which provides some statistics for tokens in a set of SMILES
# %%
# Load reactant SMILES from the Pande dataset
with open("data/pande_dataset.pickle", "rb") as f:
ds = pickle.load(f)
reactants = list(ds.reactants)
smiles_tokenizer = SMILESAtomTokenizer(smiles=reactants)
data_stats = analyze_smiles_tokens(smiles_tokenizer, reactants)
print(data_stats.keys())
# %%
num_tokens = data_stats["num_tokens"]
_ = plt.bar(*num_tokens)
# %%
token_freq = data_stats["token_frequency"]
for token, n in zip(*token_freq):
print(f"{token:4}{n}")
# %%
|
import time
import pygame as pg
from Player_Objects import *
from main import *
from Values import *
# Text Back grount image
def text_box_background():
    """Blit the dialogue-box artwork along the bottom strip of the screen."""
    box_image = pg.image.load(os.path.join('Art', 'Text Box.png'))
    box_rect = pg.Rect((5 * TILESIZE, 12 * TILESIZE, 14 * TILESIZE, 3 * TILESIZE))
    # Inner text container kept for reference; the debug fill stays disabled.
    text_container = pg.Rect((6 * TILESIZE, 12 * TILESIZE, 12 * TILESIZE, 3 * TILESIZE))
    WINDOW.blit(box_image, box_rect)
    #pg.draw.rect(WINDOW, GREY, text_container)
# Enter box for every box
def End_of_Box(endx, endy, color):
    """Render the 'Press Return to continue...' prompt centered at (endx, endy)."""
    prompt_font = pg.font.Font('Early GameBoy.ttf', 7)
    prompt = prompt_font.render('Press Return to continue...', False, color)
    prompt_rect = prompt.get_rect()
    prompt_rect.center = (endx, endy)
    WINDOW.blit(prompt, prompt_rect)
def Look_around_more():
    """Show the hint dialogue nudging the player to keep exploring."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    lines = [
        ('Maybe you should look around a bit more, its', (12 * TILESIZE - 6, (13 * TILESIZE) - 8)),
        ('been a while since you were awake.', (11 * TILESIZE - 14, (14 * TILESIZE) - 24)),
    ]
    rendered = [(font.render(message, False, BLACK), center) for message, center in lines]
    text_box_background()
    for surface, center in rendered:
        rect = surface.get_rect()
        rect.center = center
        WINDOW.blit(surface, rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
def Room_Unavailable():
    """Tell the player that the door they just tried is locked."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    message = font.render('the door is locked.', False, BLACK)
    message_rect = message.get_rect()
    message_rect.center = (9 * TILESIZE - 12, (13 * TILESIZE) - 8)
    text_box_background()
    WINDOW.blit(message, message_rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
def Title_Box():
    """Blit the title-sequence artwork plus the continue prompt (in white)."""
    art = pg.image.load(os.path.join('Art', 'Title Sequence.png'))
    art_rect = pg.Rect((6 * TILESIZE, 6 * TILESIZE, 12 * TILESIZE, 8 * TILESIZE))
    #pg.draw.rect(WINDOW, GREY, art_rect)
    WINDOW.blit(art, art_rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) - 16, WHITE)
def photo_of_alice():
    """Blit the photo-of-Alice artwork near the top of the screen."""
    photo = pg.image.load(os.path.join('Art', 'Photoofalice.png'))
    photo_rect = pg.Rect((10 * TILESIZE, 5 * TILESIZE, 4 * TILESIZE, 6 * TILESIZE))
    #pg.draw.rect(WINDOW, GREY, photo_rect)
    WINDOW.blit(photo, photo_rect)
    # Continue prompt intentionally disabled for this screen:
    #End_of_Box(15 * TILESIZE, (14 * TILESIZE) - 16, WHITE)
def text_box_1():
    """Opening narration: waking up aboard the S.S. Voyager."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    lines = [
        ('You wake up aboard the S.S. Voyager, the', (12 * TILESIZE - 16, (13 * TILESIZE) - 8)),
        ('largests of the space fleet vessels, WAY', (12 * TILESIZE - 14, (14 * TILESIZE) - 24)),
        ('BOUND TO THE RED STAR ...', (10 * TILESIZE - 14, (14 * TILESIZE) - 8)),
    ]
    rendered = [(font.render(message, False, BLACK), center) for message, center in lines]
    text_box_background()
    for surface, center in rendered:
        rect = surface.get_rect()
        rect.center = center
        WINDOW.blit(surface, rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
def text_box_2():
    """Narration: the player looks out of the window at the stars."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    lines = [
        ('YOU LOOK OUT THE WINDOW INTO A DEEP DARKNESS', (12 * TILESIZE - 10, (13 * TILESIZE) - 8)),
        ('WITH SPECKLES OF LIGHT, THOUSANDS OF STARS', (12 * TILESIZE - 14, (14 * TILESIZE) - 24)),
        ('GLISTEN IN THE DISTANCE!', (10 * TILESIZE - 24, (14 * TILESIZE) - 8)),
    ]
    rendered = [(font.render(message, False, BLACK), center) for message, center in lines]
    text_box_background()
    for surface, center in rendered:
        rect = surface.get_rect()
        rect.center = center
        WINDOW.blit(surface, rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
def text_box_3():
    """Narration shown when the player inspects the photo of Alice."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    lines = [
        ('A picture of you and Alice ... She sent it just', (12 * TILESIZE - 6, (13 * TILESIZE) - 8)),
        ("before we took off. It's been 10 light years", (12 * TILESIZE - 14, (14 * TILESIZE) - 24)),
        ('since we left Terra Base, I wonder if shes okay...', (12 * TILESIZE + 8, (14 * TILESIZE) - 8)),
    ]
    rendered = [(font.render(message, False, BLACK), center) for message, center in lines]
    text_box_background()
    for surface, center in rendered:
        rect = surface.get_rect()
        rect.center = center
        WINDOW.blit(surface, rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
# First Arrival of West Hall
def text_box_4():
    """Ruby's urgent dialogue on the player's first arrival at West Hall."""
    font = pg.font.Font('Early GameBoy.ttf', 9)
    lines = [
        ('Ruby: Ren, Quickly!', (9 * TILESIZE - 6, (13 * TILESIZE) - 8)),
        ("something is happening at the Atrium!", (11 * TILESIZE + 2, (14 * TILESIZE) - 24)),
    ]
    rendered = [(font.render(message, False, BLACK), center) for message, center in lines]
    text_box_background()
    for surface, center in rendered:
        rect = surface.get_rect()
        rect.center = center
        WINDOW.blit(surface, rect)
    End_of_Box(15 * TILESIZE, (14 * TILESIZE) + 10, BLACK)
from dataclasses import dataclass
from inspect import getmodule, getsourcelines, getsource
from operator import attrgetter
from textwrap import indent
from time import process_time
from typing import List
@dataclass
class Measure:
    """One benchmark result: *name* ran at *hertz* iterations per second."""

    name: str        # display name of the measured callable
    hertz: float     # measured throughput, runs per second
    code: str        # source snippet shown in the reST report
    highlight: bool = False  # bold this row in the reST table

    def summary(self):
        """Return a one-line ``name: runs/sec`` summary."""
        return f'{self.name}: {self.hertz:,.0f}'

    def rst_tuple(self):
        """Return ``(name, hertz)`` as display strings, bolded when highlighted."""
        name = self.name
        hz = format(self.hertz, ',.0f')
        if self.highlight:
            name = '**' + name + '**'
            hz = '**' + hz + '**'
        return name, hz


class Benchmark:
    """Collect timed measures of callables and render them as text or reST."""

    def __init__(self, name: str):
        self.name = name
        self.measures: List[Measure] = []
        # Each callable is run at least min_runs times AND for more than
        # max_time seconds of CPU time before its rate is recorded.
        self.min_runs = 100
        self.max_time = 1

    def measure(self, name: str = ..., source=(...,), highlight=False):
        """Decorator factory: time the decorated callable and record a Measure.

        name      -- display name (defaults to the function's __name__)
        source    -- pieces of source to attach to the report; each item may be
                     Ellipsis (the decorated function's own body), a callable,
                     an int line index into the module source, or a
                     (start, end) line-range tuple
        highlight -- bold this entry in the reST results table
        """
        def ret(func):
            nonlocal name
            if name is ...:
                name = func.__name__
            code_parts = []
            mod_code, _ = getsourcelines(getmodule(func))
            for s in source:
                if s is ...:
                    # The decorated function itself, minus its first line.
                    source_lines = getsource(func).splitlines(keepends=True)
                    code_parts.append(''.join(source_lines[1:]))
                elif callable(s):
                    lines, lnum = getsourcelines(s)
                    lnum -= 1
                    # NOTE(review): the +1 makes the slice include one line past
                    # the object's own source -- looks like an off-by-one;
                    # confirm before changing, callers may rely on it.
                    last_line = lnum + len(lines) + 1
                    # Walk back over any decorator lines above the definition.
                    while lnum > 0:
                        if not mod_code[lnum - 1].startswith('@'):
                            break
                        lnum -= 1
                    code_parts.append(''.join(mod_code[lnum: last_line]))
                elif isinstance(s, int):
                    code_parts.append(mod_code[s])
                else:
                    start, end = s
                    code_parts.append(''.join(mod_code[start:end]))
            code = "\n\n".join(code_parts)
            start_time = process_time()
            runs = 0
            while True:
                func()
                runs += 1
                elapsed = process_time() - start_time
                if elapsed > self.max_time and runs >= self.min_runs:
                    break
            self.measures.append(Measure(name=name, hertz=runs / elapsed, code=code, highlight=highlight))
            return func
        return ret

    def summary(self):
        """Return a multi-line summary: benchmark name, then one indented line per measure."""
        parts = [f'{self.name}:']
        parts.extend(('\t' + m.summary()) for m in self.measures)
        return '\n'.join(parts)

    def rst(self):
        """Render every measure plus a rate-sorted results table as reST."""
        ret = [
            self.name,
            '=' * (len(self.name) + 1)
        ]
        for m in self.measures:
            ret.extend((
                m.name,
                '-' * (len(m.name) + 1),
                '.. code-block:: python',
                ''
            ))
            ret.append(indent(m.code, '    '))
        sorted_measures = sorted(self.measures, key=attrgetter('hertz'), reverse=True)
        rows = [m.rst_tuple() for m in sorted_measures]
        name_len = max(max(len(r[0]) for r in rows), 5)
        # BUG FIX: the rate column width must come from the hertz strings
        # (r[1]); the original measured r[0] (the names) a second time, which
        # misaligned the table whenever a rate string was wider than every name.
        hz_len = max(max(len(r[1]) for r in rows), 8)
        head_row = '=' * name_len + '  ' + '=' * hz_len
        ret.extend((
            'results:',
            '--------',
            head_row,
            'usage'.ljust(name_len) + '  ' + 'runs/sec'.ljust(hz_len),
            head_row,
        ))
        for n, h in rows:
            ret.append(n.ljust(name_len) + '  ' + h.ljust(hz_len))
        ret.append(head_row)
        ret.append('\n')
        return '\n'.join(ret)
|
<gh_stars>1-10
import os
import json
import argparse
import numpy as np
from time import time
from flowmatch.utils import load_config
from combine.utils import evaluate, nms_wrapper, plot_pr_curves
from combine.scores import HardAND
from combine.evaluator import Evaluator
COLORS = ['tab:blue', 'tab:red', 'tab:green']
LINESTYLE = ['-', '-', '--']
OUT_DIR = 'plots'
def make_plot(aps, names, recs, title):
    """Plot PR curves for the given scorers into plots/<title>.

    aps   -- one AP curve per scorer
    names -- legend label per scorer
    recs  -- recall values shared by the curves
    title -- output file name stem
    """
    plot_pr_curves( aps,
                    names,
                    recs,
                    'plots/',
                    '{}'.format(title),
                    COLORS[:len(aps)],
                    LINESTYLE[:len(aps)])
def evaluate_detector(cfg, cat_ids, dt_json_pth):
    """Evaluate raw detector output against cfg's ground-truth annotations.

    Returns (ap, ar, recs) restricted to cat_ids, as produced by
    combine.utils.evaluate.
    """
    with open(dt_json_pth, 'r') as f:
        dt_json_data = json.load(f)
    det_ap, det_ar, recs = evaluate(os.path.join(cfg.root, cfg.gt_json),
                                    dt_json_data,
                                    False,
                                    cat_ids=cat_ids)
    return det_ap, det_ar, recs
def generate_nms_score_files(dt_json_pth, iou_thresh, ratio_thresh, run_dir,
                             nvp=15):
    """Run NMS over the detections and dump per-detection keep/drop scores.

    Writes run_dir/nms_<iou>_<ratio>.json: one record per detection id with a
    list of nvp identical 0/1 scores (1 = the detection survived NMS).
    """
    with open(dt_json_pth, 'r') as f:
        dt_json_data = json.load(f)
    out_pth = os.path.join(run_dir,
                           'nms_{:.1f}_{:.1f}.json'.format(iou_thresh, ratio_thresh))
    # if not os.path.exists(out_pth):
    nms_keep = nms_wrapper(dt_json_data, iou_thresh, ratio_thresh)
    keep_ids = set([x['id'] for x in nms_keep])
    nms_scores = []
    for dt in dt_json_data:
        nms_scores.append({'id': dt['id'],
                           'scores': [1 if dt['id'] in keep_ids else 0] * nvp})
    with open(out_pth, 'w') as f:
        json.dump(nms_scores, f)
def evaluate_scorers(cfg, cat_ids, dt_pth, scorers, params_list,
                     names, plot_title):
    """Evaluate each scorer and (for up to three) plot their PR curves together.

    A scorer paired with params=None is evaluated as the raw detector from
    dt_pth; otherwise an Evaluator re-scores the detections with its params.
    Prints "(mean AP, max AP)" per scorer at the end.
    """
    assert(len(scorers) == len(names))
    aps = []
    for scorer, params in zip(scorers, params_list):
        if params is None:
            ap, ar, recs = evaluate_detector(cfg, cat_ids, dt_pth)
        else:
            evaluator = Evaluator(scorer=scorer,
                                  run_dir=os.path.join(cfg.root,cfg.run_dir),
                                  gt_json_file=os.path.join(cfg.root, cfg.gt_json),
                                  dt_json_file=os.path.join(cfg.root, cfg.dt_json),
                                  use_det_score=True,
                                  cat_ids=cat_ids)
            ap, ar, recs = evaluator.get_pr_curve(params)
        aps.append(ap)
    # NOTE(review): `recs` comes from the last scorer evaluated; assumed to be
    # identical for all scorers -- confirm.
    if len(aps) <= 3:
        make_plot(aps, names, recs, plot_title)
    print(['{}:({:.3f},{:.3f})'.format(name, np.mean(x), np.max(x)) for x,name in zip(aps, names)])
if __name__== '__main__':
    # CLI: pick a config file and the dataset split to evaluate on.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='config.yaml', help='config file path')
    parser.add_argument('--eval_ds', default='gmu_test', help='dataset to evaluate')
    parser.set_defaults()
    args = parser.parse_args()
    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)
    eval_ds = args.eval_ds
    plot_title = '{}'.format(eval_ds)
    cfg = load_config(args.config).combine
    # Propagate the shared root into the per-dataset sub-config.
    cfg[eval_ds].root = cfg.root
    dt_pth = os.path.join(cfg[eval_ds].root, cfg[eval_ds].dt_json)
    # Build one (scorer, params) pair per configured scorer; the literal
    # string 'None' marks the raw-detector baseline.
    scorers, names, params_list = [], [], []
    for name in cfg.scorers.keys():
        if cfg.scorers[name].score_names == 'None':
            scorer = None
            params = None
        else:
            scorer = HardAND(cfg.scorers[name].score_names)
            params = cfg.scorers[name].params
            if 'nvps' in cfg.scorers[name]:
                scorer.nvps = cfg.scorers[name].nvps
        names.append(name)
        scorers.append(scorer)
        params_list.append(params)
    cfg = cfg[eval_ds]
    cat_ids = cfg.catIds
    evaluate_scorers(cfg, cat_ids, dt_pth, scorers, params_list,
                     names, plot_title)
|
<filename>kg/triple_extract/triple_extract_rule.py
"""
基于依存句法和语义角色标注的三元组抽取
"""
from pyhanlp import HanLP
import os, re
class TripleExtractor:
    """Rule-based (subject, predicate, object) triple extraction for Chinese
    text, driven by HanLP dependency parses.

    The Chinese relation labels used below are HanLP dependency tags:
    主谓关系 = subject-verb, 动宾关系 = verb-object, 定中关系 = attributive,
    动补结构 = verb-complement, 介宾关系 = preposition-object.
    """
    def __init__(self):
        self.parser = HanlpParser()
    '''文章分句处理, 切分长句,冒号,分号,感叹号等做切分标识'''
    # Split text into sentences on Chinese/ASCII ?, !, 。, ;, : and newlines.
    def split_sents(self, content):
        return [sentence for sentence in re.split(r'[??!!。;;::\n\r]', content) if sentence]
    '''三元组抽取主函数'''
    # Core extraction: scan every token and apply three dependency patterns.
    def ruler2(self, words, postags, child_dict_list, arcs):
        svos = []
        for index in range(len(postags)):
            # Extract with the dependency parse, predicate-centered.
            if postags[index]:
                child_dict = child_dict_list[index]
                # Pattern 1: subject-verb-object.
                if '主谓关系' in child_dict and '动宾关系' in child_dict:
                    r = words[index]
                    e1 = self.complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0])
                    e2 = self.complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
                    svos.append([e1, r, e2])
                    print("谓语中心",[e1, r, e2])
                # Pattern 2: postposed attributive with a verb-object relation.
                relation = arcs[index][0]
                head = arcs[index][2]
                if relation == '定中关系':
                    if '动宾关系' in child_dict:
                        e1 = self.complete_e(words, postags, child_dict_list, head - 1)
                        r = words[index]
                        e2 = self.complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
                        # Strip the predicate+object prefix if it leaked into e1.
                        temp_string = r + e2
                        if temp_string == e1[:len(temp_string)]:
                            e1 = e1[len(temp_string):]
                        if temp_string not in e1:
                            svos.append([e1, r, e2])
                            print("定语后置", [e1, r, e2])
                # Pattern 3: subject + verb-complement whose complement carries
                # a preposition-object.
                if '主谓关系' in child_dict and '动补结构' in child_dict:
                    e1 = self.complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0])
                    cmp_index = child_dict['动补结构'][0]
                    r = words[index] + words[cmp_index]
                    if '介宾关系' in child_dict_list[cmp_index]:
                        e2 = self.complete_e(words, postags, child_dict_list, child_dict_list[cmp_index]['介宾关系'][0])
                        svos.append([e1, r, e2])
                        print("介宾", [e1, r, e2])
        return svos
    '''对找出的主语或者宾语进行扩展'''
    # Recursively expand a subject/object with its attributive modifiers
    # (prefix) and, for verbs, its own object/subject (postfix).
    def complete_e(self, words, postags, child_dict_list, word_index):
        child_dict = child_dict_list[word_index]
        prefix = ''
        if '定中关系' in child_dict:
            for i in range(len(child_dict['定中关系'])):
                prefix += self.complete_e(words, postags, child_dict_list, child_dict['定中关系'][i])
        postfix = ''
        if postags[word_index] == 'v' or postags[word_index] == 'vn':
            if '动宾关系' in child_dict:
                postfix += self.complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
            if '主谓关系' in child_dict:
                prefix = self.complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0]) + prefix
        return prefix + words[word_index] + postfix
    '''程序主控函数'''
    # Entry point: split into sentences, parse each, accumulate all triples.
    def triples_main(self, content):
        sentences = self.split_sents(content)
        svos = []
        for sentence in sentences:
            words, postags, child_dict_list, arcs = self.parser.parser_main(sentence)
            svo = self.ruler2(words, postags, child_dict_list, arcs)
            svos += svo
        return svos
class HanlpParser:
    """Thin wrapper around HanLP dependency parsing that also builds, for each
    token, a dictionary of its dependency children grouped by relation."""
    def __init__(self):
        pass
    '''句法分析---为句子中的每个词语维护一个保存句法依存儿子节点的字典'''
    # For every token, collect {relation label -> [child token indices]}.
    def build_parse_child_dict(self, arcs):
        words, postags = [], []
        child_dict_list = []
        format_parse_list = []
        for index in range(len(arcs)):
            words.append(arcs[index].LEMMA)
            postags.append(arcs[index].POSTAG)
            child_dict = dict()
            for arc_index in range(len(arcs)):
                if arcs[arc_index].HEAD.ID == index+1:  # arc IDs are 1-based
                    if arcs[arc_index].DEPREL in child_dict:
                        child_dict[arcs[arc_index].DEPREL].append(arc_index)
                    else:
                        child_dict[arcs[arc_index].DEPREL] = []
                        child_dict[arcs[arc_index].DEPREL].append(arc_index)
            child_dict_list.append(child_dict)
        rely_id = [arc.HEAD.ID for arc in arcs]  # dependency head id per token
        relation = [arc.DEPREL for arc in arcs]  # dependency relation per token
        heads = ['Root' if id == 0 else words[id - 1] for id in rely_id]  # head word per token
        for i in range(len(words)):
            # e.g. ['定中关系', '李克强', 0, 'nh', '总理', 1, 'n']
            # (relation, word, index, POS, head word, head index, head POS)
            a = [relation[i], words[i], i, postags[i], heads[i], rely_id[i]-1, postags[rely_id[i]-1]]
            format_parse_list.append(a)
        return words, postags, child_dict_list, format_parse_list
    '''parser主函数'''
    # Parse one sentence with HanLP and return the derived structures.
    def parser_main(self, sentence):
        arcs = HanLP.parseDependency(sentence).word
        words, postags, child_dict_list, format_parse_list = self.build_parse_child_dict(arcs)
        return words, postags, child_dict_list, format_parse_list
if __name__ == "__main__":
sentence = "李克强总理今天来我家了,我感到非常荣幸"
sentence2 = "以色列国防军20日对加沙地带实施轰炸,造成3名巴勒斯坦武装人员死亡。此外,巴勒斯坦人与以色列士兵当天在加沙地带与以交界地区发生冲突,一名巴勒斯坦人被打死。当天的冲突还造成210名巴勒斯坦人受伤。当天,数千名巴勒斯坦人在加沙地带边境地区继续“回归大游行”抗议活动。部分示威者燃烧轮胎,并向以军投掷石块、燃烧瓶等,驻守边境的以军士兵向示威人群发射催泪瓦斯并开枪射击。"
# 分词和词性标注
# terms = HanLP.segment(sentence)
# for term in terms:
# print(term.word, term.nature)
# 依存句法分析
ret_dep = HanLP.parseDependency(sentence)
print(ret_dep)
extractor = TripleExtractor()
svos = extractor.triples_main(sentence)
print(svos)
|
# encoding=utf8
# pylint: disable=mixed-indentation, multiple-statements, line-too-long, expression-not-assigned, len-as-condition, no-self-use, unused-argument, no-else-return, old-style-class, dangerous-default-value
from numpy import random as rand, inf, ndarray, asarray, array_equal
from NiaPy.util import Task, OptimizationType
__all__ = ['Algorithm', 'Individual']
class Algorithm:
r"""Class for implementing algorithms.
**Date:** 2018
**Author:** <NAME>
**License:** MIT
"""
Name = ['Algorithm', 'AAA']
	def __init__(self, **kwargs):
		r"""Initialize the algorithm: seed the RNG and bind an optimization task.

		**Arguments:**

		task {Task} -- optimization task to perform; when omitted, one is
		built from D / nFES / nGEN / benchmark / optType below

		seed {integer} -- seed for the algorithm's private RandomState

		D {integer} -- dimension of the problem (default 10)

		nFES {integer} -- number of function evaluations (default inf)

		nGEN {integer} -- number of generations/iterations (default inf)

		benchmark {object} -- benchmark implementation object (default 'ackley')

		**Raises:**

		TypeError -- raised when given benchmark function does not exist

		**See**:

		Algorithm.setParameters(self, **kwargs)
		"""
		# Consume task/seed first; all remaining kwargs configure the concrete algorithm.
		task, self.Rand = kwargs.pop('task', None), rand.RandomState(kwargs.pop('seed', None))
		self.task = task if task is not None else Task(kwargs.pop('D', 10), kwargs.pop('nFES', inf), kwargs.pop('nGEN', inf), kwargs.pop('benchmark', 'ackley'), optType=kwargs.pop('optType', OptimizationType.MINIMIZATION))
		self.setParameters(**kwargs)
def setParameters(self, **kwargs):
r"""Set the parameters/arguments of the algorithm.
**Arguments:**
kwargs {dict} -- parameter values dictionary
"""
pass
def setTask(self, task):
r"""Set the benchmark function for the algorithm.
**Arguments**:
bech {Task} -- optimization task to perform
"""
self.task = task
return self
def setBenchmark(self, bech):
r"""Set the benchmark for the algorithm.
**Arguments**:
bech {Task} -- optimization task to perform
**See**:
Algorithm.setTask
"""
return self.setTask(bech)
def rand(self, D=1):
r"""Get random distribution of shape D in range from 0 to 1.
**Arguments:**
D {array} or {int} -- shape of returned random distribution
"""
if isinstance(D, (ndarray, list)): return self.Rand.rand(*D)
elif D > 1: return self.Rand.rand(D)
else: return self.Rand.rand()
def uniform(self, Lower, Upper, D=None):
r"""Get uniform random distribution of shape D in range from "Lower" to "Upper".
**Arguments:**
Lower {array} or {real} or {int} -- lower bound
Upper {array} or {real} or {int} -- upper bound
D {array} or {int} -- shape of returned uniform random distribution
"""
return self.Rand.uniform(Lower, Upper, D) if D is not None else self.Rand.uniform(Lower, Upper)
def normal(self, loc, scale, D=None):
r"""Get normal random distribution of shape D with mean "loc" and standard deviation "scale".
**Arguments:**
loc {} -- mean of the normal random distribution
scale {} -- standard deviation of the normal random distribution
D {array} or {int} -- shape of returned normal random distribution
"""
return self.Rand.normal(loc, scale, D) if D is not None else self.Rand.normal(loc, scale)
def randn(self, D=None):
r"""Get standard normal distribution of shape D.
**Arguments**:
D {array} -- shape of returned standard normal distribution
"""
if D is None: return self.Rand.randn()
elif isinstance(D, int): return self.Rand.randn(D)
return self.Rand.randn(*D)
def randint(self, Nmax, D=1, Nmin=0, skip=[]):
r"""Get discrete uniform (integer) random distribution of D shape in range from "Nmin" to "Nmax".
**Arguments:**
Nmin {integer} -- lower integer bound
Nmax {integer} -- one above upper integer bound
D {array} or {int} -- shape of returned discrete uniform random distribution
skip {array} -- numbers to skip
"""
r = None
if isinstance(D, (list, tuple, ndarray)): r = self.Rand.randint(Nmin, Nmax, D)
elif D > 1: r = self.Rand.randint(Nmin, Nmax, D)
else: r = self.Rand.randint(Nmin, Nmax)
return r if r not in skip else self.randint(Nmax, D, Nmin, skip)
def run(self):
r"""Start the optimization.
**See**:
Algorithm.runTask(self, taks)
"""
return self.runTask(self.task)
def runYield(self, task):
r"""Run the algorithm for a single iteration and return the best solution.
**Arguments:**
task {Task} -- task with bounds and objective function for optimization
Return:
solution {array} -- point of the best solution
fitness {real} -- fitness value of the best solution
"""
yield None, None
def runTask(self, task):
r"""Start the optimization.
**Arguments:**
task {Task} -- task with bounds and objective function for optimization
**Return:**
solution {array} -- point of the best solution
fitness {real} -- fitness value of best solution
"""
return None, None
class Individual:
    r"""One candidate solution in a population.

    Stores the solution vector ``x`` and its objective value ``f``.

    **Date:** 2018

    **Author:** <NAME>

    **License:** MIT
    """
    def __init__(self, **kwargs):
        # Pull out the recognized keyword arguments; anything else is ignored
        # except the 'e' (evaluate) flag consumed below.
        task = kwargs.pop('task', None)
        rnd = kwargs.pop('rand', rand)
        x = kwargs.pop('x', [])
        # Start at the worst possible fitness for the task's optimization
        # direction (plain +inf when no task is given).
        self.f = inf if task is None else task.optType.value * inf
        if len(x) > 0:
            self.x = x if isinstance(x, ndarray) else asarray(x)
        else:
            self.generateSolution(task, rnd)
        evaluate_now = kwargs.pop('e', True)
        if evaluate_now and task is not None:
            self.evaluate(task, rnd)

    def generateSolution(self, task, rnd=rand):
        r"""Generate a new random solution within the task bounds.

        **Arguments:**

        task {Task} -- optimization task providing the bounds

        rnd {random} -- random numbers generator object
        """
        if task is None:
            return
        self.x = task.bcLower() + task.bcRange() * rnd.rand(task.dim())

    def evaluate(self, task, rnd=rand):
        r"""Repair the solution into the task bounds, then evaluate its fitness.

        **Arguments:**

        task {Task} -- objective function object

        rnd {random} -- random numbers generator object
        """
        self.repair(task, rnd=rnd)
        self.f = task.eval(self.x)

    def repair(self, task, rnd=rand):
        r"""Clamp the solution vector into the bounds of the problem.

        **Arguments:**

        task {Task} -- optimization task providing the repair operator
        """
        self.x = task.repair(self.x, rnd=rnd)

    def __eq__(self, other):
        r"""Two individuals are equal when both vectors and fitness values match."""
        vectors_match = array_equal(self.x, other.x)
        return vectors_match and self.f == other.f

    def __str__(self):
        r"""Human-readable "solution -> fitness" representation."""
        return '%s -> %s' % (self.x, self.f)

    def __getitem__(self, i):
        r"""Get the value of the i-th component of the solution.

        **Arguments:**

        i {integer} -- position of the solution component
        """
        return self.x[i]

    def __len__(self):
        r"""Get the number of components of the solution."""
        return len(self.x)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
<reponame>richardsonlima/amonone
import unittest
from nose.tools import eq_
from amonone.web.apps.alerts.models import AlertsModel, AlertGroupsModel
class AlertGroupsModelTest(unittest.TestCase):
    """Integration tests for AlertGroupsModel against a live mongo backend.

    Four of the original tests duplicated an identical ~20-line fixture
    (create group, attach server, insert group CPU rule); that setup is
    factored into _create_group_server_rule below.
    """

    def setUp(self):
        self.model = AlertGroupsModel()
        self.collection = self.model.mongo.get_collection('alert_groups')
        self.servers_collection = self.model.mongo.get_collection('servers')
        self.history_collection = self.model.mongo.get_collection('alert_groups_history')
        self.alerts_collection = self.model.mongo.get_collection('alerts')

    def _create_group_server_rule(self):
        """Reset collections and create a group, a server attached to it and
        a group CPU alert rule.

        Returns (group_id, server, rule, rule_id) freshly read back from mongo.
        """
        self.collection.remove()
        self.history_collection.remove()
        self.model.save({'name': 'group'})
        group = self.collection.find_one()
        group_id = str(group['_id'])
        self.servers_collection.remove()
        self.servers_collection.insert({'alert_group': group_id, 'name': 'test'})
        server = self.servers_collection.find_one()
        rule = {
            "metric_value": "0.1",
            "metric": "CPU",
            "metric_type": "%",
            "threshold": "1",
            "above_below": "above",
            "rule_type": "group",
            "group": group_id,
        }
        self.alerts_collection.remove()
        self.alerts_collection.insert(rule)
        rule = self.alerts_collection.find_one()
        return group_id, server, rule, str(rule['_id'])

    def save_test(self):
        # save() should persist the document as-is.
        self.collection.remove()
        self.model.save({'name': 'group'})
        result = self.collection.find_one()
        eq_(result['name'], 'group')

    def get_alerts_for_group_test(self):
        # Both group rules for the same group must be returned.
        self.alerts_collection.remove()
        self.alerts_collection.insert({'group': 'test', 'rule_type': 'group'})
        self.alerts_collection.insert({'group': 'test', 'rule_type': 'group'})
        result = self.model.get_alerts_for_group('test')
        eq_(len(result), 2)

    def save_occurence_test(self):
        _group_id, server, rule, rule_id = self._create_group_server_rule()
        # Two occurrences for the same rule/server accumulate in a single
        # history document keyed by server and alert.
        self.model.save_occurence({'cpu': [{'alert_on': 14, 'rule': rule_id}]}, server)
        self.model.save_occurence({'cpu': [{'alert_on': 25, 'rule': rule_id}]}, server)
        result = self.history_collection.find_one()
        eq_(len(result['history']), 2)
        eq_(result['server'], server['_id'])
        eq_(result['alert'], rule['_id'])

    def clear_alert_history_test(self):
        _group_id, server, rule, rule_id = self._create_group_server_rule()
        self.model.save_occurence({'cpu': [{'alert_on': 14, 'rule': rule_id}]}, server)
        result = self.history_collection.find_one()
        eq_(len(result['history']), 1)
        # Clearing must empty the history list but keep the document.
        self.model.clear_alert_history(rule['_id'], server['_id'], {})
        result = self.history_collection.find_one()
        eq_(result['history'], [])

    def get_history_test(self):
        _group_id, server, rule, rule_id = self._create_group_server_rule()
        self.model.save_occurence({'cpu': [{'alert_on': 14, 'rule': rule_id}]}, server)
        history = self.model.get_history(rule['_id'], server['_id'])
        eq_(len(history), 1)

    def delete_alerts_for_group_test(self):
        group_id, _server, _rule, _rule_id = self._create_group_server_rule()
        result = self.alerts_collection.count()
        eq_(result, 1)
        # Deleting by group id removes the rule created for that group.
        self.model.delete_alerts_for_group(group_id)
        result = self.alerts_collection.count()
        eq_(result, 0)
class AlertsModelTest(unittest.TestCase):
    # Integration tests for AlertsModel; they talk to a live mongo backend,
    # so each test clears the collections it touches before asserting.
    def setUp(self):
        self.model = AlertsModel()
        self.collection = self.model.mongo.get_collection('alerts')
        self.server_collection = self.model.mongo.get_collection('servers')
    def save_alert_test(self):
        # save() persists exactly one alert document.
        self.collection.remove()
        self.model.save({'rule': "test"})
        eq_(self.collection.count(), 1)
    def update_test(self):
        # update() replaces fields of the alert identified by its string id.
        self.collection.remove()
        self.model.save({'rule': "test"})
        alert = self.collection.find_one()
        alert_id = str(alert['_id'])
        self.model.update({'rule': 'updated_test'}, alert_id)
        alert = self.collection.find_one()
        eq_(alert['rule'], 'updated_test')
    def mute_test(self):
        # mute() toggles the 'mute' flag on successive calls.
        self.collection.remove()
        self.collection.insert({"name" : "test", "key": "test_me"})
        alert = self.collection.find_one()
        alert_id = str(alert['_id'])
        self.model.mute(alert_id)
        result = self.collection.find_one()
        eq_(result["mute"], True)
        self.model.mute(alert_id)
        result = self.collection.find_one()
        eq_(result["mute"], False)
    def get_server_alerts_test(self):
        # Both 'server'-type rules attached to the server must be returned.
        self.collection.remove()
        self.server_collection.remove()
        self.server_collection.insert({"name" : "test", "key": "test_me"})
        server = self.server_collection.find_one()
        server_id = str(server['_id'])
        rule = { "server": server_id, "rule_type": 'server', 'metric': 2}
        self.collection.insert(rule)
        rule = { "server": server_id, "rule_type": 'server', 'metric': 1}
        self.collection.insert(rule)
        rules = self.model.get_alerts_for_server(type='server', server_id=server_id)
        eq_(len(rules), 2)
        self.collection.remove()
    def get_alerts_test(self):
        # Filtering by rule type returns only the matching rule.
        self.collection.remove()
        rule = { "server": 'test' , "rule_type": 'bla', 'metric': 2}
        self.collection.insert(rule)
        rules = self.model.get_all_alerts(type='bla')
        eq_(len(rules), 1)
        self.collection.remove()
    def delete_alerts_test(self):
        # delete() removes the alert by its ObjectId.
        self.collection.remove()
        self.collection.insert({"name" : "test", "key": "test_me"})
        rule = self.collection.find_one()
        self.model.delete(rule['_id'])
        result = self.collection.count()
        eq_(result,0)
    def save_occurence_test(self):
        # Each save_occurence() call appends one entry to the rule's history.
        self.collection.remove()
        self.collection.insert({"rule_type" : "server",
            "metric_type_value" : "%",
            "metric_value" : "10", "metric_type" : "more_than", "metric" : "CPU", "threshold": 4})
        rule = self.collection.find_one()
        rule_id = str(rule['_id'])
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        rule = self.collection.find_one()
        eq_(len(rule['history']), 1)
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        rule = self.collection.find_one()
        eq_(len(rule['history']), 3)
        # Test with unicode
        self.model.save_occurence({'cpu': [{'alert_on': u'22.0', 'rule': rule_id}]})
        rule = self.collection.find_one()
        eq_(len(rule['history']), 4)
        self.collection.remove()
        # NOTE(review): 'rule' was fetched before the remove() above, so this
        # still inspects the last entry; presumably the unicode value '22.0'
        # exceeded the threshold and set trigger=True -- confirm in AlertsModel.
        eq_(rule['history'][3]['trigger'], True)
    def get_all_alerts_test(self):
        # Without a type filter every alert is returned.
        self.collection.remove()
        self.collection.insert({"rule_type" : "server"})
        self.collection.insert({"rule_type" : "process"})
        result = self.model.get_all_alerts()
        eq_(len(result), 2)
        self.collection.remove()
    def delete_server_alerts_test(self):
        # NOTE(review): 2 of the 5 rules are removed (count drops to 3) --
        # presumably the 'process' and 'server' types; confirm against
        # AlertsModel.delete_server_alerts.
        self.collection.remove()
        self.collection.insert({"rule_type" : "process", "server": "test-server"})
        self.collection.insert({"rule_type" : "server", "server": "test-server"})
        self.collection.insert({"rule_type" : "log", "server": "test-server"})
        self.collection.insert({"rule_type" : "dummy", "server": "test-server"})
        self.collection.insert({"rule_type" : "dummy", "server": "test-server"})
        self.model.delete_server_alerts("test-server")
        eq_(self.collection.count(), 3)
        self.collection.remove()
    def get_by_id_test(self):
        # get_by_id() must return the exact stored document.
        self.collection.remove()
        self.collection.insert({"rule_type" : "process", "server": "test-server"})
        alert = self.collection.find_one()
        alert_from_model = self.model.get_by_id(alert['_id'])
        eq_(alert, alert_from_model)
    def clear_alert_history_test(self):
        # clear_alert_history() empties 'history'; 'last_trigger' ends up as 1
        # (presumably a reset marker -- confirm in AlertsModel).
        self.collection.remove()
        self.collection.insert({"rule_type" : "server",
            "metric_type_value" : "%",
            "metric_value" : "10", "metric_type" : "more_than", "metric" : "CPU", "threshold": 4})
        rule = self.collection.find_one()
        rule_id = str(rule['_id'])
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        self.model.save_occurence({'cpu': [{'alert_on': 11, 'rule': rule_id}]})
        rule = self.collection.find_one()
        eq_(len(rule['history']), 3)
        self.model.clear_alert_history(rule_id)
        rule = self.collection.find_one()
        eq_(len(rule['history']), 0)
        eq_(rule['last_trigger'], 1)
<reponame>ZhenghengLi/lcls2
from psdaq.configdb.get_config import get_config
from p4p.client.thread import Context
import json
import time
import pprint
class xpm_link:
    """Thin wrapper around a RemoteLinkId PV value that decodes whether the
    peer on the link is an XPM and, if so, which XPM number it is."""
    def __init__(self, value):
        self.value = value
    def is_xpm(self):
        """True when the top byte of the id (bits 24..31) is 0xff, marking the peer as an XPM."""
        top_byte = (int(self.value) >> 24) & 0xff
        return top_byte == 0xff
    def xpm_num(self):
        """Return the XPM number encoded in bits 20..23 (also logs it)."""
        num = (int(self.value) >> 20) & 0xf
        print('xpm_num {:x} {:}'.format(self.value, num))
        return num
class ts_connector:
    """Configure the XPM timing links for one DAQ partition.

    Parses the 'connect' JSON message, works out which XPM ports feed the
    DRP nodes and which readout groups they belong to, then disables stale
    link masks (recursively from the master XPM down) and enables exactly
    the links in use.  All PV access goes through a p4p 'pva' Context that
    is closed again once construction finishes.
    """
    def __init__(self,json_connect_info):
        self.connect_info = json.loads(json_connect_info)
        print('*** connect_info')
        pp = pprint.PrettyPrinter()
        pp.pprint(self.connect_info)
        control_info=self.connect_info['body']['control']['0']['control_info']
        # PV name prefixes, e.g. 'DAQ:LAB2:XPM:' and 'DAQ:LAB2:XPM:<n>:'.
        self.xpm_base = control_info['pv_base']+':XPM:'
        master_xpm_num = control_info['xpm_master']
        self.master_xpm_pv = self.xpm_base+str(master_xpm_num)+':'
        self.ctxt = Context('pva')
        self.get_xpm_info()
        self.get_readout_group_mask()
        # unfortunately, the hsd needs the Rx link reset before the Tx,
        # otherwise we get CRC errors on the link.
        # try commenting this out since Matt has made the links more reliable
        #self.xpm_link_reset('Rx')
        #self.xpm_link_reset('Tx')
        # must come after clear readout because clear readout increments
        # the event counters, and the pgp eb needs them to start from zero
        # comment this out since it was moved to control.py
        #self.l0_count_reset()
        # enables listening to deadtime
        self.xpm_link_enable()
        self.ctxt.close()
    def get_readout_group_mask(self):
        # Build a bitmask with one bit set per readout group found in xpm_info.
        self.readout_group_mask = 0
        for _,_,readout_group in self.xpm_info:
            self.readout_group_mask |= (1<<readout_group)
    def get_xpm_info(self):
        """Collect (xpm_id, xpm_port, readout_group) triples for every DRP
        node that reports its timing connection; nodes missing any of the
        keys are silently skipped."""
        self.xpm_info = []
        # FIXME: cpo/weaver think this doesn't work for digitizers,
        # for example, where the DRP node can't learn which XPM port
        # is feeding it timing information. Currently think we should
        # try to get the information from the XPM side, instead of the
        # drp side.
        for key,node_info in self.connect_info['body']['drp'].items():
            try:
                # FIXME: should have a better method to map xpm ip
                # address to xpm number (used to create pv names)
                xpm_id = int(node_info['connect_info']['xpm_id'])
                xpm_port = node_info['connect_info']['xpm_port']
                readout_group = node_info['det_info']['readout']
                self.xpm_info.append((xpm_id,xpm_port,readout_group))
            except KeyError:
                pass
    def xpm_link_disable(self, pv, groups):
        """Clear the bits in `groups` from the LinkGroupMask of all 14 ports
        below `pv`, recursing into any downstream XPMs found via their
        RemoteLinkId, and switch off the per-group 'Master' flag on those
        downstream XPMs."""
        pv_names = []
        for xpm_port in range(14):
            pv_names.append(pv+'RemoteLinkId' +str(xpm_port))
        print('link_ids: {:}'.format(pv_names))
        link_ids = self.ctxt.get(pv_names)
        pv_names = []
        downstream_xpm_names = []
        for xpm_port in range(14):
            pv_names.append(pv+'LinkGroupMask'+str(xpm_port))
        link_masks = self.ctxt.get(pv_names)
        for i in range(14):
            xlink = xpm_link(link_ids[i])
            # this gets run for all xpm's "downstream" of the master xpm
            if xlink.is_xpm():
                downstream_xpm_names.append(self.xpm_base+str(xlink.xpm_num()))
                self.xpm_link_disable(self.xpm_base+str(xlink.xpm_num())+':',groups)
                link_masks[i] = 0xff # xpm to xpm links should be enabled for everything
            else:
                link_masks[i] &= ~groups
        self.ctxt.put(pv_names,link_masks)
        # this code disables the "master" feature for each of the
        # downstream xpm's for the readout groups used by the new xpm master
        pv_names_downstream_xpm_master_enable = []
        for name in downstream_xpm_names:
            for igroup in range(8):
                if (1<<igroup)&groups:
                    pv_names_downstream_xpm_master_enable.append(name+':PART:%d:Master'%igroup)
        num_master_disable = len(pv_names_downstream_xpm_master_enable)
        if (num_master_disable):
            print('*** Disable downstream xpm readout group master:',pv_names_downstream_xpm_master_enable)
            self.ctxt.put(pv_names_downstream_xpm_master_enable,[0]*num_master_disable)
    def xpm_link_disable_all(self):
        # Start from the master and recursively remove the groups from each downstream link
        self.xpm_link_disable(self.master_xpm_pv, self.readout_group_mask)
    def xpm_link_enable(self):
        """Disable everything first, then enable exactly the links that feed
        the connected DRP nodes with their readout-group bit."""
        self.xpm_link_disable_all()
        pv_names = []
        values = []
        for xpm_num,xpm_port,readout_group in self.xpm_info:
            pvname = self.xpm_base+str(xpm_num)+':'+'LinkGroupMask'+str(xpm_port)
            pv_names.append(pvname)
            values.append((1<<readout_group))
        print('*** setting xpm link enables',pv_names,values)
        self.ctxt.put(pv_names,values)
    def xpm_link_reset(self,style):
        """Pulse the link reset PV (`style` is 'Rx' or 'Tx') for every
        connected port, then wait for the links to relock."""
        # make pv name that looks like DAQ:LAB2:XPM:1:RxLinkReset11
        # for xpm_num 1 and xpm_port 11
        pv_names = []
        for xpm_num,xpm_port,_ in self.xpm_info:
            pvname = self.xpm_base+str(xpm_num)+':'+style+'LinkReset'+str(xpm_port)
            pv_names.append(pvname)
        print('*** xpm link resetting',pv_names)
        self.ctxt.put(pv_names,len(pv_names)*[1])
        # unfortunately need to wait for the links to relock, which
        # matt says takes "an appreciable fraction of a second".
        # empirically, the links seem unreliable unless we wait 2s.
        time.sleep(2)
    def l0_count_reset(self):
        """Reset the master XPM's L0 trigger counters for all readout groups in use."""
        pvL0Reset = self.master_xpm_pv+'GroupL0Reset'
        print('*** resetting l0 count',self.readout_group_mask)
        self.ctxt.put(pvL0Reset,self.readout_group_mask)
def ts_connect(json_connect_info):
    """Run the timing-system connect sequence for the given JSON 'connect'
    message and return an empty JSON body (the work happens as a side
    effect of constructing ts_connector)."""
    ts_connector(json_connect_info)
    return json.dumps({})
|
<filename>gym_ultrasonic/tests/test_env.py
import math
import random
import unittest
import gym
import numpy as np
from numpy.testing import assert_array_almost_equal
from gym_ultrasonic.envs.obstacle import Obstacle
class TestUltrasonicEnv(unittest.TestCase):
    # Integration tests for the 'UltrasonicServo-v0' gym environment.
    @classmethod
    def setUpClass(cls) -> None:
        # Fix both RNGs so obstacle placement is reproducible across runs.
        random.seed(27)
        np.random.seed(27)
    def setUp(self):
        self.env = gym.make('UltrasonicServo-v0')
        self.env.robot.speed = 1
        self.env.reset()
        # self.env.robot.position = np.divide([self.env.width, self.env.height], 2.)
        # self.env.robot.angle = 0
    def tearDown(self):
        self.env.close()
    def test_reset(self):
        # After reset the robot must be collision-free; state[0] is
        # presumably the sonar distance reading -- TODO confirm.
        self.env.reset()
        self.assertGreater(self.env.state[0], 0, msg="Robot should not collide")
        self.assertFalse(self.env.robot.collision(self.env.obstacles))
    def test_step_do_nothing(self):
        # A zero action must leave the pose unchanged and not end the episode.
        robot_pos = np.copy(self.env.robot.position)
        robot_angle = self.env.robot.angle
        speed, angle_turn = 0, 0
        _, _, done, _ = self.env.step(action=(speed, angle_turn))
        self.assertFalse(done)
        assert_array_almost_equal(robot_pos, self.env.robot.position)
        self.assertEqual(robot_angle, self.env.robot.angle)
        self.assertFalse(self.env.robot.collision(self.env.obstacles))
    def test_step_rotate_360(self):
        # Turning in place must never cause a collision.
        speed = 0
        for angle_turn in (180, 360):
            with self.subTest(angle_turn=angle_turn):
                _, _, done, _ = self.env.step(action=(speed, angle_turn))
                self.assertFalse(done)
                self.assertFalse(self.env.robot.collision(self.env.obstacles))
    def test_step_collide_any(self):
        # Aim the robot straight at the first obstacle and drive exactly the
        # separating distance in one step; the episode must end with a
        # negative reward and a zero sonar reading.
        obstacle = self.env.obstacles[0]
        vec_to_obstacle = obstacle.position - self.env.robot.position
        vx, vy = vec_to_obstacle
        angle_target = np.arctan2(vy, vx)
        if angle_target < 0:
            angle_target += 2 * math.pi
        angle_turn = angle_target - self.env.robot.angle
        self.env.robot.turn(angle_turn)
        dist_to_obstacle = np.linalg.norm(vec_to_obstacle)
        # Wheel velocity chosen so one time step covers the whole gap.
        wheel_velocity = dist_to_obstacle / self.env.time_step
        observation, reward, done, _ = self.env.step(action=(wheel_velocity, wheel_velocity))
        self.assertAlmostEqual(observation[0], 0, places=4)
        self.assertTrue(reward < 0)
        self.assertTrue(done)
    def test_step_collide_towards(self):
        # Same as above but using the robot's own ray cast for the distance.
        dist_to_obstacle, _ = self.env.robot.ray_cast(self.env.obstacles)
        wheel_velocity = dist_to_obstacle / self.env.time_step
        observation, reward, done, _ = self.env.step(action=(wheel_velocity, wheel_velocity))
        self.assertAlmostEqual(observation[0], 0, places=4)
        self.assertTrue(reward < 0)
        self.assertTrue(done)
    def test_large_robot(self):
        """
        A robot is so large - immediate collision.
        """
        self.env.robot.width = self.env.width
        _, _, done, _ = self.env.step(action=(0, 0))
        self.assertTrue(done)
    def test_render(self):
        # Smoke test: render() must not raise.
        self.env.render()
class TestTrajectory(unittest.TestCase):
    # Checks that collision is detected along the travelled trajectory,
    # not only at the robot's final position.
    def setUp(self):
        self.env = gym.make('UltrasonicServo-v0', time_step=1)
        self.env.robot.speed = 1
        self.env.reset()
        # Deterministic start: center of the arena, facing along +x.
        self.env.robot.position = np.divide([self.env.width, self.env.height], 2.)
        self.env.robot.angle = 0
        # Two identical small obstacles ahead of (and slightly below) the robot.
        self.env.env.obstacles = [
            Obstacle(self.env.robot.position + [100, -10], width=5, height=5),
            Obstacle(self.env.robot.position + [100, -10], width=5, height=5)
        ]
    def test_trajectory_collision(self):
        # Drive far enough to pass through the obstacle within one step: the
        # final pose is collision-free, yet the episode must still end.
        action = (130, 130)  # wheels velocity
        observation, reward, done, info = self.env.step(action)
        robot_collision = self.env.robot.collision(self.env.obstacles)
        self.assertFalse(robot_collision)
        self.assertTrue(done)  # trajectory, not curr pos, collision with obstacles
# Allow running this test module directly (e.g. "python test_env.py").
if __name__ == '__main__':
    unittest.main()
|
import tensorflow as tf
import os
import time
import math
import sys
import shutil
import numpy
import keras
import keras.backend as K
import h5py
import logging
import argparse
import random
import inspect
import matplotlib.pyplot as plt
from functools import *
import vtxops
import vtx
# Command-line interface for the vertex-network training job.
parser = argparse.ArgumentParser(description='Training of networks')
parser.add_argument('-i','--input',nargs="+", dest='inputFiles', default=[], action='append',
    help='Input files',required=True)
parser.add_argument('-o','--output', dest='outputFolder', type=str,
    help='Output directory', required=True)
parser.add_argument('-f','--force', dest='force', default=False, action='store_true',
    help='Force override of output directory')
parser.add_argument('-n','--epochs', dest='epochs', default=50, type=int,
    help='Number of epochs')
parser.add_argument('-b','--batch', dest='batchSize', default=1000, type=int,
    help='Batch size')
parser.add_argument('--full', dest='trainFull',action='store_true', default=False,
    help='Train full network (default: only z0)')
parser.add_argument('-t','--testFrac', dest='testFraction', default=0.15, type=float,
    help='Test fraction.')
parser.add_argument('-v', dest='logLevel', default='Info', type=str,
    help='Verbosity level: Debug, Info, Warning, Error, Critical')
parser.add_argument('--seed', dest='seed', default=int(time.time()), type=int,
    help='Random seed')
parser.add_argument('--gpu', dest='gpu', default=False,action="store_true",
    help='Force GPU usage')
parser.add_argument('--lr', dest='lr', default=0.01,type=float,
    help='Learning rate')
parser.add_argument('--kappa', dest='kappa', default=0.9,type=float,
    help='Learning rate decay')
args = parser.parse_args()
# Configure logging and report the runtime environment and chosen settings.
logging.basicConfig(format='%(levelname)s: %(message)s', level=getattr(logging, args.logLevel.upper(), None))
logging.info("Python: %s (%s)"%(sys.version_info,sys.executable))
logging.info("Keras: %s (%s)"%(keras.__version__,os.path.dirname(keras.__file__)))
logging.info("TensorFlow: %s (%s)"%(tf.__version__,os.path.dirname(tf.__file__)))
devices = vtx.Devices(requireGPU=args.gpu)
logging.info("Output folder: %s"%args.outputFolder)
logging.info("Learning rate: %.3e"%args.lr)
logging.info("Epochs: %i"%args.epochs)
logging.info("Batch size: %i"%args.batchSize)
logging.info("Test fraction: %.2f"%args.testFraction)
logging.info("Train full network: %s"%args.trainFull)
logging.info("Random seed: %i"%args.seed)
# Seed every RNG in play so runs are reproducible.
random.seed(args.seed)
numpy.random.seed(args.seed)
tf.set_random_seed(args.seed)
# Flatten the per-flag file lists produced by action='append' + nargs='+'.
inputFiles = [f for fileList in args.inputFiles for f in fileList]
if len(inputFiles)==0:
    logging.critical("No input files specified")
    sys.exit(1)
if os.path.exists(args.outputFolder):
    if not args.force:
        logging.critical("Output folder '%s' already exists. Use --force to override."%(args.outputFolder))
        sys.exit(1)
else:
    logging.info("Creating output folder: "+args.outputFolder)
    os.mkdir(args.outputFolder)
inputFiles = vtx.InputFiles(inputFiles)
pipeline = vtx.Pipeline(inputFiles,testFraction=args.testFraction)
#TODO: make each model part a separate model; build the full model by calling them
from vtx.nn import E2ERef as Network
# Snapshot the network definition next to the results for provenance.
shutil.copyfile(inspect.getsourcefile(Network),os.path.join(args.outputFolder,"Network.py"))
network = Network(
    nbins=256,
    ntracks=250,
    nfeatures=10,
    nweights=1,
    nlatent=0,
    activation='relu',
    regloss=1e-6
)
# NOTE(review): the triple-quoted block below is an abandoned Keras
# prototype kept for reference only; it is never executed.
'''
inputFeatureLayer = keras.layers.Input(shape=(250,10))
weights = keras.layers.Lambda(lambda x: x[:,1:])(inputFeatureLayer)
for _ in range(2):
    weights = keras.layers.Dense(20,activation='relu')(weights)
    weights = keras.layers.Dropout(0.1)(weights)
weights = keras.layers.Dense(1,activation=None)(weights)
weightModel = keras.models.Model(inputs=[inputFeatureLayer],outputs=[weights])
weightModel.add_loss(tf.reduce_mean(tf.square(weights)))
weightModel.summary()
inputWeightLayer = keras.layers.Input(shape=(250,1))
hists = vtxops.KDELayer()([inputFeatureLayer,inputWeightLayer])
histModel = keras.models.Model(inputs=[inputFeatureLayer,inputWeightLayer],outputs=[hists])
histModel.summary()
positionInput = keras.layers.Input(shape=(256,1))
position = keras.layers.Flatten()(positionInput)
for _ in range(2):
    position = keras.layers.Dense(100,activation='relu')(position)
    position = keras.layers.Dropout(0.1)(position)
position = keras.layers.Dense(1,activation=None)(position)
positionModel = keras.models.Model(inputs=[positionInput],outputs=[position])
positionModel.summary()
weightResult = weightModel([inputFeatureLayer])
histResult = histModel([inputFeatureLayer,weightResult])
positionResult = positionModel([histResult])
model = keras.models.Model(inputs=[inputFeatureLayer],outputs=[positionResult])
model.summary()
optimizer = keras.optimizers.Adam(lr=0.01)
model.compile(optimizer,loss='mae')
#model = network.makeZ0Model(optimizer)
#sys.exit(1)
'''
learning_rate = args.lr
fhAlgo = vtx.FastHisto()
# Per-epoch bookkeeping of learning rate and train/test losses.
history = {'lr':[],'trainLoss':[],'testLoss':[]}
for epoch in range(args.epochs):
#distributions = []
#learning_rate = args.lr/(1+args.kappa*epoch)
optimizer = keras.optimizers.Adam(lr=learning_rate)
model = network.createModel()
model.compile(optimizer)
if epoch==0:
model.summary()
if epoch>0:
model.load_weights(os.path.join(args.outputFolder,"weights_%i.hdf5"%(epoch)))
stepTrain = 0
totalLossTrain = 0.
for batch in pipeline.generate(
batchSize=args.batchSize,
nFiles=max(3,len(inputFiles)),
isTraining=True
):
stepTrain+=1
#if stepTrain>10:
# break
#add random shift and flip for extra regularization
randomZ0Shift = numpy.random.uniform(-1,1,size=(batch['X'].shape[0],1,1))
randomZ0Flip = numpy.sign(numpy.random.uniform(-1,1,size=(batch['X'].shape[0],1,1)))
randomZ0ShiftX = numpy.concatenate([
numpy.repeat(randomZ0Shift,batch['X'].shape[1],axis=1),
numpy.zeros((batch['X'].shape[0],batch['X'].shape[1],batch['X'].shape[2]-1))
],axis=2)
randomZ0SFlipX = numpy.concatenate([
numpy.repeat(randomZ0Flip,batch['X'].shape[1],axis=1),
numpy.ones((batch['X'].shape[0],batch['X'].shape[1],batch['X'].shape[2]-1))
],axis=2)
batch['X']+=randomZ0ShiftX
batch['X']*=randomZ0SFlipX
batch['y']+=randomZ0Shift[:,:,0]
batch['y']*=randomZ0Flip[:,:,0]
batch['y_avg']+=randomZ0Shift[:,:,0]
batch['y_avg']*=randomZ0Flip[:,:,0]
'''
for i in range(batch['X'].shape[0]):
for c in range(batch['X'].shape[1]):
if batch['X'][i,c,1] > 500:
for j in range(batch['X'].shape[2]):
batch['X'][i,c,j] = 0.
'''
'''
flatX = numpy.reshape(batch['X'],[-1,batch['X'].shape[2]])
flatX = flatX[flatX[:,1]>0]
distributions.append(flatX)
'''
#print (numpy.amax(numpy.reshape(batch['X'],[-1,batch['X'].shape[2]]),axis=0))
#print (numpy.mean(numpy.reshape(batch['X'],[-1,batch['X'].shape[2]]),axis=0))
lossTrain = model.train_on_batch(batch)
totalLossTrain+=lossTrain
if stepTrain%10==0:
logging.info("Training %i-%i: loss=%.3e"%(
stepTrain,
epoch+1,
lossTrain
))
totalLossTrain = totalLossTrain/stepTrain if stepTrain>0 else 0
logging.info("Done training for %i-%i: lr=%.3e total loss=%.3e"%(
stepTrain,
epoch+1,
learning_rate,
totalLossTrain
))
'''
flatX = numpy.concatenate(distributions,axis=0)
minX = numpy.nanmin(flatX,axis=0)
maxX = numpy.nanmax(flatX,axis=0)
meanX = numpy.mean(flatX,axis=0)
names = ['z0','pt','eta','chi2']#,'bendchi2','nstub']
uselog=[False,True,False,True]#,True,False]
for i in range(flatX.shape[1]):
if i>=len(names):
break
plt.figure(figsize=(9, 3))
if uselog[i]:
plt.hist(flatX[:,i], bins=numpy.logspace(math.log10(minX[i]),math.log10(maxX[i]),100))
plt.xscale('log')
else:
plt.hist(flatX[:,i], bins=100,range=(minX[i],maxX[i]))
plt.yscale('log')
plt.title("Feature: %s"%names[i])
plt.savefig("Feature_%s_ref.png"%names[i])
'''
model.save_weights(os.path.join(args.outputFolder,"weights_%i.hdf5"%(epoch+1)))
stepTest = 0
totalLossTest = 0.
predictedZ0NN = []
predictedZ0FH = []
z0FH = fhAlgo.predictZ0(batch['X'][:,:,0],batch['X'][:,:,1])
trueZ0 = []
for batch in pipeline.generate(
batchSize=args.batchSize,
nFiles=1,
isTraining=False
):
stepTest += 1
lossTest = model.test_on_batch(batch)
totalLossTest+=lossTest
if stepTest%10==0:
logging.info("Testing %i-%i: loss=%.3e"%(
stepTest,
epoch+1,
lossTest
))
z0NN,assoc = model.predict_on_batch(batch)
predictedZ0NN.append(z0NN)
predictedZ0FH.append(fhAlgo.predictZ0(
batch['X'][:,:,0],
batch['X'][:,:,1]
))
trueZ0.append(batch['y_avg'])
predictedZ0NN = numpy.concatenate(predictedZ0NN,axis=0)
predictedZ0FH = numpy.concatenate(predictedZ0FH,axis=0)
trueZ0 = numpy.concatenate(trueZ0,axis=0)
totalLossTest = totalLossTest/stepTest if stepTest>0 else 0
logging.info("Done testing for %i-%i: total loss=%.3e"%(
stepTest,
epoch+1,
totalLossTest
))
history['lr'].append(learning_rate)
history['trainLoss'].append(totalLossTrain)
history['testLoss'].append(totalLossTest)
if len(history["trainLoss"])>5 and numpy.mean(history["trainLoss"][-3:])<totalLossTrain:
learning_rate = 0.92*learning_rate
print ("Q: ",list(map(lambda x: "%6.1f%%"%x,[5.,15.87,50.,84.13,95.])))
print ("NN: ",list(map(lambda x: "%+6.4f"%x,numpy.percentile(predictedZ0NN-trueZ0,[5.,15.87,50.,84.13,95.]))))
print ("FH: ",list(map(lambda x: "%+6.4f"%x,numpy.percentile(predictedZ0FH-trueZ0,[5.,15.87,50.,84.13,95.]))))
|
r"""
Interface to GAP3
This module implements an interface to GAP3.
AUTHORS:
- <NAME> (February 2010)
- <NAME> (March 2016)
.. WARNING::
The experimental package for GAP3 is Jean Michel's pre-packaged GAP3,
which is a minimal GAP3 distribution containing packages that have
no equivalent in GAP4, see :trac:`20107` and also
https://webusers.imj-prg.fr/~jean.michel/gap3/
Obtaining GAP3
--------------
Instead of installing the experimental GAP3 package, one can as well install
by hand either of the following two versions of GAP3:
- <NAME> maintains a GAP3 Linux executable, optimized
for i686 and statically linked for jobs of 2 GByte or more:
http://www.math.rwth-aachen.de/~Frank.Luebeck/gap/GAP3
- or you can download GAP3 from the GAP website below. Since GAP3
is no longer supported, it may not be easy to install this version.
http://www.gap-system.org/Gap3/Download3/download.html
Changing which GAP3 is used
---------------------------
.. WARNING::
There is a bug in the pexpect module (see :trac:`8471`) that
prevents the following from working correctly. For now, just make sure
that ``gap3`` is in your ``PATH``.
Sage assumes that GAP3 can be launched with the command ``gap3``; that is,
Sage assumes that the command ``gap3`` is in your ``PATH``. If this is not
the case, then you can start GAP3 using the following command::
sage: gap3 = Gap3(command='/usr/local/bin/gap3') #not tested
Functionality and Examples
--------------------------
The interface to GAP3 offers the following functionality.
#. ``gap3(expr)`` - Evaluation of arbitrary GAP3 expressions, with the
result returned as a Sage object wrapping the corresponding GAP3 element::
sage: a = gap3('3+2') #optional - gap3
sage: a #optional - gap3
5
sage: type(a) #optional - gap3
<class 'sage.interfaces.gap3.GAP3Element'>
::
sage: S5 = gap3('SymmetricGroup(5)') #optional - gap3
sage: S5 #optional - gap3
Group( (1,5), (2,5), (3,5), (4,5) )
sage: type(S5) #optional - gap3
<class 'sage.interfaces.gap3.GAP3Record'>
This provides a Pythonic interface to GAP3. If ``gap_function`` is the
name of a GAP3 function, then the syntax ``gap_element.gap_function()``
returns the ``gap_element`` obtained by evaluating the command
``gap_function(gap_element)`` in GAP3::
sage: S5.Size() #optional - gap3
120
sage: S5.CharTable() #optional - gap3
CharTable( Group( (1,5), (2,5), (3,5), (4,5) ) )
Alternatively, you can instead use the syntax
``gap3.gap_function(gap_element)``::
sage: gap3.DerivedSeries(S5) #optional - gap3
[ Group( (1,5), (2,5), (3,5), (4,5) ),
Subgroup( Group( (1,5), (2,5), (3,5), (4,5) ),
[ (1,2,5), (1,3,5), (1,4,5) ] ) ]
If ``gap_element`` corresponds to a GAP3 record, then
``gap_element.recfield`` provides a means to access the record element
corresponding to the field ``recfield``::
sage: S5.IsRec() #optional - gap3
true
sage: S5.recfields() #optional - gap3
['isDomain', 'isGroup', 'identity', 'generators', 'operations',
'isPermGroup', 'isFinite', '1', '2', '3', '4', 'degree']
sage: S5.identity #optional - gap3
()
sage: S5.degree #optional - gap3
5
sage: S5.1 #optional - gap3
(1,5)
sage: S5.2 #optional - gap3
(2,5)
#. By typing ``%gap3`` or ``gap3.interact()`` at the command-line, you can
interact directly with the underlying GAP3 session.
::
sage: gap3.interact() #not tested
--> Switching to Gap3 <--
gap3:
#. You can start a new GAP3 session as follows::
sage: gap3.console() #not tested
######## Lehrstuhl D fuer Mathematik
### #### RWTH Aachen
## ##
## # ####### #########
## # ## ## # ##
## # # ## # ##
#### ## ## # # ##
##### ### ## ## ## ##
######### # ######### #######
# #
## Version 3 #
### Release 4.4 #
## # 18 Apr 97 #
## #
## # <NAME>, <NAME>, <NAME>
## # <NAME>, <NAME>, <NAME>
## # <NAME>, <NAME>, <NAME>
### ## <NAME>, <NAME>, <NAME>
###### <NAME>, <NAME>, <NAME>
<NAME>, <NAME>, <NAME>
<NAME>
For help enter: ?<return>
gap>
#. The interface also has access to the GAP3 help system::
sage: gap3.help('help', pager=False) #not tested
Help _______________________________________________________...
This section describes together with the following sections the GAP
help system. The help system lets you read the manual interactively...
Common Pitfalls
---------------
#. If you want to pass a string to GAP3, then you need to wrap it in
single quotes as follows::
sage: gap3('"This is a GAP3 string"') #optional - gap3
"This is a GAP3 string"
This is particularly important when a GAP3 package is loaded via the
``RequirePackage`` method (note that one can instead use the
``load_package`` method)::
sage: gap3.RequirePackage('"chevie"') #optional - gap3
Examples
--------
Load a GAP3 package::
sage: gap3.load_package("chevie") #optional - gap3
sage: gap3.version() # random #optional - gap3
'lib: v3r4p4 1997/04/18, src: v3r4p0 1994/07/10, sys: usg gcc ansi'
Working with GAP3 lists. Note that GAP3 lists are 1-indexed::
sage: L = gap3([1,2,3]) #optional - gap3
sage: L[1] #optional - gap3
1
sage: L[2] #optional - gap3
2
sage: 3 in L #optional - gap3
True
sage: 4 in L #optional - gap3
False
sage: m = gap3([[1,2],[3,4]]) #optional - gap3
sage: m[2,1] #optional - gap3
3
sage: [1,2] in m #optional - gap3
True
sage: [3,2] in m #optional - gap3
False
sage: gap3([1,2]) in m #optional - gap3
True
Controlling variable names used by GAP3::
sage: gap3('2', name='x') #optional - gap3
2
sage: gap3('x') #optional - gap3
2
sage: gap3.unbind('x') #optional - gap3
sage: gap3('x') #optional - gap3
Traceback (most recent call last):
...
TypeError: Gap3 produced error output
Error, Variable: 'x' must have a value
...
"""
#*****************************************************************************
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from sage.misc.cachefunc import cached_method
from sage.interfaces.expect import Expect
from sage.interfaces.gap import Gap_generic, GapElement_generic
# gap3_cmd should point to the gap3 executable
gap3_cmd = 'gap3'
class Gap3(Gap_generic):
    r"""
    A simple Expect interface to GAP3.

    EXAMPLES::

        sage: from sage.interfaces.gap3 import Gap3
        sage: gap3 = Gap3(command='gap3')

    TESTS::

        sage: gap3(2) == gap3(3)                  #optional - gap3
        False
        sage: gap3(2) == gap3(2)                  #optional - gap3
        True
        sage: gap3._tab_completion()              #optional - gap3
        []

    We test the interface behaves correctly after a keyboard interrupt::

        sage: gap3(2)                             #optional - gap3
        2
        sage: try:
        ....:     gap3._keyboard_interrupt()
        ....: except KeyboardInterrupt:
        ....:     pass
        sage: gap3(2)                             #optional - gap3
        2

    We test that the interface busts out of GAP3's break loop correctly::

        sage: f = gap3('function(L) return L[0]; end;;')   #optional - gap3
        sage: f([1,2,3])                          #optional - gap3
        Traceback (most recent call last):
        ...
        RuntimeError: Gap3 produced error output
        Error, List Element: <position> must be a positive integer at
        return L[0] ...

    AUTHORS:

    - <NAME> (Feb 2010)
    """
    # GAP3 spells the identity-comparison primitive differently from GAP4.
    _identical_function = "IsIdentical"

    def __init__(self, command=gap3_cmd):
        r"""
        Initialize the GAP3 interface and start a session.

        INPUT:

        - ``command`` -- string (default "gap3"); points to the gap3
          executable on your system; by default, it is assumed the
          executable is in your path.

        EXAMPLES::

            sage: gap3 = Gap3()                   #optional - gap3
            sage: gap3.is_running()
            False
            sage: gap3._start()                   #optional - gap3
            sage: gap3.is_running()               #optional - gap3
            True
        """
        self.__gap3_command_string = command
        # Explanation of additional command-line options passed to gap3:
        #
        # -p invokes the internal programmatic interface, which is how Sage
        #    talks to GAP4. This allows reuse of some of the GAP4 interface
        #    code.
        #
        # -y sets the number of lines of the terminal; controls how many
        #    lines of text are output by GAP3 before the pager is invoked.
        #    This option is useful in dealing with the GAP3 help system.
        Expect.__init__(self,
                        name='gap3',
                        prompt='gap> ',
                        command=self.__gap3_command_string + " -p -y 500",
                        server=None,
                        ulimit=None,
                        script_subdirectory=None,
                        restart_on_ctrlc=True,
                        verbose_start=False,
                        init_code=[],
                        max_startup_time=None,
                        logfile=None,
                        eval_using_file_cutoff=100,
                        do_cleaner=True,
                        remote_cleaner=False,
                        path=None)

    def _start(self):
        r"""
        Start the underlying GAP3 process and compile the pexpect patterns
        used to parse its ``-p`` protocol output.

        EXAMPLES::

            sage: gap3 = Gap3()                   #optional - gap3
            sage: gap3.is_running()
            False
            sage: gap3._start()                   #optional - gap3
            sage: gap3.is_running()               #optional - gap3
            True
            sage: gap3.quit()                     #optional - gap3
        """
        Expect._start(self)
        # The -p command-line option to GAP3 produces the following
        # funny-looking patterns in the interface. We compile the patterns
        # now, and use them later for interpreting interface messages.
        # Raw strings are used so \d, \+ and \. reach the regex engine
        # verbatim instead of triggering invalid-escape-sequence warnings.
        self._compiled_full_pattern = self._expect.compile_pattern_list([
            r'@p\d+\.', '@@', '@[A-Z]', r'@[123456!"#$%&][^+]*\+',
            '@e', '@c', '@f', '@h', '@i', '@m', '@n', '@r',
            r'@s\d', r'@w.*\+', '@x', '@z'])
        self._compiled_small_pattern = self._expect.compile_pattern_list('@J')

    def _object_class(self):
        r"""
        Return the class used for constructing GAP3 elements.

        TESTS::

            sage: gap3._object_class()
            <class 'sage.interfaces.gap3.GAP3Element'>
        """
        return GAP3Element

    def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
        r"""
        Execute a line of code in GAP3 and parse the output.

        TESTS:

        Test that syntax errors are handled correctly::

            sage: gap3('syntax error', name='x')  #optional - gap3
            Traceback (most recent call last):
            ...
            TypeError: Gap3 produced error output
            Syntax error: ; expected
            x:=syntax error;
            ^
            gap>
            ...

        Test that error messages are detected and reported correctly::

            sage: gap3.SymmetricGroup(5,3)        #optional - gap3
            Traceback (most recent call last):
            ...
            RuntimeError: Gap3 produced error output
            Error, Record: left operand must be a record at
            return arg[1].operations.SymmetricGroup( arg[1], arg[2] ) ... in
            SymmetricGroup( ..., ... ) called from
            main loop
            brk> quit;
            ...

        Test that break loops are detected and exited properly::

            sage: f = gap3('function(L) return L[0]; end;;')  #optional - gap3
            sage: f([1,2,3])                      #optional - gap3
            Traceback (most recent call last):
            ...
            RuntimeError: Gap3 produced error output
            Error, List Element: <position> must be a positive integer at
            return L[0] ... in
            ... called from
            main loop
            brk> quit;
            ...
        """
        # It seems that GAP3 does not classify syntax errors as regular error
        # messages, so the generic GAP interface processing code does not
        # detect it. So we test for a syntax error explicitly.
        # BUG FIX: forward wait_for_prompt/expect_eof to the superclass;
        # the original hard-coded True/False, silently ignoring callers'
        # arguments (the defaults are unchanged, so default behavior is
        # identical).
        normal_output, error_output = \
            super(Gap3, self)._execute_line(line,
                                            wait_for_prompt=wait_for_prompt,
                                            expect_eof=expect_eof)
        if normal_output.startswith("Syntax error:"):
            normal_output, error_output = "", normal_output
        return (normal_output, error_output)

    def help(self, topic, pager=True):
        r"""
        Print help on the given topic.

        INPUT:

        - ``topic`` -- string

        - ``pager`` -- bool (default ``True``); if ``True``, display the
          help text through Sage's pager, otherwise print it.

        EXAMPLES::

            sage: gap3.help('help', pager=False)  #optional - gap3
            Help _______________________________________________________...
            <BLANKLINE>
            This section describes together with the following sectio...
            help system.  The help system lets you read the manual inter...

        ::

            sage: gap3.help('SymmetricGroup', pager=False)  #optional - gap3
            no section with this name was found

        TESTS::

            sage: m = gap3([[1,2,3],[4,5,6]]); m  #optional - gap3
            [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]
            sage: gap3.help('help', pager=False)  #optional - gap3
            Help _______________________________________________________...
            sage: m                               #optional - gap3
            [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]
            sage: m.Print()                       #optional - gap3
            [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]
            sage: gap3.help('Group', pager=False) #optional - gap3
            Group ______________________________________________________...
            sage: m                               #optional - gap3
            [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]
            sage: m.Print()                       #optional - gap3
            [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]
        """
        import pexpect
        if self._expect is None:
            self._start()
        E = self._expect
        helptext = []
        # we request the help document
        E.sendline("? %s" % topic)
        # it seems necessary to skip TWO echoes (as is done in the GAP4
        # interface)
        E.expect("\r\n")
        E.expect("\r\n")
        # next we process the help document; translating special characters
        # of the -p protocol, etc. The loop ends on a timeout or when the
        # "normal input mode" marker (@n) is seen.
        while True:
            try:
                x = E.expect_list(self._compiled_full_pattern, timeout=2)
            except pexpect.TIMEOUT:
                break
            if x == 1:
                # matched @@; replace with @
                helptext.append('@')
                helptext.append(E.before)
            elif x == 2:
                # matched a special char; convert and insert
                helptext.append(chr(ord(E.after[1:2]) - ord('A') + 1))
                helptext.append(E.before)
            elif x == 10:
                # matched @n (normal input mode); it seems we're done
                break
            elif x == 11:
                # matched @r (echoing input); skip to end of line
                E.expect_list(self._compiled_small_pattern)
        # merge the help text into one string and print it.
        helptext = "".join(helptext).strip()
        if pager is True:
            from sage.misc.pager import pager as pag
            pag()(helptext)
        else:
            print(helptext)

    def cputime(self, t=None):
        r"""
        Return the amount of CPU time that the GAP3 session has used in
        seconds. If ``t`` is not None, then it returns the difference
        between the current CPU time and ``t``.

        EXAMPLES::

            sage: t = gap3.cputime()              #optional - gap3
            sage: t                               #random #optional - gap3
            0.02
            sage: gap3.SymmetricGroup(5).Size()   #optional - gap3
            120
            sage: gap3.cputime()                  #random #optional - gap3
            0.14999999999999999
            sage: gap3.cputime(t)                 #random #optional - gap3
            0.13
        """
        if t is not None:
            return self.cputime() - t
        else:
            # GAP3's Runtime() prints the CPU time in milliseconds as an
            # integer; parse it with float() instead of eval() so malformed
            # output raises ValueError rather than being executed.
            return float(self.eval('Runtime();')) / 1000.0

    def console(self):
        r"""
        Spawn a new GAP3 command-line session.

        EXAMPLES::

            sage: gap3.console()                  #not tested
            ...
            For help enter: ?<return>
            gap>
        """
        # os is imported at module level (after the class body) and is
        # bound by the time this method can be called.
        os.system(self.__gap3_command_string)

    def _install_hints(self):
        r"""
        Return the hint text shown when the gap3 executable cannot be
        started.

        TESTS::

            sage: gap3 = Gap3(command='/wrongpath/gap3')
            sage: gap3('3+2')
            Traceback (most recent call last):
            ...
            TypeError: unable to start gap3 because the command '/wrongpath/gap3 -p -y 500' failed: The command was not found or was not executable: /wrongpath/gap3.
            <BLANKLINE>
            Your attempt to start GAP3 failed, either because you do not have
            GAP3 installed, or because it is not configured correctly.
            <BLANKLINE>
                - If you do not have GAP3 installed, then you must either...
            sage: print(gap3._install_hints())
            <BLANKLINE>
            Your attempt to start GAP3 failed, either because you do not have
            GAP3 installed, or because it is not configured correctly.
            <BLANKLINE>
                - If you do not have GAP3 installed, then you must either...
        """
        # BUG FIX: the original text read "you do not have / have GAP3
        # installed" — the duplicated "have" is removed here and in the
        # doctests above, which must match this string.
        return r"""
Your attempt to start GAP3 failed, either because you do not have
GAP3 installed, or because it is not configured correctly.

    - If you do not have GAP3 installed, then you must either install
      the optional package, see :trac:20107, or you download and
      install it yourself.
      Here are two other ways to obtain GAP3:

        - <NAME> maintains a GAP3 Linux executable, optimized
          for i686 and statically linked for jobs of 2 GByte or more:

            http://www.math.rwth-aachen.de/~Frank.Luebeck/gap/GAP3

        - Finally, you can download GAP3 from the GAP website below. Since
          GAP3 is no longer an officially supported distribution of GAP, it
          may not be easy to install this version.

            http://www.gap-system.org/Gap3/Download3/download.html

    - If you have GAP3 installed, then perhaps it is not configured
      correctly. Sage assumes that you can start GAP3 with the command
      %s. Alternatively, you can use the following command
      to point Sage to the correct command for your system.

          gap3 = Gap3(command='/usr/local/bin/gap3')
""" % self.__gap3_command_string

    @cached_method
    def _tab_completion(self):
        """
        Return additional tab completion entries.

        Currently this is empty.

        OUTPUT:

        List of strings

        EXAMPLES::

            sage: gap3._tab_completion()
            []
        """
        return []
# Shared module-level GAP3 interface instance used by the convenience
# functions below; constructing it does not start the gap3 process
# (the session starts lazily on first use).
gap3 = Gap3()
class GAP3Element(GapElement_generic):
    r"""
    A GAP3 element.

    .. NOTE::

        If the corresponding GAP3 element is a GAP3 record,
        then the class is changed to a ``GAP3Record``.

    INPUT:

    - ``parent`` -- the GAP3 session

    - ``value`` -- the GAP3 command as a string

    - ``is_name`` -- bool (default: False); if True, then ``value`` is
      the variable name for the object

    - ``name`` -- str (default: ``None``); the variable name to use for the
      object. If ``None``, then a variable name is generated.

    .. NOTE::

        If you pass ``E``, ``X`` or ``Z`` for ``name``, then an error is
        raised because these are sacred variable names in GAP3 that should
        never be redefined. Sage raises an error because GAP3 does not!

    EXAMPLES::

        sage: from sage.interfaces.gap3 import GAP3Element  #optional - gap3
        sage: gap3 = Gap3()                                 #optional - gap3
        sage: GAP3Element(gap3, value='3+2')                #optional - gap3
        5
        sage: GAP3Element(gap3, value='sage0', is_name=True)  #optional - gap3
        5

    TESTS::

        sage: GAP3Element(gap3, value='3+2', is_name=False, name='X')  #optional - gap3
        Traceback (most recent call last):
        ...
        ValueError: you are attempting to redefine X; but you should never redefine E, X or Z in gap3 (because things will break!)

    AUTHORS:

    - <NAME> (Feb 2010)
    """
    def __init__(self, parent, value, is_name=False, name=None):
        r"""
        See ``GAP3Element`` for full documentation.

        EXAMPLES::

            sage: from sage.interfaces.gap3 import GAP3Element  #optional - gap3
            sage: gap3 = Gap3()                                 #optional - gap3
            sage: GAP3Element(gap3, value='3+2')                #optional - gap3
            5
            sage: GAP3Element(gap3, value='sage0', is_name=True)  #optional - gap3
            5

        TESTS::

            sage: GAP3Element(gap3, value='3+2', is_name=False, name='X')  #optional - gap3
            Traceback (most recent call last):
            ...
            ValueError: you are attempting to redefine X; but you should never redefine E, X or Z in gap3 (because things will break!)
        """
        # Warning: One should not redefine E, X or Z in gap3, because
        # things will break, but gap3 raises no errors if one does this!
        if name in ["E","X","Z"]:
            raise ValueError("you are attempting to redefine %s; but you should never redefine E, X or Z in gap3 (because things will break!)" % name)
        # initialize the superclass
        super(GAP3Element, self).__init__(parent, value, is_name, name)
        # check for a GAP record; if so then change the class so record
        # fields become accessible as Python attributes (see GAP3Record).
        parent._synchronize()
        if parent.eval("IsRec(%s)" % self._name) == "true":
            self.__class__ = GAP3Record

    def __getitem__(self, n):
        r"""
        Index into the GAP3 list; GAP3 lists are 1-indexed, and a tuple
        index is translated into chained ``[a][b]...`` subscripts.

        EXAMPLES::

            sage: l = gap3('[1,2,3]')             #optional - gap3
            sage: l[1]                            #optional - gap3
            1
            sage: a = gap3([1,2,3])               #optional - gap3
            sage: a[1]                            #optional - gap3
            1
            sage: m = gap3([[1,2,3],[4,5,6],[7,8,9]])  #optional - gap3
            sage: m[1,3]                          #optional - gap3
            3
            sage: m[2][1]                         #optional - gap3
            4
        """
        gap3_session = self._check_valid()
        if not isinstance(n, tuple):
            return gap3_session.new('%s[%s]'%(self.name(), n))
        else:
            return gap3_session.new('%s%s'%(self.name(), ''.join(['[%s]'%x for x in n])))

    def _latex_(self):
        r"""
        Return a LaTeX representation, via GAP3's ``FormatLaTeX`` when
        available, falling back to the plain string form.

        EXAMPLES::

            sage: s = gap("[[1,2], [3/4, 5/6]]")
            sage: s._latex_()
            '\\left(\\begin{array}{rr} 1&2\\\\ 3/4&\\frac{5}{6}\\\\ \\end{array}\\right)'
            sage: latex(s)
            \left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
        """
        gap3_session = self._check_valid()
        try:
            s = gap3_session.eval('FormatLaTeX(%s)'%self.name())
            # Undo GAP3's escaping and strip the surrounding quotes.
            s = s.replace('\\\\','\\').replace('"','')
            s = s.replace('%\\n',' ')
            return s
        except RuntimeError:
            # FormatLaTeX is not defined for every GAP3 object.
            return str(self)
class GAP3Record(GAP3Element):
    r"""
    A GAP3 record.

    .. NOTE::

        This class should not be called directly, use GAP3Element instead.
        If the corresponding GAP3 element is a GAP3 record, then the class
        is changed to a ``GAP3Record``.

    AUTHORS:

    - <NAME> (Feb 2010)
    """
    def recfields(self):
        r"""
        Return a list of the fields for the record. (Record fields are akin
        to object attributes in Sage.)

        OUTPUT:

        - list of strings - the field records

        EXAMPLES::

            sage: S5 = gap3.SymmetricGroup(5)     #optional - gap3
            sage: S5.recfields()                  #optional - gap3
            ['isDomain', 'isGroup', 'identity', 'generators',
            'operations', 'isPermGroup', 'isFinite', '1', '2',
            '3', '4', 'degree']
            sage: S5.degree                       #optional - gap3
            5
        """
        gap3_session = self._check_valid()
        # Computed lazily once, then cached on the instance.
        if not hasattr(self, "_gap_recfields"):
            s = str(gap3_session.eval("RecFields(%s)" % self._name))
            # Parse GAP3's printed list: drop brackets and newlines, split
            # on commas, and strip the quotes around each field name.
            s = s.strip('[] ').replace('\n','')
            self._gap_recfields = [ss.strip('" ') for ss in s.split(',')]
        return getattr(self,"_gap_recfields")

    def operations(self):
        r"""
        Return a list of the GAP3 operations for the record.

        OUTPUT:

        - list of strings - operations of the record

        EXAMPLES::

            sage: S5 = gap3.SymmetricGroup(5)     #optional - gap3
            sage: S5.operations()                 #optional - gap3
            [..., 'NormalClosure', 'NormalIntersection', 'Normalizer',
            'NumberConjugacyClasses', 'PCore', 'Radical', 'SylowSubgroup',
            'TrivialSubgroup', 'FusionConjugacyClasses', 'DerivedSeries', ...]
            sage: S5.DerivedSeries()              #optional - gap3
            [ Group( (1,5), (2,5), (3,5), (4,5) ),
              Subgroup( Group( (1,5), (2,5), (3,5), (4,5) ),
                [ (1,2,5), (1,3,5), (1,4,5) ] ) ]
        """
        gap3_session = self._check_valid()
        # Same lazy-parse-and-cache pattern as recfields(), applied to the
        # record's .operations sub-record.
        if not hasattr(self,"_gap_operations"):
            s = str(gap3_session.eval("RecFields(%s.operations)" % self._name))
            s = s.strip('[] ').replace('\n','')
            self._gap_operations = [ss.strip('" ') for ss in s.split(',')]
        return getattr(self,"_gap_operations")

    def __getattr__(self, attrname):
        r"""
        OUTPUT:

        - ``GAP3Record`` -- if ``attrname`` is a field of the GAP record

        - ``ExpectFunction`` -- if ``attrname`` is the name of a GAP3
          function

        EXAMPLES::

            sage: S5 = gap3.SymmetricGroup(5)     #optional - gap3
            sage: S5.__getattr__('Size')          #optional - gap3
            Size
            sage: gap3.IsFunc(S5.__getattr__('Size'))  #optional - gap3
            true
            sage: S5.__getattr__('generators')    #optional - gap3
            [ (1,5), (2,5), (3,5), (4,5) ]
        """
        gap3_session = self._check_valid()
        # Refuse private names so Python-internal attribute lookups never
        # trigger a round-trip to the GAP3 process (and so recfields()'s own
        # hasattr checks cannot recurse into this method usefully).
        if attrname[:1] == "_":
            raise AttributeError
        if attrname in self.recfields():
            return gap3_session.new('%s.%s' % (self.name(), attrname))
        # Otherwise treat the name as a GAP3 function applied to self.
        return gap3_session._function_element_class()(self, attrname)

    def _tab_completion(self):
        r"""
        Defines the list of methods and attributes that will appear for tab
        completion.

        OUTPUT:

        - list of strings -- the available fields and operations of the
          record

        EXAMPLES::

            sage: S5 = gap3.SymmetricGroup(5)     #optional - gap3
            sage: S5._tab_completion()            #optional - gap3
            [..., 'ConjugacyClassesTry', 'ConjugateSubgroup', 'ConjugateSubgroups',
            'Core', 'DegreeOperation', 'DerivedSeries', 'DerivedSubgroup',
            'Difference', 'DimensionsLoewyFactors', 'DirectProduct', ...]
        """
        names = self.recfields() + self.operations()
        names.sort()
        return names
import os
def gap3_console():
    r"""
    Spawn a new GAP3 command-line session.

    This hands the terminal over to a fresh ``gap3`` process (the GAP3
    banner and ``gap>`` prompt appear); it can only be used from an
    interactive terminal session.

    EXAMPLES::

        sage: gap3.console()                      #not tested
        ...
        For help enter: ?<return>
        gap>
    """
    from sage.repl.rich_output.display_manager import get_display_manager
    display_manager = get_display_manager()
    if not display_manager.is_in_terminal():
        raise RuntimeError('Can use the console only in the terminal. Try %%gap3 magics instead.')
    os.system(gap3_cmd)
def gap3_version():
    r"""
    Return the version of GAP3 that you have in your PATH on your computer.

    EXAMPLES::

        sage: gap3_version()                      # random, optional - gap3
        'lib: v3r4p4 1997/04/18, src: v3r4p0 1994/07/10, sys: usg gcc ansi'
    """
    raw = gap3.eval('VERSION')
    # Drop the surrounding quote characters of the GAP3 string literal.
    return raw[1:-1]
|
<gh_stars>1-10
import datetime, yaml, re
import urllib, hashlib
from .base import BaseModule
from md2book.config import *
from md2book.templates import TemplateFiller
from md2book.formats.mdhtml import extract_toc
from md2book.util.exceptions import SimpleWarning
from md2book.util.common import download_url
class MetadataModule(BaseModule):
    """Collect document metadata (title, subtitle, author, date) from the
    target configuration and render it as a YAML front-matter block."""
    NAME = 'metadata'

    def __init__(self, conf, target):
        super().__init__(conf, target)
        meta = self.conf
        meta['title'] = target.conf['title'].strip()
        if target.conf['subtitle']:
            meta['subtitle'] = target.conf['subtitle'].strip()
        # 'by' is only a fallback; an explicitly configured author wins.
        if meta.get('author', None) is None and target.conf['by']:
            meta['author'] = target.conf['by']
        if meta.get('date', None) == 'current':
            # BUG FIX: the file does `import datetime` (the module), so the
            # class must be referenced as datetime.datetime; the original
            # `datetime.now()` raised AttributeError.
            meta['date'] = datetime.datetime.now().strftime("%d/%m/%Y")

    def get_yaml_intro(self):
        """Return the non-empty metadata entries as a pandoc-style YAML
        front-matter string, with underscores escaped so they survive
        markdown processing."""
        metadata = {key: val for key, val in self.conf.items() if val is not None}
        content = "---\n" + yaml.dump(metadata) + "\n---\n"
        content = content.replace("_", "\\_")
        return content
class ImagesModule(BaseModule):
    """Image handling: optional removal of all images from the markdown,
    cover-image registration, and alt-attribute normalisation in HTML."""
    NAME = 'images'
    REGEX_RM_IMG = r"<img.*?>|!\[.*?\]\(.*?\)"
    RE_IMG_HTML = r'<img(.*?)/?>'
    RE_EMPTY_ALT = r'alt=([\'"])\1'

    def __init__(self, conf, target):
        super().__init__(conf, target)
        conf['remove'] = bool(conf['remove'])
        cover = conf['cover']
        if cover:
            # Resolve the cover path relative to the target file and expose
            # it to pandoc via the metadata module's configuration.
            resolved = str((target.path.parent / cover).resolve())
            conf['cover'] = resolved
            target.conf['metadata']['cover-image'] = resolved

    def replace_html_images(self, match):
        """Ensure every <img> tag carries an alt attribute and is
        self-closing."""
        attributes = match.group(1)
        if " alt=" not in attributes:
            attributes = attributes + ' alt="Image" '
        return '<img{}/>'.format(attributes)

    def alter_md(self, code):
        """Strip all markdown/HTML images when removal is configured."""
        if self.conf['remove']:
            code.code = re.sub(self.REGEX_RM_IMG, '', code.code)

    def alter_html(self, code):
        """Normalise <img> tags and fill in empty alt attributes."""
        code.code = re.sub(self.RE_IMG_HTML, self.replace_html_images, code.code)
        code.code = re.sub(self.RE_EMPTY_ALT, 'alt="Image"', code.code)
class TitlePageModule(BaseModule):
    """Prepend a rendered title page to the HTML output; epub is skipped
    because it gets its cover through the metadata instead."""
    NAME = 'titlepage'

    def alter_html(self, code):
        if self.format == 'epub':
            return
        with open(TITLE_PAGE_TEMPLATE, 'r') as template_file:
            raw_template = template_file.read()
        rendered = TemplateFiller(self.target).fill(raw_template)
        code.code = rendered + code.code
class TocModule(BaseModule):
    # Table-of-contents handling: pandoc flags for formats where pandoc
    # builds the TOC, and [TOC] marker substitution for the others.
    # NOTE(review): indentation reconstructed from stripped source; the
    # nesting of the option appends below is the most plausible reading —
    # confirm against the original file.
    NAME = 'toc'

    def pandoc_options(self, dest_format):
        """Extend the pandoc command line with TOC-related options."""
        options = super().pandoc_options(dest_format)
        chapter_level = 1
        if self.conf['enable']:
            # 'enable' doubles as the epub chapter split level, clamped to 1..3.
            chapter_level = min(3, max(1, int(self.conf['enable'])))
            options.append("--toc")
            options.append('--toc-depth=' + str(self.conf['level']))
        if dest_format == 'epub':
            options.append("--epub-chapter-level=" + str(chapter_level))
        return options

    def alter_md(self, code):
        """Insert, expand, or strip the [TOC] marker depending on format."""
        if self.format in ['html', 'pdf', 'markdown', 'txt'] and self.conf['enable']:
            code.code = '[TOC]\n\n' + code.code
        if self.format in ['epub'] and '[TOC]' in code.code:
            # epub gets the TOC expanded inline rather than via the marker.
            toc_html = extract_toc(code.code, self.conf['level'], self.format)
            code.code = code.code.replace('[TOC]', toc_html)
        if self.format in ['odt', 'epub', 'docx', 'html_light']:
            # None means "not configured explicitly": infer enablement from
            # the presence of the marker, then remove the marker itself.
            if self.conf['enable'] is None:
                self.conf['enable'] = '[TOC]' in code.code
            code.code = code.code.replace('[TOC]', '')

    def get_stylesheets(self):
        """Return the CSS files implementing the configured TOC style
        (base.css plus a named variant when it is not 'base')."""
        styles = []
        style = self.conf['style'].strip()
        if style:
            styles.append(DATA_PATH / 'styles/toc/base.css')
            if style != 'base':
                styles.append(DATA_PATH / 'styles/toc/{}.css'.format(style))
        return styles
class HtmlBlocksModule(BaseModule):
    """HTML post-processing: strip HTML comments, normalise <br> tags, and
    inline local SVG images for html/pdf output."""
    # NOTE(review): unlike the sibling modules this class defines no NAME
    # attribute — confirm whether that is intentional.
    RE_BR = r'<br(.*?)>'
    REGEX_COMMENTS = r"<!--([\s\S]*?)-->"
    RE_SVG_IMAGE = r'(<img .*?src="(.*?\.svg)".*?>)'

    def __init__(self, conf, target):
        super().__init__(conf, target)
        # Only html/pdf output inlines SVG; other formats keep the <img> tag.
        self.alter_svg = target.format in ['html', 'pdf']

    def alter_html(self, code):
        code.code = re.sub(self.REGEX_COMMENTS, '', code.code)
        code.code = re.sub(self.RE_BR, '<br />', code.code)
        if self.alter_svg:
            code.code = re.sub(self.RE_SVG_IMAGE, self.svg_inserter, code.code)

    def svg_inserter(self, match):
        """Replace an <img> that points at a local .svg file with the inline
        SVG markup; remote URLs are returned unchanged."""
        path = match.group(2)
        if path.startswith('http://') or path.startswith('https://'):
            # BUG FIX: the original returned `math.group(1)` (the math
            # module) instead of `match.group(1)`, which raised a NameError/
            # AttributeError for every remote SVG image.
            return match.group(1)
        with open(str(path)) as f:
            # Skip the first two lines (XML prolog/doctype) before the
            # <svg> root element — presumably always present in these files;
            # TODO confirm for SVGs from other generators.
            xml = f.readlines()[2:]
        xml = ''.join(xml)
        return xml
class LatexModule(BaseModule):
NAME = 'latex'
TEX_BLOCKS = r'\$\$([\s\S]*?)\$\$'
TEX_INLINE = r'\$(.*?)\$'
SVG_SIZE= r'width="(.*?)" height="(.*?)"'
# XML_PROP_RE = r'[\s\S]*?<svg.*?width="(.*?)".*?height="(.*?)".*?role="img"[\s\S]*?'
# IMAGE_TEMPLATE = ''
IMAGE_INLINE = '<img src="{}" style="width:{}; height: {};" />'
IMAGE_BLOCK = '<p class="centerblock"><img src="{}" style="width:{}; height: {};" /></p>'
ALIASES = {
r'\\R' : r'\\mathbb{R}',
r'\\infin' : r'\\infty',
}
def __init__(self, conf, target):
super().__init__(conf, target)
self.enabled = bool(self.conf)
self.download_status = None
self.equations_names = set()
self.relative_path = target.format in ['md']
if self.enabled:
self.download_dir = target.compile_dir / 'latex'
self.download_dir.mkdir(parents=True, exist_ok=True)
def get_latex_image_code(self, path, inline=True):
with open(str(path)) as f:
xml = f.readlines()
xml = ''.join(xml)
sizes = re.search(self.SVG_SIZE, xml)
template = self.IMAGE_INLINE if inline else self.IMAGE_BLOCK
path = ('latex/' + path.name) if self.relative_path else str(path)
return template.format(path, sizes.group(1), sizes.group(2))
def clean_cache(self):
for file in self.download_dir.iterdir():
if not file.name in self.equations_names:
file.unlink()
def preprocess_text(self, texcode):
a = texcode
for alias in self.ALIASES:
texcode = re.sub(alias + r'(?=([^a-zA-Z\d]|$))', self.ALIASES[alias], texcode)
return texcode
def get_inserter(self, argname):
def match_latex(match):
if self.download_status == 'offline':
return ''
texcode = match.group(1).strip()
texcode = self.preprocess_text(texcode)
args = urllib.parse.urlencode({'color' : 'black', argname : texcode})
url = 'https://math.vercel.app?' + args
filename = hashlib.md5(url.encode('utf-8')).hexdigest()
path = self.download_dir / (filename + '.svg')
if not path.resolve().is_file():
if self.download_status is None:
print("Download LaTeX images from math.now.sh...")
result_path = download_url(url, path=str(path))
if result_path is None:
self.download_status = 'offline'
return ''
else:
self.download_status = 'ok'
# path = path.relative_to(self.target.compile_dir)
path = path.resolve()
self.equations_names.add(path.name)
return self.get_latex_image_code(path, inline=(argname == 'inline'))
return match_latex
    def alter_md(self, code):
        """Replace TeX blocks and inline snippets in *code* with rendered images."""
        if self.enabled:
            code.code = re.sub(self.TEX_BLOCKS, self.get_inserter('from'), code.code)
            code.code = re.sub(self.TEX_INLINE, self.get_inserter('inline'), code.code)
            if self.download_status == 'offline':
                SimpleWarning('The math.now.sh API is not accessible, LaTeX may not be rendered').show()
            elif self.download_status == 'ok':
                print("LaTeX download complete")
            # Only prune the cache when downloads worked; pruning while
            # offline would delete images we cannot re-download.
            if self.download_status != 'offline':
                self.clean_cache()
<gh_stars>0
# -*- coding: utf-8 -*
# Copyright (c) 2019 BuildGroup Data Services Inc.
# All rights reserved.
import json
import datetime
import inspect
from django.utils import six, timezone
from django.utils.timezone import utc
try:
from dse.cqlengine.usertype import UserType
except ImportError:
from cassandra.cqlengine.usertype import UserType
from django.contrib.gis.measure import Distance
from rest_framework import fields, serializers, ISO_8601
from rest_framework.settings import api_settings
class CurrentUserNameDefault(object):
    """Serializer-field default yielding the requesting user's username.

    Mirrors DRF's ``CurrentUserDefault`` but returns ``user.username``
    (or ``None`` when there is no user on the request).
    """

    def set_context(self, serializer_field):
        # DRF calls this before __call__, providing the serializer context.
        self.user = serializer_field.context["request"].user

    def __call__(self):
        return self.user.username if self.user else None

    def __repr__(self):
        # Fixed: the original wrapped the result in repr(), which produced
        # extra quotes like "'CurrentUserNameDefault()'".
        return "%s()" % self.__class__.__name__
class CassandraDateTimeField(fields.DateTimeField):
    """
    DateTimeField variant for Cassandra.

    Cassandra stores timestamps as timezone-naive values (milliseconds
    since the UNIX epoch), so aware datetimes are normalised to naive UTC.
    ref: https://datastax.github.io/python-driver/dates_and_times.html
    """

    def enforce_timezone(self, value):
        # Naive values pass through untouched; aware ones are converted
        # to naive UTC before being handed to the driver.
        if not timezone.is_aware(value):
            return value
        return timezone.make_naive(value, utc)
class CassandraDateField(fields.DateField):
    """
    Date object in Cassandra do not have isoformat method
    we need to override the to_representation method to extract the
    date from the cassandra Date first
    """
    def to_representation(self, value):
        # Falsy values (None, empty string) serialise to None.
        if not value:
            return None
        output_format = getattr(self, "format", api_settings.DATE_FORMAT)
        # format=None means "pass through"; strings are already serialised.
        if output_format is None or isinstance(value, six.string_types):
            return value
        # Applying a `DateField` to a datetime value is almost always
        # not a sensible thing to do, as it means naively dropping
        # any explicit or implicit timezone info.
        assert not isinstance(value, datetime.datetime), (
            "Expected a `date`, but got a `datetime`. Refusing to coerce, "
            "as this may mean losing timezone information. Use a custom "
            "read-only field and deal with timezone issues explicitly."
        )
        # We are using Cassandra Model serializers with non Cassandra
        # objects when doing Solr Searches.
        # The results or Solr searches are SearchResult objects
        # and the dates fields in this case are objects of
        # type datetime.date
        if output_format.lower() == ISO_8601:
            if isinstance(value, datetime.date):
                return value.isoformat()
            # Cassandra Date objects expose .date(); take the date part.
            return value.date().isoformat()
        return (
            value.strftime(output_format) if isinstance(value, datetime.date) else value.date().strftime(output_format)
        )
class CassandraJSONFieldAsText(fields.JSONField):
    """
    JSON stored as text for Cassandra.

    Cassandra has no native JSON column type, so values are persisted as
    text and converted to/from dict objects at the API boundary.
    """

    def to_internal_value(self, data):
        # JSON strings are round-tripped (parse + re-dump) to normalise
        # them; any other value is serialised directly.
        is_json_string = self.binary or getattr(data, "is_json_string", False)
        try:
            parsed = json.loads(data) if is_json_string else data
            data = json.dumps(parsed)
        except (TypeError, ValueError):
            self.fail("invalid")
        return data

    def to_representation(self, value):
        # An empty/whitespace-only text column becomes an empty dict.
        stripped = value.strip()
        return json.loads(stripped) if stripped else {}
class CassandraUDTField(fields.JSONField):
    """
    Serializer field for a Cassandra user-defined type (UDT).

    Values travel as JSON through the API and are materialised as
    instances of the configured ``udt`` class internally.
    """
    # Default UDT class; normally supplied via the `udt` kwarg.
    udt = None

    def __init__(self, *args, **kwargs):
        self.udt = kwargs.pop("udt", self.udt)
        assert inspect.isclass(self.udt), "`udt` has been instantiated."
        # Fixed: the original assertion message was copied from an hstore
        # field ("`child` ... `CharField`") and did not describe this check.
        assert issubclass(self.udt, UserType), (
            "The `udt` argument must be a subclass of "
            "`cassandra.cqlengine.usertype.UserType`."
        )
        # Deliberately skips JSONField.__init__ (keeps Field.__init__ only).
        super(fields.JSONField, self).__init__(*args, **kwargs)

    def to_internal_value(self, data):
        try:
            if self.binary or getattr(data, "is_json_string", False):
                data = self.udt(**json.loads(data))
            else:
                data = self.udt(**data)
        except (TypeError, ValueError):
            self.fail("invalid")
        return data

    def to_representation(self, value):
        return json.loads(value)
class DistanceField(fields.FloatField):
    """
    Serialises a GeoDjango ``Distance`` as a float in the configured units.

    When spatial queries are used, the distance annotation is a
    ``Distance`` object rather than a plain number; this field converts
    both ways.
    """

    def __init__(self, *args, **kwargs):
        # Fixed: removed a leftover `udt = None` class attribute that was
        # copy-pasted from CassandraUDTField and never used here.
        self.units = kwargs.pop("units", "m")
        assert isinstance(self.units, str), "The `units` argument must be an instance of `str`."
        assert self.units in Distance.UNITS.keys(), "`{}` invalid units.".format(self.units)
        # NOTE(review): super(fields.FloatField, ...) deliberately skips
        # FloatField.__init__ — confirm this is intended.
        super(fields.FloatField, self).__init__(*args, **kwargs)

    def to_internal_value(self, data):
        try:
            params = {"{}".format(self.units): data}
            data = Distance(**params)
        except (TypeError, ValueError):
            self.fail("invalid")
        return data

    def to_representation(self, value):
        # Distance exposes each unit as an attribute, e.g. value.m, value.km.
        return getattr(value, self.units)
|
# -*- encoding: utf-8 -*-
'''
Model Managers RexChain
BlockManager
RXmanager
TXmanager
'''
import json
import logging
from datetime import timedelta
from django.db import models
from django.apps import apps
from django.conf import settings
from django.utils import timezone
from django.core.cache import cache
from core.utils import Hashcash
from core.helpers import safe_set_cache, get_timestamp, logger_debug
from core.connectors import ReachCore as PoE
from nom151.models import ConservationCertificate
from .helpers import genesis_hash_generator, GENESIS_INIT_DATA, get_genesis_merkle_root, CryptoTools
from .utils import calculate_hash, pubkey_base64_to_rsa, ordered_data, iterate_and_order_json
from .querysets import (
PayloadQueryset,
TransactionQueryset,
AddressQueryset,
)
from .RSAaddresses import AddressBitcoin
logger = logging.getLogger('django_info')
class BlockManager(models.Manager):
    ''' Model Manager for Blocks '''
    def create_block(self, tx_queryset, hashcash, counter):
        # Chain onto the last block, or onto a freshly created genesis
        # block when the chain is still empty.
        Block = apps.get_model('blockchain', 'Block')
        last_block = Block.objects.last()
        if last_block is None:
            genesis = self.get_genesis_block()
            return self.generate_next_block(genesis.hash_block, tx_queryset, hashcash, counter)
        else:
            return self.generate_next_block(last_block.hash_block, tx_queryset, hashcash, counter)
    def get_genesis_block(self):
        # Get the genesis arbitrary block of the blockchain only once in life
        Block = apps.get_model('blockchain', 'Block')
        genesis_block = Block.objects.create(
            hash_block=genesis_hash_generator(),
            data=GENESIS_INIT_DATA,
            merkleroot=get_genesis_merkle_root())
        genesis_block.previous_hash = "0"
        genesis_block.save()
        return genesis_block
    def generate_next_block(self, hash_before, tx_queryset, hashcash, nonce):
        """ Generate a new block chained to *hash_before*.

        Builds the block hash from (id, previous hash, timestamp, sum of
        transaction hashes), attaches the PoW fields, requests a
        proof-of-existence certificate for the merkle root, and persists
        both the block and the certificate.
        """
        new_block = self.create(previous_hash=hash_before)
        new_block.save()
        data_block = new_block.get_block_data(tx_queryset)
        new_block.hash_block = calculate_hash(new_block.id, hash_before,
                                              str(new_block.timestamp), data_block["sum_hashes"])
        # Add Merkle Root
        new_block.merkleroot = data_block["merkleroot"]
        # Add hashcash
        new_block.hashcash = hashcash
        # Add nonce
        new_block.nonce = nonce
        # Proof of Existence layer
        connector = PoE()
        xml_response = connector.generate_proof(new_block.merkleroot)
        new_block.data["xml_response"] = xml_response
        # Save
        new_block.save()
        # Save response on a new table too
        certificate = ConservationCertificate(
            folio=xml_response["Folio"],
            raw_document=xml_response["Constancia"],
            reference=new_block.merkleroot
        )
        certificate.block = new_block
        certificate.data["xml_response"] = xml_response
        certificate.save()
        return new_block
class TransactionManager(models.Manager):
    ''' Manager for Payloads '''
    # Shared crypto helper used for signature verification and key handling.
    _crypto = CryptoTools(has_legacy_keys=False)
    def get_queryset(self):
        return TransactionQueryset(self.model, using=self._db)
    def has_not_block(self):
        # Transactions not yet sealed into any block.
        return self.get_queryset().has_not_block()
    def create_block_attempt(self, counter, challenge):
        '''
        Use PoW hashcash algorithm to attempt to create a block.

        On success the pending transactions are sealed into a new block
        and a fresh challenge/counter pair is cached; on failure the
        counter is incremented for the next attempt.
        '''
        Block = apps.get_model('blockchain', 'Block')
        _hashcash_tools = Hashcash(debug=settings.DEBUG)
        is_valid_hashcash, hashcash_string = _hashcash_tools.calculate_sha(challenge, counter)
        if is_valid_hashcash:
            Block.objects.create_block(self.has_not_block(), hashcash_string, counter)
            challenge = _hashcash_tools.create_challenge(word_initial=settings.HC_WORD_INITIAL)
            safe_set_cache('challenge', challenge)
            safe_set_cache('counter', 0)
        else:
            counter = counter + 1
            safe_set_cache('counter', counter)
    def is_transfer_valid(self, data, _previous_hash, pub_key, _signature):
        ''' Method to handle transfer validity!

        Returns a ``(is_valid, previous_payload)`` tuple.
        '''
        Payload = apps.get_model('blockchain', 'Payload')
        if not Payload.objects.check_existence(data['previous_hash']):
            logger.info("[IS_TRANSFER_VALID] Send a transfer with a wrong reference previous_hash!")
            return (False, None)
        before_rx = Payload.objects.get(hash_id=data['previous_hash'])
        if not before_rx.readable:
            logger.info("[IS_TRANSFER_VALID]The before_rx is not readable")
            return (False, before_rx)
        # TODO add verify method completed on transfer also check wallet compatibility!
        # NOTE(review): this dumps-check only logs failures; the transfer is
        # still accepted below — confirm that is intended.
        try:
            json.dumps(data, separators=(',', ':'), ensure_ascii=False)
        except Exception as e:
            logger.error("[ERROR in reading data] {}, Type {}".format(e, type(e)))
        # if not self._crypto.verify(_msg, _signature, self._crypto.un_savify_key(before_rx.public_key)):
        #     logger.info("[IS_TRANSFER_VALID]Signature is not valid!")
        #     return (False, before_rx)
        logger.info("[IS_TRANSFER_VALID] Success")
        return (True, before_rx)
    def create_tx(self, data, **kwargs):
        ''' Custom method for create Tx with rx item.

        Validates either the signature (initial tx, previous_hash == "0")
        or the transfer chain, then creates the Transaction, its Payload,
        and finally performs one PoW block attempt.
        '''
        # Logic to obtains the counter and challenge variables from Redis
        _hashcash_tools = Hashcash(debug=settings.DEBUG)
        counter = cache.get('counter')
        challenge = cache.get('challenge')
        if not counter and not challenge:
            challenge = _hashcash_tools.create_challenge(word_initial=settings.HC_WORD_INITIAL)
            safe_set_cache('challenge', challenge)
            safe_set_cache('counter', 0)
        ''' Get initial data '''
        _payload = ""
        _signature = data.pop("signature", None)
        _previous_hash = data.pop("previous_hash", "0")
        # Get Public Key from API None per default
        raw_pub_key = data.get("public_key", None)
        if not raw_pub_key:
            logger.error("[get public key ERROR]: Couldn't find public key outside data")
            data = data["data"]
        ''' When timestamp is convert to python datetime needs this patch '''
        # timestamp = data["timestamp"]
        # timestamp.replace(tzinfo=timezone.utc)
        # data["timestamp"] = timestamp.isoformat()
        # Initalize some data
        try:
            # First we order the sub lists and sub jsons
            data = iterate_and_order_json(data)
            # Then we order the json
            data_sorted = ordered_data(data)
            _payload = json.dumps(data_sorted, separators=(',', ':'), ensure_ascii=False)
        except Exception as e:
            logger.error("[create_tx ERROR]: {}, type:{}".format(e, type(e)))
        _is_valid_tx = False
        _rx_before = None
        try:
            # Prescript unsavify method
            pub_key = self._crypto.un_savify_key(raw_pub_key)
        except Exception:
            logger.error("[create_tx WARNING]: The key is base64")
            # Attempt to create public key with base64 with js payload
            pub_key, raw_pub_key = pubkey_base64_to_rsa(raw_pub_key)
        hex_raw_pub_key = self._crypto.savify_key(pub_key)
        ''' Get previous hash '''
        # _previous_hash = data.get('previous_hash', '0')
        logger.info("previous_hash: {}".format(_previous_hash))
        ''' Check initial or transfer '''
        if _previous_hash == '0':
            # It's a initial transaction
            logger_debug(_payload)
            if self._crypto.verify(_payload, _signature, pub_key):
                logger.info("[create_tx] Tx valid!")
                _is_valid_tx = True
        else:
            # Its a transfer, so check validite transaction
            data["previous_hash"] = _previous_hash
            _is_valid_tx, _rx_before = self.is_transfer_valid(data, _previous_hash, pub_key, _signature)
        ''' FIRST Create the Transaction '''
        tx = self.create_raw_tx(data, _is_valid_tx=_is_valid_tx, _signature=_signature, pub_key=pub_key)
        ''' THEN Create the Data Item(Payload) '''
        Payload = apps.get_model('blockchain', 'Payload')
        rx = Payload.objects.create_rx(
            data,
            _signature=_signature,
            pub_key=hex_raw_pub_key,  # This is basically the address
            _is_valid_tx=_is_valid_tx,
            _rx_before=_rx_before,
            transaction=tx,
        )
        ''' LAST do create block attempt '''
        self.create_block_attempt(counter, challenge)
        # Return the rx for transaction object
        return rx
    def create_raw_tx(self, data, **kwargs):
        ''' This method just create the transaction instance '''
        ''' START TX creation '''
        Transaction = apps.get_model('blockchain', 'Transaction')
        tx = Transaction()
        # Get Public Key from API
        tx.signature = kwargs.get("_signature", None)
        tx.is_valid = kwargs.get("_is_valid_tx", False)
        tx.timestamp = timezone.now()
        # Set previous hash: "0" for the very first transaction.
        if self.last() is None:
            tx.previous_hash = "0"
        else:
            tx.previous_hash = self.last().txid
        # Create raw data to generate hash and save it
        tx.create_raw_msg()
        tx.hash()
        tx.save()
        ''' RETURN TX '''
        return tx
class PayloadManager(models.Manager):
    ''' Manager for Payload Model '''
    def get_queryset(self):
        return PayloadQueryset(self.model, using=self._db)
    def range_by_hour(self, date_filter):
        # Payloads within the hour around date_filter (queryset-defined).
        return self.get_queryset().range_by_hour(date_filter)
    def non_validated_rxs(self):
        return self.get_queryset().non_validated_rxs()
    def total_medics(self):
        return self.get_queryset().total_medics()
    def rx_by_today(self, date_filter):
        return self.get_queryset().rx_by_today(date_filter)
    def rx_by_month(self, date_filter):
        return self.get_queryset().rx_by_month(date_filter)
    def get_stats_last_hours(self, hours=10):
        ''' Return a list of last rx created by given last hours.

        Each entry is ``[timestamp, count]``; the list starts at "now" and
        walks back one hour per iteration (hours + 1 entries in total).
        '''
        RANGE_HOUR = 1
        _list = []
        _time = timezone.now()
        _list.append([get_timestamp(_time), self.range_by_hour(_time).count()])
        for i in range(0, hours):
            _time = _time - timedelta(hours=RANGE_HOUR)
            _list.append([get_timestamp(_time), self.range_by_hour(_time).count()])
        return _list
    def check_existence(self, previous_hash):
        return self.get_queryset().check_existence(previous_hash)
    def create_rx(self, data, **kwargs):
        # Thin wrapper kept for API symmetry with create_raw_rx.
        rx = self.create_raw_rx(data, **kwargs)
        return rx
    def create_raw_rx(self, data, **kwargs):
        ''' Build and persist a Payload from *data* and transaction context. '''
        # This calls the super method saving all clean data first
        Payload = apps.get_model('blockchain', 'Payload')
        _rx_before = kwargs.get('_rx_before', None)
        rx = Payload(
            data=data,
            timestamp=data.get("timestamp", timezone.now()),
            public_key=kwargs.get("pub_key", ""),
            signature=kwargs.get("_signature", ""),
            is_valid=kwargs.get("_is_valid_tx", False),
            transaction=kwargs.get("transaction", None)
        )
        if "files" in data:
            rx.files = data["files"]
        if "location" in data:
            rx.location = data["location"]
        # Save previous hash: "0" marks a brand-new (non-transfer) payload.
        if _rx_before is None:
            logger.info("[CREATE_RX] New transaction!")
            rx.previous_hash = "0"
            rx.readable = True
        else:
            logger.info("[CREATE_RX] New transaction transfer!")
            rx.previous_hash = _rx_before.hash_id
            if rx.is_valid:
                logger.info("[CREATE_RX] Tx transfer is valid!")
                rx.readable = True
                # Ownership moves to the new payload.
                _rx_before.transfer_ownership()
            else:
                logger.info("[CREATE_RX] Tx transfer not valid!")
        rx.create_raw_msg()
        rx.hash()
        rx.save()
        return rx
class AddressManager(models.Manager):
    ''' Add custom Manager '''
    def get_queryset(self):
        return AddressQueryset(self.model, using=self._db)
    def check_existence(self, public_key_b64):
        return self.get_queryset().check_existence(public_key_b64)
    def get_rsa_address(self, public_key_b64):
        return self.get_queryset().get_rsa_address(public_key_b64)
    def create_rsa_address(self, public_key_b64):
        ''' Method to create new rsa address '''
        _addresses_generator = AddressBitcoin()
        _new_raw_address = _addresses_generator.create_address_bitcoin(public_key_b64)
        rsa_address = self.create(
            public_key_b64=public_key_b64,
            address=_new_raw_address,
        )
        # NOTE(review): Manager.create() already persists the row; this
        # extra save() looks redundant — confirm before removing.
        rsa_address.save()
        return rsa_address.address
    def get_or_create_rsa_address(self, public_key_b64):
        ''' Check existence of address for public key '''
        if self.check_existence(public_key_b64):
            ''' Return correct address '''
            return self.get_rsa_address(public_key_b64)
        else:
            ''' Return a new address for the public key '''
            return self.create_rsa_address(public_key_b64)
|
# This file is part of beets.
# Copyright 2016, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import time
from datetime import datetime
import unittest
from test.helper import TestHelper
from confuse import ConfigValueError
class TypesPluginTest(unittest.TestCase, TestHelper):
    """Integration tests for the beets `types` plugin.

    Each test declares flexible-attribute types via the config, modifies
    items or albums through the CLI, and checks typed query behaviour.
    """
    def setUp(self):
        self.setup_beets()
        self.load_plugins('types')
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    def test_integer_modify_and_query(self):
        self.config['types'] = {'myint': 'int'}
        item = self.add_item(artist='aaa')
        # Do not match unset values
        out = self.list('myint:1..3')
        self.assertEqual('', out)
        self.modify('myint=2')
        item.load()
        self.assertEqual(item['myint'], 2)
        # Match in range
        out = self.list('myint:1..3')
        self.assertIn('aaa', out)
    def test_album_integer_modify_and_query(self):
        self.config['types'] = {'myint': 'int'}
        album = self.add_album(albumartist='aaa')
        # Do not match unset values
        out = self.list_album('myint:1..3')
        self.assertEqual('', out)
        self.modify('-a', 'myint=2')
        album.load()
        self.assertEqual(album['myint'], 2)
        # Match in range
        out = self.list_album('myint:1..3')
        self.assertIn('aaa', out)
    def test_float_modify_and_query(self):
        self.config['types'] = {'myfloat': 'float'}
        item = self.add_item(artist='aaa')
        # Do not match unset values
        out = self.list('myfloat:10..0')
        self.assertEqual('', out)
        self.modify('myfloat=-9.1')
        item.load()
        self.assertEqual(item['myfloat'], -9.1)
        # Match in range
        out = self.list('myfloat:-10..0')
        self.assertIn('aaa', out)
    def test_bool_modify_and_query(self):
        self.config['types'] = {'mybool': 'bool'}
        true = self.add_item(artist='true')
        false = self.add_item(artist='false')
        self.add_item(artist='unset')
        # Do not match unset values
        out = self.list('mybool:true, mybool:false')
        self.assertEqual('', out)
        # Set true
        self.modify('mybool=1', 'artist:true')
        true.load()
        self.assertEqual(true['mybool'], True)
        # Set false
        self.modify('mybool=false', 'artist:false')
        false.load()
        self.assertEqual(false['mybool'], False)
        # Query bools
        out = self.list('mybool:true', '$artist $mybool')
        self.assertEqual('true True', out)
        out = self.list('mybool:false', '$artist $mybool')
        # Dealing with unset fields?
        # self.assertEqual('false False', out)
        # out = self.list('mybool:', '$artist $mybool')
        # self.assertIn('unset $mybool', out)
    def test_date_modify_and_query(self):
        self.config['types'] = {'mydate': 'date'}
        # FIXME parsing should also work with default time format
        self.config['time_format'] = '%Y-%m-%d'
        old = self.add_item(artist='prince')
        new = self.add_item(artist='britney')
        # Do not match unset values
        out = self.list('mydate:..2000')
        self.assertEqual('', out)
        self.modify('mydate=1999-01-01', 'artist:prince')
        old.load()
        self.assertEqual(old['mydate'], mktime(1999, 1, 1))
        self.modify('mydate=1999-12-30', 'artist:britney')
        new.load()
        self.assertEqual(new['mydate'], mktime(1999, 12, 30))
        # Match in range
        out = self.list('mydate:..1999-07', '$artist $mydate')
        self.assertEqual('prince 1999-01-01', out)
        # FIXME some sort of timezone issue here
        # out = self.list('mydate:1999-12-30', '$artist $mydate')
        # self.assertEqual('britney 1999-12-30', out)
    def test_unknown_type_error(self):
        self.config['types'] = {'flex': 'unkown type'}
        with self.assertRaises(ConfigValueError):
            self.run_command('ls')
    def test_template_if_def(self):
        # Tests for a subtle bug when using %ifdef in templates along with
        # types that have truthy default values (e.g. '0', '0.0', 'False')
        # https://github.com/beetbox/beets/issues/3852
        self.config['types'] = {'playcount': 'int', 'rating': 'float',
                                'starred': 'bool'}
        with_fields = self.add_item(artist='prince')
        self.modify('playcount=10', 'artist=prince')
        self.modify('rating=5.0', 'artist=prince')
        self.modify('starred=yes', 'artist=prince')
        with_fields.load()
        without_fields = self.add_item(artist='britney')
        int_template = '%ifdef{playcount,Play count: $playcount,Not played}'
        self.assertEqual(with_fields.evaluate_template(int_template),
                         'Play count: 10')
        self.assertEqual(without_fields.evaluate_template(int_template),
                         'Not played')
        float_template = '%ifdef{rating,Rating: $rating,Not rated}'
        self.assertEqual(with_fields.evaluate_template(float_template),
                         'Rating: 5.0')
        self.assertEqual(without_fields.evaluate_template(float_template),
                         'Not rated')
        bool_template = '%ifdef{starred,Starred: $starred,Not starred}'
        self.assertIn(with_fields.evaluate_template(bool_template).lower(),
                      ('starred: true', 'starred: yes', 'starred: y'))
        self.assertEqual(without_fields.evaluate_template(bool_template),
                         'Not starred')
    # CLI helpers: thin wrappers around run_with_output used by the tests.
    def modify(self, *args):
        return self.run_with_output('modify', '--yes', '--nowrite',
                                    '--nomove', *args)
    def list(self, query, fmt='$artist - $album - $title'):
        return self.run_with_output('ls', '-f', fmt, query).strip()
    def list_album(self, query, fmt='$albumartist - $album - $title'):
        return self.run_with_output('ls', '-a', '-f', fmt, query).strip()
def mktime(*args):
    """Convert datetime-constructor args to a POSIX timestamp (local time)."""
    moment = datetime(*args)
    return time.mktime(moment.timetuple())
def suite():
    """Collect this module's tests for the unittest runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main(defaultTest='suite')
|
# Generated by Django 2.0.1 on 2018-11-27 10:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: retargets a batch of ForeignKeys on the
    # core models. All use on_delete=CASCADE; most become nullable/blank,
    # while aluno_aula.aula_id, aluno_exercicio.exercicio_id and
    # material.aula_id get a default of pk=1. Avoid hand-editing the
    # operations below.
    dependencies = [
        ('core', '0036_auto_20181124_1705'),
    ]
    operations = [
        migrations.AlterField(
            model_name='aluno_aula',
            name='aula_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.Aula', verbose_name='Aula'),
        ),
        migrations.AlterField(
            model_name='aluno_exercicio',
            name='exercicio_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.Exercicio', verbose_name='Exercício'),
        ),
        migrations.AlterField(
            model_name='aula',
            name='exercise_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Exercicio', verbose_name='Exercício'),
        ),
        migrations.AlterField(
            model_name='aula',
            name='experimentation_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Experimentacao', verbose_name='Experimentacao'),
        ),
        migrations.AlterField(
            model_name='exercicio',
            name='class_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Aula', verbose_name='Aula'),
        ),
        migrations.AlterField(
            model_name='exercicio',
            name='experimentation_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Experimentacao', verbose_name='Experimentacao'),
        ),
        migrations.AlterField(
            model_name='experimentacao',
            name='class_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Aula', verbose_name='Aula'),
        ),
        migrations.AlterField(
            model_name='experimentacao',
            name='exercise_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Exercicio', verbose_name='Exercício'),
        ),
        migrations.AlterField(
            model_name='forum',
            name='class_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Aula', verbose_name='Aula'),
        ),
        migrations.AlterField(
            model_name='forum',
            name='exercise_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Exercicio', verbose_name='Exercício'),
        ),
        migrations.AlterField(
            model_name='forum',
            name='experimentation_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Experimentacao', verbose_name='Experimentacao'),
        ),
        migrations.AlterField(
            model_name='material',
            name='aula_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='materials', to='core.Aula', verbose_name='Aula'),
        ),
        migrations.AlterField(
            model_name='pergunta',
            name='exercise_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Exercicio', verbose_name='Exercício'),
        ),
    ]
|
<filename>convert.py
#!/usr/bin/python3
# Convert data from Kartverket and Posten into GeoJson
import xml.etree.ElementTree as ET
import json
import pyproj
from shapely.geometry import Point, Polygon, LineString, mapping
from shapely.ops import linemerge, transform
import csv
# Log configuration
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s', level=logging.INFO)
# Postcode category helpers
postcode_categories = {}
def postcode_category(postcode):
    """Return Posten's category letter for *postcode* ('U' when unknown)."""
    global postcode_categories
    if not postcode_categories:
        # Lazily populate the lookup table from Posten's register file
        # (column 0 = postcode, column 4 = category).
        with open('input/register.tsv') as tsv:
            for row in csv.reader(tsv, delimiter="\t"):
                postcode_categories[row[0]] = row[4]
    return postcode_categories.get(postcode, "U")  # "U" = unknown
# GML parsing helper functions
def ringToList(ring):
    """Flatten the gml:posList elements of *ring* into a list of (x, y) tuples."""
    points = []
    for poslist in ring.findall('.//gml:posList', ns):
        coords = [float(token) for token in poslist.text.split()]
        # Consecutive numbers pair up as (x, y) coordinates.
        it = iter(coords)
        points.extend(zip(it, it))
    return points
# Start conversion
kartverketfile = 'input/Basisdata_0000_Norge_25833_Postnummeromrader_GML.gml'
# Handle namespaces
logging.info("reading namespaces")
# Collect every namespace declared anywhere in the document so that the
# namespaced find()/findall() lookups below resolve correctly.
ns = dict([node for _, node in ET.iterparse(kartverketfile, events=['start-ns'])])
logging.info(json.dumps(ns, indent=4, sort_keys=True))
# Parse file
tree = ET.parse(kartverketfile)
root = tree.getroot()
# Projection: EPSG:25833 (UTM 33N, as delivered by Kartverket) to
# EPSG:4326 (WGS84 lon/lat, as required by GeoJSON).
proj = pyproj.Transformer.from_proj(
    pyproj.CRS(25833),  # source coordinate system
    pyproj.CRS(4326),  # destination coordinate system wsg
    always_xy=True
).transform
# The one and only map: postcode -> list of polygon records.
postalcodes = {}
for feature in root.findall('.//app:Postnummerområde', ns):
    try:
        municipality = feature.find('app:kommune', ns).text
        postalcode = feature.find('.//app:postnummer', ns).text
        city = feature.find('.//app:poststed', ns).text
        date = feature.find('app:datauttaksdato', ns).text
        exterior = ringToList(feature.find('.//gml:exterior', ns))
        interiors = []
        for interior in feature.findall('.//gml:interior', ns):
            interiors.append(ringToList(interior))
        pol = Polygon(exterior, interiors)
        area = pol.area
        valid = pol.is_valid
        simple = pol.is_simple
        c1 = pol.centroid
        polwsg = transform(proj, pol)
        c2 = polwsg.centroid
        # add empty array if it doesn't exist
        if postalcode not in postalcodes:
            postalcodes[postalcode] = []
        else:
            # Sanity check: all polygons of one postcode must share the
            # same municipality.
            if postalcodes[postalcode][-1]["municipality"] != municipality:
                raise ValueError("municipality is off %s vs %s" % ( postalcodes[postalcode][-1]["municipality"],municipality ))
        postalcodes[postalcode].append({
            "polygon" : polwsg,
            "area" : area,
            "city" : city,
            "municipality" : municipality,
            "holes" : len(interiors)
        })
    except Exception as e:
        # NOTE(review): if the failure happens before `postalcode` is
        # assigned, this log line itself raises NameError — confirm.
        logging.error("failed on %s" % postalcode)
        raise(e)
# Write one GeoJSON FeatureCollection per postcode to output/<code>.json.
for code in postalcodes:
    data = postalcodes[code]
    geoj = {
        "type" : "FeatureCollection",
        "metadata" : {
            "municipality" : data[0]["municipality"],
            "city" : data[0]["city"],
            "num" : len(data),
            "postcode_category" : postcode_category(code)
        },
        "attribution" : [
            {
                "of" : "Entire dataset, except field metadata.postcode_category",
                "to" : "Kartverket ©",
                "source" : "https://kartkatalog.geonorge.no/metadata/462a5297-33ef-438a-82a5-07fff5799be3",
                "license" : "https://creativecommons.org/licenses/by/4.0/"
            },
            {
                "of" : "The field metadata.postcode_category",
                "to" : "Posten Norge AS",
                "source" : "https://www.bring.no/tjenester/adressetjenester/postnummer",
                "license" : "Unknown"
            }
        ],
        "features" : [{
            'type': 'Feature',
            'properties': {
                "area" : area["area"],
                "holes" : area["holes"]
            },
            'geometry': mapping(area["polygon"])
        } for area in data
        ]
    }
    with open('output/%s.json' % (code), 'w') as outfile:
        json.dump(geoj, outfile, sort_keys=True)
|
import json
import logging
from redes_neurais.mlp import Mlp
from redes_neurais.som import SOM
from PIL import Image
import numpy
import pprint
from termcolor import colored
colors = {
"0": "white",
"1": "red",
"2": "blue",
"3": "green"
}
class DataProcessor(object):
    """Loads train/test JSON descriptors and normalises their data sections.

    Supported `format` values: "binary" and "values" (returned as-is) and
    "bmp" (each data entry is a bitmap path that gets flattened into a
    pixel list).
    """
    def __init__(self, path_to_train, path_to_test):
        self.path_to_train = path_to_train
        self.path_to_test = path_to_test
    @classmethod
    def openJson(cls, path_to_json):
        # NOTE(review): errors are only printed and None is returned;
        # callers do not always check for None.
        try:
            with open(path_to_json) as data_file:
                return json.load(data_file)
        except Exception as e:
            print e
    def run(self):
        # Load both descriptors and ensure their formats agree.
        train_data = self.openJson(self.path_to_train)
        test_data = self.openJson(self.path_to_test)
        try:
            data_format_train = train_data['format']
            data_format_test = test_data['format']
            if data_format_train != data_format_test:
                raise Exception("formats don't match.")
            else:
                data_format = data_format_train
        except Exception as e:
            print e
            return None
        return self._dataFormatProcessor(data_format, train_data, test_data)
    def _open_bmp(self, path_to_bmp):
        # Flatten the bitmap into a 1-D list of pixel values.
        img = Image.open(path_to_bmp)
        return numpy.matrix(img).getA1().tolist()
    def _dataFormatProcessor(self, data_format, train_data, test_data):
        # Dispatch on the declared data format.
        if data_format == "binary":
            return (train_data, test_data)
        elif data_format == "bmp":
            for index, item in enumerate(train_data["data"]):
                train_data["data"][index] = self._open_bmp(item)
            train_data["input_size"] = len(train_data["data"][0])
            for index, item in enumerate(test_data["data"]):
                test_data["data"][index] = self._open_bmp(item)
            return (train_data, test_data)
        elif data_format == "values":
            return (train_data, test_data)
        else:
            raise NotImplementedError("Format not implemented.")
def classify(class_values, results):
    """Group network outputs by the target class vector they approximate.

    Outputs within rtol/atol 0.01 of a target vector are assigned to that
    class; whatever remains is reported under "Unclassified".
    """
    pp = pprint.PrettyPrinter(indent=4)
    classes = dict()
    pp.pprint(results)
    for index, item in enumerate(class_values):
        classes.update({"class_{}".format(index):{"format":item, "member":[]}})
    for index, result in enumerate(results):
        for key, _class in classes.items():
            if numpy.allclose(numpy.array(result), numpy.array(_class["format"]),rtol=0.01, atol=0.01):
                # NOTE(review): pop(index) removes one element from *inside*
                # the matched result row and stores only that element as the
                # member — this looks unintended; confirm before relying on it.
                _class["member"].append(result.pop(index))
    classes.update({"Unclassified":results})
    pp.pprint(classes)
def run_mlp(path_to_config, path_to_train, path_to_test):
    """Train and test an MLP from the given config/train/test JSON files."""
    processor = DataProcessor(path_to_train, path_to_test)
    config = DataProcessor.openJson(path_to_config)
    values = processor.run()
    if values:
        train_data, test_data = values
        mlp = Mlp(train_data["input_size"], config["hidden"], config["output"])
        mlp.train(train_data["data"], train_data["targets"], config["max_iterations"], config["min_error"], config["n"], config["m"])
        results, iterations = mlp.test(test_data["data"])
        print "Iteration: {}".format(iterations)
        classify(train_data["targets"], results)
def run_som(path_to_config, path_to_train, path_to_test):
    """Train a SOM and print a coloured terminal map of the winning neurons."""
    processor = DataProcessor(path_to_train, path_to_test)
    config = DataProcessor.openJson(path_to_config)
    values = processor.run()
    if values:
        train_data, test_data = values
        som = SOM(config['x'], config['y'], train_data['input_size'], config['sigma'], config['n'])
        if config['test'] == "random":
            # Random mode reuses the test set for training.
            test = train = test_data['data']
            som.random_weights_init(train)
            som.train_random(train, config["iterations"])
        elif config['test'] == 'batch':
            test = test_data['data']
            train = train_data['data']
            som.random_weights_init(train)
            som.train_batch(train)
        else:
            print "Something Wrong"
            exit(1)
        # Build a grid of class indices: each test vector colours the cell
        # of its winning neuron (the last element of a vector is its label).
        _map = numpy.chararray((config['x'], config['y']))
        _map[:] = "0"
        target = [ x[-1] for x in test]
        t = numpy.zeros(len(target),dtype=int)
        for index, item in enumerate(target):
            t[index] = train_data["target"].index(item)+1
        for cnt,xx in enumerate(test):
            w = som.winner(xx)
            _map[w] = t[cnt]
        # Render the grid with one coloured square per cell.
        for i in range(config['x']):
            for j in range(config['y']):
                print colored(u"\u25A0", colors[str(_map[i,j])]),
            print
        print
        print "Color Index:"
        for index, item in enumerate(train_data['target']):
            print colored(item, colors[str(index+1)])
|
import argparse
import datetime
import logging
import os
import shutil
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
from os.path import join, getsize
from subprocess import call

import util
# Shared state across worker threads; `lock` guards the two accumulators below.
lock = threading.RLock()
gl_threadTotTime = 0  # accumulated per-worker processing time in seconds
gl_errorNum = 0  # number of failed parser invocations
#func define
def add_argument(parse):
    """Register all command-line options on the given argparse parser."""
    parse.add_argument("--Input", "-I", help="The input directory include log files(such as LogA.log, LogB.log, etc...)")
    parse.add_argument("--Output", "-O", help="The output directory inclues output zip(each input file corresponds to a directory(such as LogA/0.7z,1.7z,.. LogB/0.7z,1.7z,..)")
    parse.add_argument("--Template", "-T", default="/apsarapangu/disk7/LogTemplate/", help="The template directory inclues needed templates(each input file corresponds to a directory(such as LogA/ LogB/ ..)")
    parse.add_argument("--TemplateLevel", "-TL", default="0", choices=["0", "N"], help="The template level. 0 for nothing, N for number correlation.")
    parse.add_argument("--MatchPolicy", "-P", default="L", choices=["L", "T"], help="The match policy after parsing")
    parse.add_argument("--TimeDiff", "-D", default="D", choices=["D", "ND"], help="Open time diff policy")
    parse.add_argument("--EncoderMode", "-E", default="Z", choices=["Z", "NE", "P"], help="Encoder type ")
    parse.add_argument("--MaxThreadNum", "-TN", default="4", help="The max thread running num")
    parse.add_argument("--ProcFilesNum", "-FN", default="0", help="The max block num a single thread can process, 0 for dynamic distrib.")
    parse.add_argument("--BlockSize", "-B", default="100000", help="The size of lines in single block(100000 for each block)")
    parse.add_argument("--Mode", "-m", default="Tot", choices=["Tot", "Seg"], help="The mode of compression(Tot for single large file, Seg for multiple blocks default for Tot)")
def check_args(args):
    """Validate CLI paths and (re)create the output directory.

    Returns 1 when Input/Template exist and Output was (re)created,
    0 otherwise (the caller exits on 0).
    """
    print("Now mode: {}, input file: {}".format(args.TemplateLevel + "_" + args.MatchPolicy + "_" + args.TimeDiff + "_" + args.EncoderMode, args.Input))
    if not os.path.exists(args.Input):
        print("No input path. Quit")
        return 0
    if not os.path.exists(args.Template):
        print("No template path. Quit")
        return 0
    if not os.path.exists(args.Output):
        print("No output path. Will make new directory at {}".format(args.Output))
    else:
        # Start from a clean output directory. shutil.rmtree replaces the
        # previous `call("rm -rf " + path, shell=True)`, which broke on
        # paths containing spaces and was a shell-injection hazard.
        shutil.rmtree(args.Output, ignore_errors=True)
    os.mkdir(args.Output)
    return 1
def atomic_addTime(step):
    """Thread-safely add `step` seconds to the global time accumulator."""
    global gl_threadTotTime
    with lock:
        gl_threadTotTime += step
def atomic_addErrnum(step):
    """Thread-safely add `step` to the global error counter."""
    global gl_errorNum
    with lock:
        gl_errorNum += step
def writeLog(fname, message, levelStr):
    """Append a timestamped message to the log file `fname`.

    The previous implementation relied on logging.basicConfig, which only
    configures the root logger on its *first* call (so later filenames were
    silently ignored) and left the default WARNING level in place (so
    levelStr == 'INFO' messages were dropped). Attach an explicit
    FileHandler per target file instead.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    target = os.path.abspath(fname)
    # Reuse the handler already attached for this file; add one on first use.
    if not any(isinstance(h, logging.FileHandler) and h.baseFilename == target
               for h in logger.handlers):
        handler = logging.FileHandler(fname, mode='a')
        handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
        logger.addHandler(handler)
    if levelStr == 'WARNING':
        logger.warning(message)
    elif levelStr == 'INFO':
        logger.info(message)
#return exec time (t2-t1)
def procFiles(typename, fileBeginNo, fileEndNo, now_input, now_output, now_temp, type_template):
    """Parse and compress block files fileBeginNo..fileEndNo (inclusive).

    Runs the external THULR parser over the block range, then 7-zips each
    parsed block into now_output and removes the intermediate files.
    Returns the elapsed wall-clock time in seconds.

    NOTE(review): reads the module globals encoder_mode, time_diff and
    output_path, which must be set in the __main__ section before any
    worker thread runs.
    """
    t1 = time.time()
    #parser
    # Each worker parses into its own thread-named sub-directory to avoid clashes.
    now_temp += threading.current_thread().name + "/"
    if (not os.path.exists(now_temp)):
        os.mkdir(now_temp)
    order = "./THULR -I " + now_input + " -X " + str(fileBeginNo) + " -Y " + str(fileEndNo) + " -O " + now_temp + " -T " + util.path_pro(type_template) + " -E " + encoder_mode + " -D " + time_diff + " -F " + os.path.join(type_template,"head.format")
    print(order + " " + threading.current_thread().name)
    res = call(order,shell=True)
    if (res != 0):
        # Parser failure: log it and bump the global error counter.
        tempStr = "Error Occur at: {} thread: {}, fileNo: {} to {}".format(typename, threading.current_thread().name, fileBeginNo, fileEndNo)
        print (tempStr)
        writeLog(str(output_path) + "Log_{}".format(datetime.date.today()), tempStr,'WARNING')
        atomic_addErrnum(1)
    #compress
    for i in range(fileBeginNo, fileEndNo + 1):
        filename = "{}".format(i)
        zip_path = str(now_output) + str(filename) + ".7z"
        compression_order = "7za a " + zip_path + " " + now_temp + filename + "/*" + " -m0=LZMA"
        call(compression_order, shell=True)
        # Drop the parsed intermediates once they are archived.
        call("rm -rf " + now_temp + filename + "/*", shell=True)
    t2 = time.time()
    tempStr = "thread:{}, type:{}, fileNo: {} to {} , cost time: {}".format(threading.current_thread().name, typename, fileBeginNo, fileEndNo, t2 - t1)
    print (tempStr)
    writeLog(str(output_path) + "Log_{}".format(datetime.date.today()), tempStr,'WARNING')
    return t2 - t1
def procFiles_result(future):
    """Completion callback: fold a worker's elapsed time into the global total."""
    elapsed = future.result()
    atomic_addTime(elapsed)
# calculate the reduce rate of each type file
def getdirsize(dir):
    """Return the total size in bytes of all files under `dir`, recursively."""
    total = 0
    for root, _dirs, filenames in os.walk(dir):
        for name in filenames:
            total += getsize(join(root, name))
    return total
# calculate the reduce rate of each type file
def calcuReduceRate(inputPath, outputPath, typename):
    """Print and log the compression ratio between input and output dirs.

    Guards against an empty output directory, which previously raised
    ZeroDivisionError.
    """
    inFileSize = getdirsize(inputPath)
    outFileSize = getdirsize(outputPath)
    # Avoid ZeroDivisionError when nothing was written to the output.
    rate = inFileSize / outFileSize if outFileSize else 0.0
    inFileSize = inFileSize / 1024
    outFileSize = outFileSize / 1024
    tempStr = "Type:{}, In_OutSize: {} _ {} Kb, Rate: {} , InFilePath:{}, OutFilePath:{}".format(typename, float('%.3f' % inFileSize), float('%.3f' % outFileSize), float('%.3f' % rate), inputPath, outputPath)
    print (tempStr)
    writeLog(str(output_path) + "Log_{}".format(datetime.date.today()), tempStr,'WARNING')
def threadsToExecTasks(typename, files, now_input, now_output, now_temp, type_template):
    """Fan the block files of one log type out over a thread pool.

    Splits indices 0..len(files)-1 into chunks of `step` files, submits one
    procFiles task per chunk and blocks until all tasks have finished.

    NOTE(review): reads the module globals maxSingleThreadProcFilesNum and
    maxThreadNum set in the __main__ section.
    """
    fileListLen = len(files)
    curFileNumBegin = 0
    curFileNumEnd = 0
    step = maxSingleThreadProcFilesNum
    if (step == 0):# dynamic step
        step = fileListLen // maxThreadNum
        if(step == 0):
            step = 1 # make sure the step is bigger than 0
    threadPool = ThreadPoolExecutor(max_workers = maxThreadNum, thread_name_prefix="LR_")
    while curFileNumBegin < fileListLen:
        # Clamp the last chunk to the final file index.
        if (curFileNumBegin + step > fileListLen):
            curFileNumEnd = fileListLen - 1
        else:
            curFileNumEnd = curFileNumBegin + step - 1
        future = threadPool.submit(procFiles, typename, curFileNumBegin, curFileNumEnd, now_input, now_output, now_temp, type_template)
        # Accumulate each worker's elapsed time when it completes.
        future.add_done_callback(procFiles_result)
        curFileNumBegin = curFileNumEnd + 1
    #wait(future, return_when=ALL_COMPLETED)
    threadPool.shutdown(wait=True)
if __name__ == "__main__":
    parse = argparse.ArgumentParser()
    add_argument(parse)
    args = parse.parse_args()
    if (not check_args(args)):
        exit(1)
    #init params
    input_path = args.Input
    template_path = util.path_pro(args.Template)
    output_path = util.path_pro(args.Output)
    template_level = args.TemplateLevel
    time_diff = args.TimeDiff
    encoder_mode = args.EncoderMode
    match_policy = args.MatchPolicy
    mode = args.Mode
    maxThreadNum = int(args.MaxThreadNum)
    maxSingleThreadProcFilesNum = int(args.ProcFilesNum)
    blockSize = int(args.BlockSize)
    #threadPool = ThreadPoolExecutor(max_workers = maxThreadNum, thread_name_prefix="test_")
    time1 = time.time()
    filename = input_path.split("/")[-1]
    print(filename)
    path = input_path.split(filename)[0]
    now_type = filename.split(".")[0]
    # "Tot" mode: split the single large input file into blockSize-line
    # segment files first; "Seg" mode assumes the input is already segmented.
    if (mode == "Tot"):
        seg_path = os.path.join(path, now_type + "_Segment/")
        if os.path.exists(seg_path):
            call("rm -rf "+seg_path,shell=True)
        os.mkdir(seg_path)
        f = open(input_path, encoding = "ISO-8859-1")
        cou = 0
        count = 0
        buffer = []
        while True:
            line = f.readline()
            if not line:
                # Flush the final (possibly partial) block at end of file.
                util.list_write(os.path.join(seg_path, str(cou) + ".col"), buffer, True)
                break
            buffer.append(line)
            count += 1
            if count == blockSize:
                count = 0
                util.list_write(os.path.join(seg_path, str(cou) + ".col"), buffer, True)
                buffer = []
                cou += 1
    else:
        seg_path = input_path
    time_t1 = time.time()
    all_files = os.listdir(seg_path)
    type_template = template_path
    # Fresh per-run temp directory for the parser's intermediate output.
    temp_path = os.path.join(output_path,"tmp/")
    if (not os.path.exists(temp_path)):
        os.mkdir(temp_path)
    now_temp = temp_path
    if (not os.path.exists(now_temp)):
        pass
    else:
        call("rm -rf " + now_temp, shell=True)
        os.mkdir(now_temp)
    now_input = seg_path
    now_output = output_path
    if (not os.path.exists(now_output)):
        os.mkdir(now_output)
    ###ThreadPool to Proc Files
    threadsToExecTasks(now_type, all_files, now_input, now_output, now_temp, type_template)
    time_t2 = time.time()
    tempStr = "{} finished, total time cost: {} , thread accum time: {}".format(now_output, time_t2 - time_t1, gl_threadTotTime)
    print(tempStr)
    writeLog(str(output_path) + "Log_{}".format(datetime.date.today()), tempStr,'WARNING')
    gl_threadTotTime = 0 # reset
    calcuReduceRate(now_input, now_output, input_path)
    time2 = time.time()
    tempStr = "{} Main finished, total time cost: {} , error num: {}".format(output_path, time2 - time1, gl_errorNum)
    print(tempStr)
    writeLog(str(output_path) + "Log_{}".format(datetime.date.today()), tempStr,'WARNING')
|
<filename>jobboard/views.py
from django.shortcuts import render, get_object_or_404, redirect
import jobboard
from .forms import CreateNewJobForm
from .models import Job
from datetime import date, timedelta
from student.models import Student
from recruiter.models import Recruiter
from job_application.models import Application
def get_content_if_user_is_student(request):
    """
    Build the board context for a student user: the default job list is
    filtered to jobs matching the student's major.
    """
    student = Student.get_student(request.user.id)
    full_name = f"{request.user.first_name} {request.user.last_name}"
    return {
        'jobs_to_display': Job.get_jobs_by_major(student.major),
        'jobs_intro_message': 'Here are some jobs that match your profile',
        'user': request.user,
        'user_is_student': True,
        'user_is_recruiter': False,
        'user_as_student': student,
        'user_name': full_name,
    }
def get_content_if_user_is_not_student(request, user_is_recruiter):
    """
    Build the board context for a non-student: the default job list is
    every job created within the last six months.
    """
    cutoff = date.today() - timedelta(days=180)
    content = {
        'jobs_to_display': Job.get_jobs_posted_on_or_after_specific_date(cutoff),
        'jobs_intro_message': 'Here are jobs created in the last 6 months',
        'user': request.user,
        'user_is_student': False,
        'user_is_recruiter': user_is_recruiter,
    }
    if user_is_recruiter:
        content['user_as_recruiter'] = Recruiter.get_recruiter(request.user.id)
        content['user_name'] = f"{request.user.first_name} {request.user.last_name}"
    return content
def searched_by_company_or_keyword(searched_name):
    """Jobs whose company name or keywords match the search term."""
    by_company = Job.get_jobs_by_company_name(searched_name)
    by_keyword = Job.get_jobs_by_keyword(searched_name)
    return by_company.union(by_keyword)
def searched_by_city(searched_city):
    """Jobs located in the given city."""
    return Job.get_jobs_by_city_name(searched_city)
def board(request):
    """Job board view: role-specific default listing plus optional search.

    A POST (search) filters by company/keyword, by city, or by the
    intersection of both when both fields are filled in.
    """
    user_is_student = Student.is_student(request.user.id)
    user_is_recruiter = Recruiter.is_recruiter(request.user.id)
    if user_is_student:
        content = get_content_if_user_is_student(request)
    else:
        content = get_content_if_user_is_not_student(request, user_is_recruiter)
    # user hit the search button
    if request.method == "POST":
        searched_name = request.POST.get('searched_name')
        searched_city = request.POST.get('searched_city')
        content["searched_name"] = searched_name
        content["searched_city"] = searched_city
        if searched_name and not searched_city:
            content['jobs_to_display'] = searched_by_company_or_keyword(searched_name)
            content['jobs_intro_message'] = f"Here are jobs for '{searched_name}'"
        elif searched_city and not searched_name:
            content['jobs_to_display'] = searched_by_city(searched_city)
            # Fixed: the closing quote around the city name was missing.
            content['jobs_intro_message'] = f"Here are jobs located at '{searched_city}'"
        else:
            content['jobs_to_display'] = searched_by_city(searched_city).\
                intersection(searched_by_company_or_keyword(searched_name))
            # Fixed: the closing quote around the city name was missing.
            content['jobs_intro_message'] = f"Here are jobs for '{searched_name}' located at '{searched_city}'"
    add_navbar_links_to_context(request, content)
    return render(request, 'jobboard/board.html', content)
def add_navbar_links_to_context(request, context):
    """Attach the role-appropriate navbar links to the template context."""
    if Student.is_student(request.user.id):
        links = {"Account Settings": "/student/account_settings/", "Logout": "/logout",
                 f"Welcome {request.user.username}": "#"}
    elif Recruiter.is_recruiter(request.user.id):
        links = {"Account Settings": "/recruiter/account_settings/", "My Jobs": "/myjobs",
                 "Create Job": "/recruiter/create_new_job_form", "Logout": "/logout",
                 f"Welcome {request.user.username}": "#"}
    elif request.user.is_authenticated:
        links = {"Logout": "/logout"}
    else:
        links = {"Login": "/login"}
    context['navbar_links'] = links
def create_new_job_form(request):
    """Show the job-creation form (recruiters only) and save it on POST."""
    recruiter_object = get_object_or_404(Recruiter, user_id=request.user.id)
    form = CreateNewJobForm()
    context = {}
    if request.method == 'POST':
        form = CreateNewJobForm(request.POST)
        if form.is_valid():
            # Company, recruiter and creation date are server-side fields,
            # not user-submitted form inputs.
            form.instance.company = recruiter_object.company
            form.instance.recruiter = recruiter_object
            form.instance.date_created = date.today()
            form.save()
            return redirect("/job_created_successfully/", context)
    context = {'form': form}
    # Call the local helper directly instead of via the jobboard.views
    # module attribute, consistent with the other views in this file.
    add_navbar_links_to_context(request, context)
    return render(request, "create_new_job_form.html", context)
def job_created_successfully(request):
    """Render the confirmation page shown after a job was created."""
    return render(request, 'job_created_successfully.html')
def job_detail_view(request, id):
    """Render a single job's detail page with a role-specific indicator."""
    context = dict()
    try:
        context["job_data"] = Job.objects.get(id=id)
    except Job.DoesNotExist:
        return render(request, "jobboard/no_such_job.html")
    # setting a null default value if the user isn't recruiter or student
    user_indicator_template = None
    if Student.is_student(request.user.id):
        # NOTE(review): other call sites pass request.user.id to
        # Student.get_student; here the whole user object is passed —
        # confirm which form get_student expects.
        if check_if_student_already_applied(Student.get_student(request.user), context["job_data"]):
            user_indicator_template = "jobboard/student_user_applied_indicator.html"
        else:
            user_indicator_template = "jobboard/student_user_not_applied_indicator.html"
    elif Recruiter.is_recruiter(request.user.id):
        # Recruiters get an "owner" indicator on their own postings.
        if context["job_data"].recruiter.user.id == request.user.id:
            user_indicator_template = "jobboard/recruiter_user_owns_job_indicator.html"
    context["user_indicator_template"] = user_indicator_template
    add_navbar_links_to_context(request, context)
    return render(request, "jobboard/job.html", context)
def check_if_student_already_applied(student, job):
    """True if `student` has at least one application for `job`."""
    job_apps = set(Application.get_applications_by_job(job))
    student_apps = set(Application.get_applications_by_student(student))
    return not job_apps.isdisjoint(student_apps)
|
<filename>grillen.py
#encoding=utf-8
from flask import Flask, render_template, request
from sqlalchemy import update
from forms import WurstOrderForm, DeleteOrderForm, IndexForm
import config
import os
#TODO: Nachträgliche Änderungen der getätigten Bestellungen
# Flask application setup; secret key and DB URI come from config.py.
app = Flask(__name__)
app.config['SECRET_KEY'] = config.SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
# import models AFTER app is initialized
from models import db, DB_Bestellungen, DB_Events
def initEmptyDatabases():
    """Create all database tables if they do not exist yet."""
    db.create_all()
@app.route('/', methods=['GET', "POST"])
def index():
    """Landing page: shows the event form and creates an event on POST."""
    form=IndexForm(request.form)
    if request.method == "POST":
        # Create the database files on first use.
        if not os.path.exists(config.EVENTS_FILE):
            initEmptyDatabases()
        #create event
        #create new database or use an existing one
        new_event = DB_Events(name=form.name.data, date=form.date.data, offer=form.offer.data)
        db.session.add(new_event)
        db.session.commit()
        #TODO: rename the database, adjust the config, test the database
        return render_template('index.html', created=True, form=form)
    return render_template('index.html', form=form)
@app.route('/grillen', methods=['GET', 'POST'])
def wurstOrder():
    """Order form: create a new order, or overwrite an existing one by name."""
    form=WurstOrderForm(request.form)
    print('Valid input: ' + str(form.validate()))
    if request.method == 'POST':
        # Create the database file on first use.
        if not os.path.exists(config.BESTELLUNGEN_FILE):
            initEmptyDatabases()
        # Bread rolls (broetchen) scale with the total sausage count.
        # NOTE(review): new_order is constructed even when the branch below
        # updates an existing row instead of inserting it.
        new_order = DB_Bestellungen(name=form.name.data, bratwurst=form.bratwurst.data, schinkengriller=form.schinkengriller.data, broetchen=form.broetchen.data*(int(form.bratwurst.data)+int(form.schinkengriller.data)), selbstversorger=form.selbstversorger.data)
        # One order per name: update in place when the name already ordered.
        if DB_Bestellungen.query.filter(DB_Bestellungen.name == form.name.data).one_or_none():
            db.session.query(DB_Bestellungen).filter(DB_Bestellungen.name == form.name.data).update({DB_Bestellungen.bratwurst: form.bratwurst.data, DB_Bestellungen.broetchen: form.broetchen.data*(int(form.bratwurst.data)+int(form.schinkengriller.data)), DB_Bestellungen.schinkengriller: form.schinkengriller.data, DB_Bestellungen.selbstversorger: form.selbstversorger.data})
        else:
            db.session.add(new_order)
        db.session.commit()
        return render_template('order.html', bestellt=True, form=form)
    return render_template('order.html', form=form)
@app.route('/summary', methods=['GET'])
def summary():
    """Show all orders as a table, or a plain message when none exist."""
    if os.path.exists(config.BESTELLUNGEN_FILE):
        #namen = db.session.execute("SELECT name FROM bestellungen")
        #bestellungen = db.session.execute("SELECT bratwurst FROM bestellungen")
        #output = ""
        # Raw SQL with no user input involved; the template renders keys/rows.
        db_req = db.session.execute("SELECT * FROM bestellungen")
        keys = db_req.keys()
        entries = db_req.fetchall()
        print(keys)
        print(entries)
        #for x in namen.fetchall():
        #    name += "%s" % (x)
        #for y in bestellungen.fetchall():
        #    bestellung += "%s" % (y)
        #    output += "<strong>%s</strong>: %s " % (request.keys()[y], x[y])
        #    output += "<br>"
        #output += "<br>Teilnehmeranzahl: %s<br><br>" % x[0]
        #for key in request.keys()[2:]:
        #    output += "%s: %s<br>" % (key, db.session.execute("SELECT SUM(%s) FROM bestellungen" % key).fetchall()[0][0]) #execute funktionert; sum rechnet alle zuammen, [0][0] "entfernt" die liest und tuple
        #TODO: proper bread-roll counter
        #TODO: make the output prettier
        return render_template('summary.html', keys=keys, entries=entries)
    elif not os.path.exists(config.BESTELLUNGEN_FILE):
        return "No orders!"
    #return str(output)
@app.route('/delete', methods=['GET', 'POST'])
def deleteOrderForm():
    """Ask for the delete secret; wipe all orders when it matches."""
    form=DeleteOrderForm(request.form)
    if request.method != 'POST':
        return render_template('delete_order.html', form=form)
    print(form.delete_secret.data)
    print(form.confirm_delete.data)
    if form.delete_secret.data == "Mettwoch" and form.confirm_delete.data:
        return deleteOrders()
    return "Hau ab!"
def deleteOrders():
    """Delete the orders database file if present and report the outcome."""
    if not os.path.exists(config.BESTELLUNGEN_FILE):
        return("Keine Bestellungen zum Löschen.")
    os.remove(config.BESTELLUNGEN_FILE)
    return("Bestellungen erfolgreich gelöscht.")
|
#! /usr/bin/env python3
"""
Entry point for Postkutsche.
"""
import os
import sys
import zlib
import shutil
import asyncio
import logging
import subprocess
from os import listdir
from os.path import isfile, isdir, join, exists
import onlinebrief24
from guy import Guy
from guy import http
from jinja2 import Environment, FileSystemLoader, select_autoescape
from pk_toml_config import PkTomlConfig
import models
# notify2 (desktop notifications) is only available on Linux.
if sys.platform.startswith("linux"):
    import notify2
# Application configuration loaded from the TOML config file.
CONFIG = PkTomlConfig().load_config()
# Jinja2 environment serving all HTML templates from ./templates.
ENV = Environment(
    loader=FileSystemLoader("templates"), autoescape=select_autoescape(["html", "xml"]),
)
class Main(Guy):
    """Main window: lists PDFs waiting in the upload folder and sends them."""

    def __init__(self):
        """\
        Initializes state for client.
        """
        # Initialize the platform's notification framework (Linux only so far).
        if sys.platform.startswith("linux"):
            notify2.init("Postkutsche")
        elif sys.platform.startswith("darwin"):
            # TODO: Add the macos thing
            pass
        elif sys.platform.startswith("win"):
            # TODO: Add the windows thing
            pass
        super().__init__()

    def render(self, path):
        """\
        Renders template for main page.
        """
        # The PDF table itself is loaded asynchronously via the /pdf_files route.
        template = ENV.get_template("main.html")
        return template.render(pdf_files=[])

    def open_upload_folder(self):
        # Open the upload folder in the platform's file manager.
        upload_folder = CONFIG["paths"].get("upload_folder", None)
        if isdir(upload_folder):
            if sys.platform.startswith("darwin"):
                subprocess.call(["open", upload_folder])
            elif sys.platform.startswith("win"):
                subprocess.call(["explorer", upload_folder])
            else:
                subprocess.call(["xdg-open", upload_folder])

    def send_files(self):
        """\
        Send PDF from upload_folder.

        Uploads every PDF in the upload folder to onlinebrief24, moves each
        sent file into the archive folder, records it in the Archive table
        and notifies the user; aborts with a notification when credentials
        or files are missing.
        """
        upload_folder = CONFIG["paths"].get("upload_folder", None)
        archive_folder = CONFIG["paths"].get("archive_folder", None)
        # Default archive location: <upload_folder>/archive
        if not archive_folder:
            archive_folder = join(upload_folder, "archive")
        os.makedirs(archive_folder, exist_ok=True)
        username = CONFIG["onlinebrief24"].get("username", None)
        password = CONFIG["onlinebrief24"].get("password", None)
        if not username or not password:
            send_system_notification(
                "Logindaten werden benötigt",
                "Bitte unter 'Einstellungen' Benutzername und Passwort eintragen!",
                urgency="high"
            )
            return
        pdf_files = get_upload_files(upload_folder)
        if len(pdf_files) <= 0:
            send_system_notification(
                "Bitte Briefe hinzufügen",
                "Dazu können PDF in das Upload-Verzeichnis gespeichert werden.",
                urgency="high"
            )
            return
        # sending files with onlinebrief24.de
        with onlinebrief24.Client(username, password) as c:
            for pdf in pdf_files:
                logging.debug("sending: {}".format(pdf.filename))
                c.upload(join(upload_folder, pdf.filename),
                         duplex=pdf.duplex,
                         color=pdf.color,
                         envelope=pdf.envelope,
                         distribution=pdf.distribution,
                         registered=pdf.registered,
                         payment_slip=pdf.payment_slip)
                # Move the sent letter out of the upload folder...
                shutil.move(
                    join(upload_folder, pdf.filename), join(archive_folder, pdf.filename)
                )
                # ...and persist its sending options in the archive table.
                models.Archive.create(
                    adler32=pdf.adler32,
                    filename=pdf.filename,
                    color=pdf.color,
                    duplex=pdf.duplex,
                    envelope=pdf.envelope,
                    distribution=pdf.distribution,
                    registered=pdf.registered,
                    payment_slip=pdf.payment_slip,
                )
                # Delete entry after it was moved to the archive
                pdf.delete().execute()
        send_system_notification(
            "Briefe hochgeladen",
            "{} Briefe wurden zu onlinebrief24 hochgeladen und werden jetzt verarbeitet.".format(
                len(pdf_files))
        )
        # Tell the frontend to refresh its PDF table.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.emit("reload_pdf_files"))

    def openPdfSettings(self, pdf_hash, filename):
        """\
        Opens PDF settings in modal overlay.
        """
        return PdfSettings(pdf_hash=pdf_hash, filename=filename)
class Archive(Guy):
    """Archive window: lists previously sent PDFs."""

    def render(self, path):
        """\
        Renders template for archive page.
        """
        # The archive table itself is loaded asynchronously via /archive.
        template = ENV.get_template("archive.html")
        return template.render(pdf_files=[])

    def open_archive_file(self, filename):
        """\
        Opens an archived file via xdg-open (linux), explorer (win) or open (macos).
        """
        archive_folder = CONFIG["paths"].get("archive_folder", None)
        archive_file = join(archive_folder, filename)
        if exists(archive_file):
            if sys.platform.startswith("darwin"):
                subprocess.call(["open", archive_file])
            elif sys.platform.startswith("win"):
                subprocess.call(["explorer", archive_file])
            else:
                # Fall back to xdg-open on Linux and other POSIX desktops.
                subprocess.call(["xdg-open", archive_file])
        else:
            logging.critical("'%s' seems to be not existing!", archive_file)
class Settings(Guy):
    """Settings window: shows the current configuration values."""

    def render(self, path):
        """\
        Renders template for settings page.
        """
        template = ENV.get_template("settings.html")
        # Bug fix: the password was read from CONFIG["<PASSWORD>"], a
        # placeholder key that does not exist in the config; credentials
        # live under CONFIG["onlinebrief24"] (see send_files/save_settings).
        context = {
            "username": CONFIG["onlinebrief24"].get("username", ""),
            "password": CONFIG["onlinebrief24"].get("password", ""),
            "upload_folder": CONFIG["paths"].get("upload_folder", ""),
            "archive_folder": CONFIG["paths"].get("archive_folder", ""),
        }
        return template.render(**context)
class PdfSettings(Guy):
    """Modal overlay for editing a single PDF's sending options."""

    def __init__(self, *args, **kwargs):
        # A PDF is identified by its adler32 hash plus filename.
        self.pdf_hash = kwargs["pdf_hash"]
        self.pdf_filename = kwargs["filename"]
        super().__init__(*args)

    def render(self, path, includeGuyJs=False):
        """Render the settings form pre-filled with the PDF's current options."""
        template = ENV.get_template("_pdf_settings.html")
        pdf = (
            models.PdfFile.select()
            .where(
                models.PdfFile.adler32 == self.pdf_hash,
                models.PdfFile.filename == self.pdf_filename,
            )
            .limit(1)
            .execute()[0]
        )
        logging.debug(
            "PDF file::: filename: {}, color: {}, duplex: {}, envelope: {}, distri: {}, registered: {}, payment_slip: {}".format(
                pdf.filename,
                pdf.color,
                pdf.duplex,
                pdf.envelope,
                pdf.distribution,
                pdf.registered,
                pdf.payment_slip,
            )
        )
        return template.render(pdf=pdf)
def send_system_notification(subject, text, urgency=None, ttl=None):
    """\
    Sends notification with the OS native framework.

    NOTE(review): only implemented for Linux (notify2). Any truthy
    `urgency` value is mapped to URGENCY_CRITICAL, and `ttl` is currently
    accepted but never used — confirm the intended semantics.
    """
    if sys.platform.startswith("linux"):
        notification = notify2.Notification(
            subject,
            text,
            "notification-message-im",
        )
        if urgency:
            notification.set_urgency(notify2.URGENCY_CRITICAL)
        notification.show()
def get_upload_files(upload_folder):
    """\
    Fetches PDF file list from upload folder and mirrors it in the database.
    """
    db_files = []
    for entry in listdir(upload_folder):
        full_path = join(upload_folder, entry)
        if not (isfile(full_path) and entry.endswith(".pdf")):
            continue
        record, _created = models.PdfFile.get_or_create(
            adler32=get_file_hash(full_path), filename=entry
        )
        db_files.append(record)
    return db_files
def get_archive_files():
    """\
    Fetches archived files from database, newest first.
    """
    return models.Archive.select().order_by(models.Archive.created_at.desc())
def get_file_hash(file_with_path):
    """\
    Generates adler32 hash for given file in upload_folder.

    Reads the file as a single bytes object; the previous
    readlines()/join round-trip allocated an intermediate list of
    lines for no benefit.
    """
    with open(file_with_path, "rb") as f:
        return zlib.adler32(f.read())
@http("/pdf_files")
def get_pdf_files(web):
    # AJAX endpoint: render the table of PDFs waiting in the upload folder.
    upload_folder = CONFIG["paths"].get("upload_folder")
    pdf_files = get_upload_files(upload_folder)
    template = ENV.get_template("_pdf_table.html")
    web.write(template.render(pdf_files=pdf_files))
@http("/file-upload")
def file_upload(web):
    """PDF upload aka adding PDF via drag and drop."""
    pdf_file = web.request.files.get("file")[0]
    # Only accept real PDFs; everything else is rejected with a plain message.
    if pdf_file.get("content_type") != "application/pdf":
        web.write("No PDF")
    else:
        # Store the uploaded bytes under the original filename in the upload folder.
        upload_folder = CONFIG["paths"].get("upload_folder")
        filename = pdf_file.get("filename")
        content = pdf_file.get("body")
        with open(os.path.join(upload_folder, filename), "wb") as new_file:
            new_file.write(content)
        web.write("success")
@http("/archive")
def get_archive_pdf_files(web):
    # AJAX endpoint: render the table of previously sent (archived) PDFs.
    pdf_files = get_archive_files()
    template = ENV.get_template("_archive_table.html")
    web.write(template.render(pdf_files=pdf_files))
@http("/settings_save")
def save_settings(web):
    """Persist the settings form into the TOML config and redirect back."""
    form = web.request.body_arguments
    # TODO: Validate arguments
    # TOML is not able to handle byte-strings, so we have to decode them
    CONFIG["paths"]["upload_folder"] = form.get("upload_folder")[0].decode()
    CONFIG["paths"]["archive_folder"] = form.get("archive_folder")[0].decode()
    CONFIG["onlinebrief24"]["username"] = form.get("username")[0].decode()
    CONFIG["onlinebrief24"]["password"] = form.get("password")[0].decode()
    PkTomlConfig().write_config(CONFIG)
    web.redirect("/Settings?success")
def _decoded_form_value(form, key):
    """Return the first value for `key` decoded to str, or None if absent."""
    values = form.get(key)
    if not values:
        return None
    return values[0].decode()


@http("/pdf_settings_save")
def save_pdf_settings(web):
    """Persist per-PDF sending options submitted from the settings modal.

    Fixes a latent crash: the old code used `form.get(key, None)[0]`,
    which raises TypeError whenever a key is missing — the None default
    was useless. Missing keys now simply leave the field unchanged
    (for booleans: False, matching the old behavior for "false").
    """
    form = web.request.body_arguments
    pdf = (
        models.PdfFile.select()
        .where(
            models.PdfFile.adler32 == form["adler32"],
            models.PdfFile.filename == form["filename"],
        )
        .limit(1)
        .execute()[0]
    )
    # Checkboxes arrive as the strings "true"/"false".
    pdf.color = _decoded_form_value(form, "color") == "true"
    pdf.duplex = _decoded_form_value(form, "duplex") == "true"
    # Dropdowns: only overwrite when a non-empty value was submitted.
    envelope = _decoded_form_value(form, "envelope")
    if envelope:
        pdf.envelope = envelope
    distribution = _decoded_form_value(form, "distribution")
    if distribution:
        pdf.distribution = distribution
    # The literal string "None" means "no selection" for these two fields.
    registered = _decoded_form_value(form, "registered")
    pdf.registered = None if registered == "None" else registered
    payment_slip = _decoded_form_value(form, "payment_slip")
    pdf.payment_slip = None if payment_slip == "None" else payment_slip
    pdf.save()
    logging.debug(
        "pdf setting::: color: {}, duplex: {}, envelope: {}, distri: {}, registered: {}, payment_slip: {}".format(
            pdf.color,
            pdf.duplex,
            pdf.envelope,
            pdf.distribution,
            pdf.registered,
            pdf.payment_slip,
        )
    )
    web.redirect("/Main")
if __name__ == "__main__":
    # Start the GUI with the Main window as the entry page.
    app = Main()
    app.run()
|
<gh_stars>10-100
from surgeo.models.base_model import BaseModel
from bias_detector.common import *
import pandas as pd
import surgeo
import pathlib
class FullNameZipcodeModel(BaseModel):
    """Race-probability model combining first-name, surname and ZIP code
    probability tables (see the paper linked at _combined_probs)."""

    def __init__(self):
        super().__init__()
        # Root of the installed surgeo package, used by the base-class loaders.
        self._package_root = pathlib.Path(surgeo.__file__).parents[0]
        # Lookup tables: P(race | surname) and P(ZCTA | race).
        self._PROB_RACE_GIVEN_SURNAME = self._get_prob_race_given_surname()
        self._PROB_ZCTA_GIVEN_RACE = self._get_prob_zcta_given_race()

    def get_probabilities(self, first_names, last_names, zip_codes):
        """Return a DataFrame of combined race probabilities per input row.

        first_names, last_names, zip_codes - row-aligned pandas Series.
        """
        first_names_probs = self._get_first_names_probs(first_names)
        last_names_probs = self._get_last_names_probs(last_names)
        zip_codes_probs = self._get_zip_codes_probs(zip_codes)
        names_zip_codes_probs = self._combined_probs(first_names_probs, last_names_probs, zip_codes_probs)
        result = self._adjust_frame(
            first_names_probs,
            last_names_probs,
            zip_codes_probs,
            names_zip_codes_probs
        )
        return result

    #See: https://www.tandfonline.com/doi/full/10.1080/2330443X.2018.1427012
    def _combined_probs(self,
                        first_names_probs: pd.DataFrame,
                        last_name_probs: pd.DataFrame,
                        zip_code_probs: pd.DataFrame) -> pd.DataFrame:
        """Multiply the three probability tables row-wise and renormalize."""
        # Column 0 of each frame is the identifier; columns 1+ hold the
        # per-race probabilities, so the product skips the first column.
        names_zip_codes_numer = last_name_probs.iloc[:, 1:] * first_names_probs.iloc[:, 1:] * zip_code_probs.iloc[:, 1:]
        names_zip_codes_denom = names_zip_codes_numer.sum(axis=1)
        names_zip_codes_probs = names_zip_codes_numer.div(names_zip_codes_denom, axis=0)
        return names_zip_codes_probs

    def _adjust_frame(self,
                      first_names_probs: pd.DataFrame,
                      last_names_probs: pd.DataFrame,
                      zip_codes_probs: pd.DataFrame,
                      names_zip_codes_probs: pd.DataFrame) -> pd.DataFrame:
        """Reattach the identifier columns to the combined probabilities."""
        names_zip_codes_data = pd.concat([
            first_names_probs['first_name'].to_frame(),
            last_names_probs['last_name'].to_frame(),
            zip_codes_probs['zip_code'].to_frame(),
            names_zip_codes_probs
        ], axis=1)
        return names_zip_codes_data

    def _get_last_names_probs(self,
                              last_names: pd.Series) -> pd.DataFrame:
        """Left-join each surname onto the P(race | surname) table."""
        last_names_probs = last_names.to_frame().merge(
            self._PROB_RACE_GIVEN_SURNAME,
            left_on='last_name',
            right_index=True,
            how='left',
        )
        return last_names_probs

    def _get_first_names_probs(self, first_names: pd.Series) -> pd.DataFrame:
        """Left-join each first name onto the P(first name | race) table."""
        first_names_probs = first_names.to_frame().merge(
            p_first_name_given_race_df,
            left_on='first_name',
            right_index=True,
            how='left',
        )
        return first_names_probs

    def _get_zip_codes_probs(self, zip_codes: pd.Series) -> pd.DataFrame:
        """Left-join each ZIP code onto the P(ZCTA | race) table."""
        zip_codes_probs = zip_codes.to_frame().merge(
            self._PROB_ZCTA_GIVEN_RACE,
            left_on='zip_code',
            right_index=True,
            how='left',
        )
        return zip_codes_probs
|
"""
Класс дома
"""
import pygame
from random import random
from match import Match
from paper import Paper
class House(object):
"""Описывает дом"""
def __init__(self, forest, physical_x: float, physical_y: float):
"""
Параметры
forest - объект леса
physical_x - Физическая координата x дома в [м]
physical_y - Физическая координата y дома в [м]
"""
# Графика
self.color: tuple = (41, 171, 255) # Цвет дома
self.graphical_height: int = 60 # Графическая высота дома в [px]
self.graphical_width: int = 75 # Графическая ширина дома в [px]
# Изображение дома в формате bmp
self.image_house = pygame.image.load('Sprites/snow_house.bmp')
self.image_house_light = pygame.image.load('Sprites/house_light.bmp')
# Физика
self.action_radius: float = 1 # Радиус в [м], внутри которого герой может взаимодействовать
self.matches_amount: int = 0 # Количество спичек
self.match_generation_chance: float = 0.5 # Шанс нахождения спички в доме
self.paper_amount: int = 0 # Количество листов бумаги в доме
self.paper_generation_chance: float = 0.5 # Шанс нахождения бумаги в доме
self.physical_x: float = physical_x
self.physical_y: float = physical_y
self.safe_radius: float = 2 # Радиус вокруг дома в [м], в пределах которого не генерируются деревья
self.temperature: float = 28 # Разница температуры внктри дома и снаружи в [К]
# Изображение дома в формате bmp
self.image_house = pygame.image.load('Sprites/snow_house.bmp')
# Объекты
self.forest = forest
self.match = Match(self) # Объект спички
self.paper = Paper(self) # Объект бумаги
# Звуки
self.sound_inventory = pygame.mixer.Sound('Soundtrack/inventory.wav')
# --- Инициализация ---
@staticmethod
def generation_needed(generation_chance: float):
"""
Необходима ли дальнейшая генерация
generation_chance - шанс генерации
"""
generation_number: float = random()
if generation_number < generation_chance:
return True
else:
return False
def generate_matches(self):
"""
В доме можно найти спички
"""
while self.generation_needed(self.match_generation_chance):
self.matches_amount += 1 # Создать спичку
def generate_paper(self):
"""
В доме можно найти бумагу
"""
while self.generation_needed(self.paper_generation_chance):
self.paper_amount += 1 # Создать бумагу
def setup(self):
"""
Инициализация дома
"""
self.generate_matches()
self.generate_paper()
# --- Логика ---
def collect_matches(self):
"""
Герой забирает спички
"""
if self.matches_amount >= 1:
self.sound_inventory.play()
self.sound_inventory.set_volume(0.3)
self.forest.game.hero.inventory.matches_amount += self.matches_amount # Герой забирает все спички
self.matches_amount: int = 0 # Спичек не осталось
def collect_paper(self):
"""
Герой забирает бумагу
"""
if self.matches_amount >= 1:
self.sound_inventory.play()
self.sound_inventory.set_volume(0.3)
self.forest.game.hero.inventory.paper_amount += self.paper_amount # Герой забирает всю бумагу
self.paper_amount: int = 0 # Бумаги не осталось
# --- Графика ---
def draw(self, graphical_x: int, graphical_y: int):
    """
    Draw the house; overlay the lit sprite when the hero is close.

    graphical_x -- graphical x coordinate of the house in [px]
    graphical_y -- graphical y coordinate of the house in [px]
    """
    hero_x = self.forest.game.hero.x
    hero_y = self.forest.game.hero.y
    # Bug fix: the base sprite used to be drawn unconditionally AND again in
    # the far branch, i.e. twice per frame when the hero was far away. It is
    # now drawn once; the lit overlay is added on top only when the hero is
    # near. Draw order (house, then light) is unchanged.
    self.forest.game.graphic_engine.draw_image_center(self.image_house, graphical_x, graphical_y,
                                                      self.graphical_width, self.graphical_height)
    # Squared-distance test: hero within sqrt(2) physical units of the house.
    if (hero_x - self.physical_x) ** 2 + (hero_y - self.physical_y) ** 2 <= 2:
        # NOTE(review): self.image_house_light is not initialised in the
        # visible __init__ -- confirm it is assigned elsewhere.
        self.forest.game.graphic_engine.draw_image_center(self.image_house_light, graphical_x, graphical_y,
                                                          self.graphical_width, self.graphical_height)
# --- Event handling ---
def manage_graphics(self, graphical_x: int, graphical_y: int):
    """
    Process the house's graphical events (draw it at the given position).

    graphical_x -- graphical x coordinate of the house in [px]
    graphical_y -- graphical y coordinate of the house in [px]
    """
    self.draw(graphical_x, graphical_y)
def manage_logic(self):
    """
    Process the house's logic events: the hero picks up any matches
    and paper found in the house.
    """
    self.collect_matches()
    self.collect_paper()
|
'''
Compendium of generic DNS utilities
'''
# Import salt libs
import salt.utils
# Import python libs
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform
'''
return 'dnsutil'
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.

    Example::

        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    # A file path takes precedence: its contents replace any ``zone`` string.
    if zonefile:
        zone = ''
        with salt.utils.fopen(zonefile, 'r') as fp_:
            for line in fp_:
                zone += line
    if not zone:
        return 'Error: Zone data was not found'
    zonedict = {}
    # 'single': one record per line; 'multi': accumulating a parenthesised
    # record (typically the SOA) that spans several lines.
    mode = 'single'
    for line in zone.splitlines():
        comps = line.split(';')
        # Strip trailing ';' comments and surrounding whitespace.
        line = comps[0].strip()
        if not line.strip():
            continue
        comps = line.split()
        # $-directives ($ORIGIN, $TTL, ...) become top-level keys.
        if line.startswith('$'):
            zonedict[comps[0].replace('$','')] = comps[1]
            continue
        # An opening '(' without a matching ')' starts a multi-line record.
        if '(' in line and not ')' in line:
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                # Record complete: flatten it onto one line and fall through.
                mode = 'single'
                line = multi.replace('(','').replace(')','')
            else:
                continue
        # Expand '@' to the zone origin once it is known.
        if 'ORIGIN' in zonedict.keys():
            comps = line.replace('@',zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            # Drop the optional 'IN' class field so the remaining SOA fields
            # line up positionally.
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            zonedict['CONTACT'] = comps[4].replace('.','@',1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        # Records that omit the owner name inherit the origin.
        if comps[0] == 'IN':
            comps.insert(0, zonedict['ORIGIN'])
        # Qualify relative names against the origin.
        if not comps[0].endswith('.'):
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        # NS and MX may occur multiple times and are collected into lists;
        # every other record type maps owner name -> record data.
        if comps[2] == 'NS':
            if not 'NS' in zonedict.keys():
                zonedict['NS'] = []
            zonedict['NS'].append(comps[3])
        elif comps[2] == 'MX':
            if not 'MX' in zonedict.keys():
                zonedict['MX'] = []
            zonedict['MX'].append({'priority': comps[3],
                                   'host': comps[4]})
        else:
            if not comps[2] in zonedict.keys():
                zonedict[comps[2]] = {}
            zonedict[comps[2]][comps[0]] = comps[3]
    return zonedict
def _to_seconds(time):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
if 'H' in time.upper():
time = int(time.upper().replace('H','')) * 3600
elif 'D' in time.upper():
time = int(time.upper().replace('D','')) * 86400
elif 'W' in time.upper():
time = 604800
else:
try:
time = int(time)
except:
time = 604800
if time < 604800:
time = 604800
return time
|
"""
@author <NAME>
@file enum.py
@note The singleton example is taken from:
http://www.python.org/dev/peps/pep-0318/#examples
@note I don't use TAB's and indentation is 4.
@note epydoc wrongly interprets the class as a function.
Probably because of the decorator (without it works).
"""
__docformat__ = "javadoc en"
import inspect
import unittest
def singleton(theClass):
    """ decorator turning a class into a singleton: every call returns the
    one and only instance, created lazily on first use """
    instances = {}

    def getInstance():
        """ create the instance on first call, then always return it """
        try:
            return instances[theClass]
        except KeyError:
            instances[theClass] = theClass()
            return instances[theClass]
    return getInstance
@singleton
class Enum(object):
    """ Provides C++-style enum functionality: each call for a given context
    hands out the next integer, starting at 0.

    >>> TOP = enum("direction")     # or enum(Direction) for a class context
    >>> LEFT = enum("direction")
    >>> assert TOP < LEFT

    Notes:
      - individual values can still be assigned manually,
      - constants may live inside a class (Direction.TOP),
      - as in C++, inserting a new constant in the middle shifts the
        values of all later ones.
    """
    def __init__(self):
        """ map of context name -> last id handed out for that context """
        self.contexts = {}

    def getNextId(self, context):
        """ providing next id >= 0 on each call per context
        @param context is a string
        @return is an integer value being unique for given context
        """
        self.contexts[context] = self.contexts.get(context, -1) + 1
        return self.contexts[context]
def enum(context):
    """ wrapper for calling the singleton Enum (see that class for the full
    documentation)
    @param context can be a string or a class
    @return is an integer value being unique for given context
    """
    key = context.__name__ if inspect.isclass(context) else context
    return Enum().getNextId(key)
class EnumTestCase(unittest.TestCase):
    """ unit tests for the Enum singleton and the enum() helper """

    def testSingleton(self):
        """ constructing Enum twice must yield the very same instance """
        first = Enum()
        second = Enum()
        self.assertEqual(first, second)

    def testGetNextId(self):
        """ ids within one context increase monotonically """
        registry = Enum()
        HORIZONTAL = registry.getNextId("orientation")
        VERTICAL = registry.getNextId("orientation")
        self.assertTrue(HORIZONTAL < VERTICAL)

    def testEnumFunctionWithStringContext(self):
        """ four constants created against a string context """
        class Direction:
            TOP = enum("direction")
            LEFT = enum("direction")
            RIGHT = enum("direction")
            BOTTOM = enum("direction")
        self.assertTrue(Direction.TOP < Direction.LEFT < Direction.RIGHT < Direction.BOTTOM)

    def testEnumFunctionWithClassContext(self):
        """ constants created against a class context (keyed by class name)
        @note moving the enum code into the class itself did not work """
        class Vector:
            def __init__(self):
                self.vector = [0, 0]
        X = enum(Vector)
        Y = enum(Vector)
        self.assertTrue(X < Y)
        self.assertEqual(X, 0)
        self.assertEqual(Y, 1)
if __name__ == "__main__":
    # Run the enum test suite with verbose per-test output.
    tests = unittest.TestLoader().loadTestsFromTestCase(EnumTestCase)
    unittest.TextTestRunner(verbosity=3).run(tests)
|
import argparse
import collections
import logging
import os
import matplotlib.pyplot as plt
from lib.ctoolswrapper import CToolsWrapper
import gammalib
logging.basicConfig(format='%(asctime)s %(levelname)s:\n%(message)s', level=logging.WARNING)
# PYTHONPATH=.. python test_events_generation.py --model ../crab_simulations/crab.xml --save --dir pippo --tmax 100
def read_spectrum_fits(fits):
    """Extract spectral points from a csspec FITS object.

    Rows with TS > 9 and flux error < flux are treated as detections; every
    other row is recorded as an upper limit.

    Returns a dict with detection arrays ('energies', 'flux', 'ed_engs',
    'eu_engs', 'e_flux') and upper-limit arrays ('ul_energies', 'ul_ed_engs',
    'ul_eu_engs', 'ul_flux').

    Fixes: removed the dead ``got`` flag (set but never used) and reuse of
    the already-fetched flux / flux-error values instead of re-reading them.
    """
    table = fits.table(1)
    c_energy = table['Energy']
    c_ed = table['ed_Energy']
    c_eu = table['eu_Energy']
    c_flux = table['Flux']
    c_eflux = table['e_Flux']
    c_ts = table['TS']
    c_upper = table['UpperLimit']
    # Initialise arrays to be filled
    energies = []
    flux = []
    ed_engs = []
    eu_engs = []
    e_flux = []
    ul_energies = []
    ul_ed_engs = []
    ul_eu_engs = []
    ul_flux = []
    # Loop over rows of the file
    for i in range(table.nrows()):
        # Test Statistic, flux and flux error decide detection vs upper limit
        ts = c_ts.real(i)
        flx = c_flux.real(i)
        e_flx = c_eflux.real(i)
        if ts > 9.0 and e_flx < flx:
            # Significant point: store flux with its asymmetric energy errors
            energies.append(c_energy.real(i))
            flux.append(flx)
            ed_engs.append(c_ed.real(i))
            eu_engs.append(c_eu.real(i))
            e_flux.append(e_flx)
        else:
            # Not significant: store the upper limit instead
            ul_energies.append(c_energy.real(i))
            ul_flux.append(c_upper.real(i))
            ul_ed_engs.append(c_ed.real(i))
            ul_eu_engs.append(c_eu.real(i))
    return {'energies': energies,
            'flux': flux,
            'ed_engs': ed_engs,
            'eu_engs': eu_engs,
            'e_flux': e_flux,
            'ul_energies': ul_energies,
            'ul_ed_engs': ul_ed_engs,
            'ul_eu_engs': ul_eu_engs,
            'ul_flux': ul_flux, }
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Create spectrum from a lot of csspec fits files and plot them")
    parser.add_argument("input_files", help="The csspec fits file", nargs='+')
    # parser.add_argument("--save", help="save the outputs", default=False, action="store_true")
    args = parser.parse_args()
    # Parse every input FITS file into a dict of detection/upper-limit arrays.
    data = []
    for fn in args.input_files:
        spectrum_fits = gammalib.GFits(fn)
        # logging.warning(spectrum_fits)
        data.append(read_spectrum_fits(spectrum_fits))
    # Set upper limit errors
    # Plot the spectra
    # NOTE(review): exactly two input files are assumed below
    # (data[0] = crab, data[1] = GRB afterglow); fewer inputs raise IndexError.
    plt.figure(figsize=(8,5))
    plt.loglog()
    plt.grid()
    # Upper limits are drawn with downward arrows (uplims) whose length is
    # 60% of the limit value.
    plt.errorbar(data[0]['energies'], data[0]['flux'], yerr=data[0]['e_flux'], xerr=[data[0]['ed_engs'], data[0]['eu_engs']], fmt='ro', label="crab")
    plt.errorbar(data[0]['ul_energies'], data[0]['ul_flux'], xerr=[data[0]['ul_ed_engs'], data[0]['ul_eu_engs']], yerr=[0.6 * x for x in data[0]['ul_flux']], uplims=True, fmt='ro')
    plt.errorbar(data[1]['energies'], data[1]['flux'], yerr=data[1]['e_flux'], xerr=[data[1]['ed_engs'], data[1]['eu_engs']], fmt='bo', label="grb afterflow")
    plt.errorbar(data[1]['ul_energies'], data[1]['ul_flux'], xerr=[data[1]['ul_ed_engs'], data[1]['ul_eu_engs']], yerr=[0.6 * x for x in data[1]['ul_flux']], uplims=True, fmt='bo')
    plt.xlabel('Energy (TeV)')
    plt.ylabel(r'E$^2$ $\times$ dN/dE (erg cm$^{-2}$ s$^{-1}$)')
    plt.legend()
    # plt.title('{} spectrum'.format(args.name))
    plt.show()
    exit(0)
    # Everything below is dead code kept from an earlier csspec-driving
    # version of this script.
    # tw = CToolsWrapper({ 'name': args.name,
    #                      'ra': 0,
    #                      'dec': 0,
    #                      'energy_min': 0.03,
    #                      'energy_max': 150.0,
    #                      #'seed': args.seed,
    #                    }, verbosity=args.verbose)
    #
    # working_dir = os.path.join(args.dir)
    # try:
    #     os.makedirs(working_dir)
    # except FileExistsError as e:
    #     logging.warning("The data dir {} already exists".format(working_dir))
    #
    # output_filename = os.path.join(working_dir, 'test_spectrum.fits')
    # log_filename = os.path.join(working_dir, 'test_csspec.log')
    #
    # spec = tw.csspec_run( input_obs_list = args.input_file,
    #                       input_models = args.model,
    #                       output_file = output_filename,
    #                       log_file = log_filename,
    #                       force = args.force,
    #                       save = args.save )
    #
    # fits = spec.spectrum()
    # from show_spectrum cscript
|
from django.shortcuts import render, get_object_or_404, redirect
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.conf import settings
from signbank.video.models import Video, GlossVideo, GlossVideoHistory
from signbank.video.forms import VideoUploadForm, VideoUploadForGlossForm
# from django.contrib.auth.models import User
# from datetime import datetime as DT
import os
import re
import glob
import shutil
from signbank.dictionary.models import Gloss, DeletedGlossOrMedia
from signbank.settings.base import GLOSS_VIDEO_DIRECTORY, WRITABLE_FOLDER
from signbank.settings.server_specific import FFMPEG_PROGRAM
from signbank.tools import generate_still_image, get_default_annotationidglosstranslation
def addvideo(request):
    """View to present a video upload form and process the upload.

    NOTE(review): unlike deletevideo(), this view is not wrapped in
    @login_required -- confirm whether anonymous uploads are intended.
    """
    if request.method == 'POST':
        form = VideoUploadForGlossForm(request.POST, request.FILES)
        if form.is_valid():
            gloss_id = form.cleaned_data['gloss_id']
            gloss = get_object_or_404(Gloss, pk=gloss_id)
            # Target location: GLOSS_VIDEO_DIRECTORY/<2-letter prefix>/<idgloss>-<pk>.mp4
            goal_folder = WRITABLE_FOLDER+GLOSS_VIDEO_DIRECTORY + '/' + gloss.idgloss[:2] + '/'
            goal_filename = gloss.idgloss + '-' + str(gloss.pk) + '.mp4'
            goal_location = goal_folder + goal_filename
            vfile = form.cleaned_data['videofile']
            vfile.name = gloss.idgloss + "-" + str(gloss.pk) + ".mp4"
            redirect_url = form.cleaned_data['redirect']
            # Bug fix: .first() returns None when this gloss has no video
            # yet; calling .delete() on None raised AttributeError for the
            # very first upload.
            old_vid = GlossVideo.objects.filter(gloss_id=gloss_id).first()
            if old_vid is not None:
                old_vid.delete()
            # make a new GlossVideo object for the new file
            video = GlossVideo(videofile=vfile, gloss=gloss)
            video.save()
            # Issue #162: log the upload history
            log_entry = GlossVideoHistory(action="upload", gloss=gloss, actor=request.user,
                                          uploadfile=vfile, goal_location=goal_location)
            log_entry.save()
            # TODO: provide some feedback that it worked (if
            # immediate display of video isn't working)
            return redirect(redirect_url)
    # if we can't process the form, just redirect back to the
    # referring page, should just be the case of hitting
    # Upload without choosing a file but could be
    # a malicious request, if no referrer, go back to root
    if 'HTTP_REFERER' in request.META:
        url = request.META['HTTP_REFERER']
    else:
        url = '/'
    return redirect(url)
@login_required
def deletevideo(request, videoid):
    """Remove the video for this gloss, if there is an older version
    then reinstate that as the current video (act like undo)

    NOTE(review): this view logs the deletion, removes the file from disk
    and records a DeletedGlossOrMedia row, but no GlossVideo model object
    is deleted here -- confirm the revert/cleanup hook runs elsewhere.
    """
    if request.method == "POST":
        # deal with any existing video for this sign
        gloss = get_object_or_404(Gloss, pk=videoid)
        # Issue #162: log the deletion history
        log_entry = GlossVideoHistory(action="delete", gloss=gloss, actor=request.user)
        log_entry.save()
        # Extra check: if the file is still there, delete it manually
        if os.path.isfile(WRITABLE_FOLDER + gloss.get_video_path()):
            os.remove(WRITABLE_FOLDER + gloss.get_video_path())
        default_annotationidglosstranslation = get_default_annotationidglosstranslation(gloss)
        # Record what was removed so the deletion can be audited later.
        deleted_video = DeletedGlossOrMedia()
        deleted_video.item_type = 'video'
        deleted_video.idgloss = gloss.idgloss
        deleted_video.annotation_idgloss = default_annotationidglosstranslation
        deleted_video.old_pk = gloss.pk
        deleted_video.filename = gloss.get_video_path()
        deleted_video.save()
    # return to referer
    if 'HTTP_REFERER' in request.META:
        url = request.META['HTTP_REFERER']
    else:
        url = '/'
    return redirect(url)
def poster(request, videoid):
    """Redirect to the still-frame (poster) image for the given gloss's
    video, generating it on demand inside poster_url() if needed."""
    gloss_video = get_object_or_404(GlossVideo, gloss_id=videoid)
    return redirect(gloss_video.poster_url())
def video(request, videoid):
    """Redirect to the video URL for this videoid (gloss id)."""
    gloss_video = get_object_or_404(GlossVideo, gloss_id=videoid)
    return redirect(gloss_video)
def iframe(request, videoid):
    """Generate an iframe with a player for this video.

    Falls back to empty player parameters when the gloss or its video
    cannot be resolved, so the template still renders.
    """
    try:
        gloss = Gloss.objects.get(pk=videoid)
        glossvideo = gloss.get_video()
        videourl = glossvideo.get_absolute_url()
        posterurl = glossvideo.poster_url()
    except Exception:
        # Bug fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt. The best-effort fallback is kept, but only
        # for ordinary exceptions (missing gloss, missing video, etc.).
        gloss = None
        glossvideo = None
        videourl = None
        posterurl = None
    return render(request, "iframe.html",
                  {'videourl': videourl,
                   'posterurl': posterurl,
                   'aspectRatio': settings.VIDEO_ASPECT_RATIO})
def create_still_images(request):
    """Generate a still image for every gloss that has a video file on disk,
    and return an HttpResponse listing the processed video paths."""
    processed_videos = []
    for gloss in Gloss.objects.all():
        video_path = WRITABLE_FOLDER + gloss.get_video_path()
        if not os.path.isfile(video_path.encode('UTF-8')):
            continue
        folder, basename = os.path.split(video_path)
        generate_still_image(gloss.idgloss[:2], folder + os.sep, basename)
        processed_videos.append(video_path)
    return HttpResponse('Processed videos: <br/>' + "<br/>".join(processed_videos))
|
<gh_stars>1-10
#
#
# Copyright (c) 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from amqplib import client_0_8 as amqp
from wsgiref.handlers import SimpleHandler
import mimetools
import os
import cStringIO
import inspect
class TrapezeWSGIHandler(SimpleHandler):
    # The handler is reused across many requests, so it is not "run once".
    wsgi_run_once = False

    def __init__(self, stdin, stdout, stderr):
        """Create a WSGI handler bound to the given in-memory streams.

        The process environment seeds the WSGI environ, with
        SERVER_PROTOCOL forced to HTTP/1.0.
        """
        base_environ = dict(os.environ.items())
        base_environ['SERVER_PROTOCOL'] = 'HTTP/1.0'
        SimpleHandler.__init__(self, stdin, stdout, stderr, base_environ,
                               multithread=False, multiprocess=True)

    def update_environ(self, environ):
        # ``base_env`` is the attribute wsgiref's SimpleHandler stores its
        # environ argument under; per-request keys are merged into it here.
        self.base_env.update(environ)
class TrapezeWSGI:
    """
    Handle HTTP requests that have been encapsulated in an AMQP message
    by passing them via WSGI to a Python application.
    """
    DEFAULT_QUEUE_NAME = 'app'
    CONSUMER_TAG = 'consumer'

    def __init__(self, application, routing_key,
                 conn_settings=('localhost:5672', 'guest', 'guest',
                                '/', False),
                 exchange='trapeze', wsgi_handler=TrapezeWSGIHandler):
        """Initialize the AMQP connection, channel, and receiving queue.

        conn_settings is (host:port, userid, password, virtual_host, insist).
        """
        self.output_buffer = cStringIO.StringIO()
        self.input_buffer = cStringIO.StringIO()
        self.error_buffer = cStringIO.StringIO()
        self.amqp_connection = amqp.Connection(host=conn_settings[0],
                                               userid=conn_settings[1],
                                               password=conn_settings[2],
                                               virtual_host=conn_settings[3],
                                               insist=conn_settings[4])
        self.amqp_channel = self.amqp_connection.channel()
        self.amqp_channel.queue_declare(queue=TrapezeWSGI.DEFAULT_QUEUE_NAME,
                                        durable=False, exclusive=False,
                                        auto_delete=False)
        self.amqp_channel.queue_bind(queue=TrapezeWSGI.DEFAULT_QUEUE_NAME,
                                     exchange=exchange,
                                     routing_key=routing_key)
        self.application = application
        # NOTE(review): the wsgi_handler parameter is accepted but
        # TrapezeWSGIHandler is instantiated directly; kept as-is for
        # backward compatibility -- confirm whether wsgi_handler should
        # be used here.
        self.handler = TrapezeWSGIHandler(self.input_buffer,
                                          self.output_buffer,
                                          self.error_buffer)
        self.amqp_channel.basic_consume(queue=TrapezeWSGI.DEFAULT_QUEUE_NAME,
                                        callback=self._deliver_callback,
                                        consumer_tag=TrapezeWSGI.CONSUMER_TAG,
                                        no_ack=True)

    def serve_forever(self):
        """Handle one request at a time until
        an unhandled exception is raised
        """
        try:
            while True:
                self.handle_request(False)
        finally:
            self._cleanup()

    def _extract_env(self, request_headers):
        """Extract necessary information from the HTTP request headers
        and store it in the WSGI environment dictionary.
        """
        stream = cStringIO.StringIO(request_headers)
        # this isn't a reliable method of doing this,
        # but since we only plan on supporting one client...
        [command, full_path, version] = stream.readline() \
                                              .split("\n", 1)[0].split()
        path_components = full_path.split('?', 1)
        path = path_components[0]
        if len(path_components) == 2:
            query = path_components[1]
        else:
            query = ''
        headers = mimetools.Message(stream)
        # Prefer the proxy-supplied host when present.
        forwarded_host = headers.get('x-forwarded-host', '')
        if forwarded_host != '':
            host_parts = forwarded_host.split(':')
        else:
            host_parts = headers.get('host', '').split(':')
        # TODO this doesn't take HTTPS into account.
        # How could we tell if this request came to us via HTTPS
        # at this point?
        if len(host_parts) == 2:
            [host, port] = host_parts
        else:
            host = host_parts[0]
            # NOTE(review): explicit ports arrive as strings but this
            # default is an int -- confirm consumers tolerate both.
            port = 80
        env = {}
        env['REQUEST_METHOD'] = command
        env['SERVER_NAME'] = host
        env['SERVER_PORT'] = port
        env['REMOTE_HOST'] = None
        env['CONTENT_LENGTH'] = headers.get('Content-Length', 0)
        env['SCRIPT_NAME'] = ''
        env['PATH_INFO'] = path
        env['QUERY_STRING'] = query
        if headers.typeheader is None:
            env['CONTENT_TYPE'] = headers.type
        else:
            env['CONTENT_TYPE'] = headers.typeheader
        length = headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        env['HTTP_COOKIE'] = headers.getheader('cookie', '')
        return env

    def _deliver_callback(self, message):
        """Run one AMQP-encapsulated HTTP request through the WSGI app and
        publish the response to the message's reply_to queue."""
        # Bug fix: split only on the FIRST blank line -- a request body that
        # itself contains '\r\n\r\n' used to raise
        # "too many values to unpack".
        [headers, body] = message.body.split('\r\n\r\n', 1)
        self.input_buffer.write(body)
        self.input_buffer.seek(0)
        # use self.handler.update_environ() to set environ vars
        env = self._extract_env(headers)
        self.handler.update_environ(env)
        self.handler.run(self.application)
        response = amqp.Message(self.output_buffer.getvalue(),
                                correlation_id=message.message_id)
        # don't ack until after wsgi app returns response and we are just about
        # to send that back to the queue.
        self.amqp_channel.basic_ack(message.delivery_tag)
        self.amqp_channel.basic_publish(response, routing_key=message.reply_to)
        # Bug fix: rewind before truncating so the reset does not depend on
        # the StringIO implementation's truncate() position semantics
        # (io-style truncate leaves the position untouched, which would make
        # later writes pad the buffer).
        self.input_buffer.seek(0)
        self.input_buffer.truncate(0)
        self.output_buffer.seek(0)
        self.output_buffer.truncate(0)
        # TODO logging the contents of error buffer?
        self.error_buffer.seek(0)
        self.error_buffer.truncate(0)

    def handle_request(self, cleanup=True):
        """Wait for a callback to handle a single request, and
        close all resources afterwards if cleanup == True.
        """
        try:
            self.amqp_channel.wait()
        finally:
            if cleanup:
                self._cleanup()

    def _cleanup(self):
        """Close all buffers, AMQP channels, and the AMQP connection."""
        self.amqp_channel.basic_cancel(TrapezeWSGI.CONSUMER_TAG)
        self.input_buffer.close()
        self.output_buffer.close()
        self.error_buffer.close()
        self.amqp_channel.close()
        self.amqp_connection.close()
def main():
    # Command-line entry point (Python 2): bind a queue with the given
    # routing key and serve the named WSGI application forever.
    import sys
    (prog, args) = (sys.argv[0], sys.argv[1:])
    usage = """Usage: %s <ROUTING_KEY> <APPLICATION_MODULE_PATH>
The routing key to bind our queue to the exchange (e.g. "*.localhost.*./.#").
Module path to WSGI application (e.g. django.core.handlers.wsgi.WSGIHandler).
""" % (prog,)
    if len(args) != 2:
        print usage
        sys.exit(1)
    routing_key = args[0]
    application_mod_path = args[1]
    # Split "pkg.module.Attr" into the module path and the attribute name.
    (application_mod_name, application_class_name) = \
        tuple(application_mod_path.rsplit('.', 1))
    application_module = __import__(application_mod_name,
                                    globals(), locals(),
                                    [application_class_name])
    application_object = getattr(application_module, application_class_name)
    # Accept either an application class (instantiate it) or a ready instance.
    if inspect.isclass(application_object):
        application = application_object()
    else:
        application = application_object
    trapezewsgi_server = TrapezeWSGI(application, routing_key)
    trapezewsgi_server.serve_forever()


if __name__ == '__main__':
    main()
|
###############################################################################################
# Varna SVG color script
#
# Author: <NAME>
# <EMAIL>
# Steve modified to plot T1 cleavage data
# Steve modified to plot additional custom nuc colors and lines between nucs
# Tony modified it to simplify class structure (reduce redundnacy) and fix bugs
#
#
# Affiliation: Weeks Lab, UNC Chemistry
#
# Date: 11.07.12+
# Version: 0.92+
#
# released under GPL 2.0
###############################################################################################
import math,sys
import numpy as np
import RNAtools
###############################################################################################
# End of ssDiagram class
# Combines both varna and xrna classes into one, since they replicate the same functionality
###############################################################################################
# known bugs:
# - 23S rRNA structure fails to load. There are two RNAs present in this fileIn
# and that causes an issue
class ssDiagram:
    """Secondary-structure diagram loaded from a VARNA or XRNA file.

    Holds nucleotide numbering, coordinates, base identities, pairing
    information (ct) and per-nucleotide reactivity values, plus optional
    circle-color and extra-line annotation data used for SVG rendering.

    Fixes applied: __str__ now returns its summary string (it was built and
    discarded), and file opens use 'r' instead of the 'rU' mode that was
    removed in Python 3.11.
    """
    def __init__(self, fIN, filetype = None):
        """
        Init the secondary structure diagram object
        Will infer object type from file extension, or can specified manually using filetype
        """
        if filetype is None:
            ext = fIN.split('.')[-1].upper()
            if ext == 'VARNA':
                filetype = 'VARNA'
            elif ext == 'XRNA':
                filetype = 'XRNA'
            else:
                sys.exit("Extension %s of file %s is not recognized" % (ext, fIN))
        self.filetype = filetype
        self.name = fIN
        if filetype == 'VARNA':
            self.num, self.pairMap, self.baseMap,self.center,self.shape,self.ct,self.scale,self.period= self.readVarna(fIN)
        else:
            self.num, self.pairMap, self.baseMap,self.shape,self.ct,self.scale, self.period = self.parseXRNA(fIN)
            # NOTE(review): XRNA files carry no helix-center data; the
            # coordinate list is reused here -- confirm this is intended.
            self.center = self.pairMap
        self.numMap = self.baseMap
        self.circleColors = False
        self.colorIDs = []
        self.extraLines = False
        self.extraLineData = []
        self.threshA = 0.025
        self.threshB = 0.055
        self.diff = False
        self.colorFunction = self.getSHAPEcolor

    def __str__(self):
        # Bug fix: the summary string was assigned to a local and never
        # returned, so str(obj) raised/returned None.
        return '{ Name= %s, Length = %s}' % (self.name, str(len(self.num)))

    def setdiff(self):
        """Switch coloring to the differential-SHAPE scheme."""
        self.diff = True
        self.colorFunction = self.getDiffSHAPEcolor

    def readVarna(self,x):
        """Parse a VARNA XML file; returns (num, coords, basemap, centers,
        shape, ct, offset, numbering period)."""
        import xml.etree.ElementTree as ET
        tree = ET.parse(x)
        root = tree.getroot()
        #initialize some arrays
        offset = 15/2.
        num,bases,x_pos,y_pos,shape,x_cen,y_cen = [],[],[],[],[],[],[]
        #read the varna xml file, nucleotides are under bases
        for nt in root.findall('./RNA/bases/nt'):
            num.append(int(nt.get('num')))
            shape.append(float(nt.get('val')))
            base = nt.find('base').text
            for i in nt:
                if i.get('r')=='pos':
                    x_pos.append(float(i.get('x')))
                    y_pos.append(float(i.get('y')))
                    bases.append(base)
                if i.get('r')=='center':
                    x_cen.append(float(i.get('x')))
                    y_cen.append(float(i.get('y')))
        #determine offset from the distance between the first two nucleotides
        x_pos,y_pos = np.array(x_pos),np.array(y_pos)
        vec = ((x_pos[0]-x_pos[1])**2+(y_pos[0]-y_pos[1])**2)**0.5
        offset = vec/4
        #transpose coords to positive space
        xoff = abs(min(x_pos))+offset
        yoff = abs(min(y_pos))+offset
        x_pos = x_pos + xoff
        y_pos = y_pos + yoff
        # list += numpy scalar falls back to numpy broadcasting, producing
        # ndarrays here.
        x_cen += xoff
        y_cen += yoff
        center = list(zip(x_cen,y_cen))
        #make expected arrays
        coord = list(zip(x_pos,y_pos))
        basemap = list(zip(bases,zip(x_pos,y_pos+offset)))
        #read varna xml file for pairing information
        ct = np.zeros(len(num))
        for pair in root.findall('./RNA/BPs/bp'):
            p5,p3 = int(pair.get('part5')),int(pair.get('part3'))
            # ct is 1-based: position i pairs with ct[i]-1
            ct[p5] = p3+1
            ct[p3] = p5+1
        ct = list(map(int,ct))
        #get the number period
        period = int(root.find('config').get('numperiod'))
        return num,coord,basemap,center,shape,ct,offset,period

    def parseXRNA(self,x):
        """Parse an XRNA XML file; returns (num, coords, basemap, shape, ct,
        offset, numbering period)."""
        import xml.etree.ElementTree as ET
        tree = ET.parse(x)
        root = tree.getroot()
        nucList = root.findall('./Complex/RNAMolecule/')
        nucLists = []
        for i in nucList:
            if i.tag == 'NucListData':nucLists.append(i)
        startNT = int(nucLists[0].get('StartNucID'))
        num = []
        bases,x_pos,y_pos,x_cen,y_cen = [],[],[],[],[]
        for nt in nucLists[0].text.split('\n'):
            if nt == '':continue
            line = nt.split()
            num.append(startNT)
            startNT+=1
            bases.append(line[0]),x_pos.append(float(line[1])),y_pos.append(float(line[2]))
        #determine offset; XRNA y axis is flipped relative to the SVG output
        x_pos,y_pos = np.array(x_pos),-1*np.array(y_pos)
        vec = ((x_pos[0]-x_pos[1])**2+(y_pos[0]-y_pos[1])**2)**0.5
        offset = vec/1.5
        #transpose coords to positive space, then scale up
        xoff = abs(min(x_pos))+offset
        yoff = abs(min(y_pos))+offset
        x_pos = x_pos + xoff
        y_pos = y_pos + yoff
        y_pos=2.3*y_pos
        x_pos=2.3*x_pos
        x_cen += xoff
        y_cen += yoff
        center = list(zip(x_cen,y_cen))
        #make expected arrays
        coord = list(zip(x_pos,y_pos))
        basemap = list(zip(bases,list(zip(x_pos,y_pos+offset))))
        shape = np.zeros(len(num))
        # Map of known XRNA hex colors to pseudo-reactivity values.
        clist = {'bbbbbb':-999,'999999':-999,'ff0000':1.0, '0':0.0, '000000':0.0, '1c75bc':-0.45,'00ff00':0.45, 'ff9900':0.45, 'f57e1f':0.45}
        for shapeLine in root.findall('./Complex/RNAMolecule/Nuc'):
            nucRange = shapeLine.get('RefIDs')
            preColor = shapeLine.get('Color')
            if not preColor:continue
            try:
                nucColor = clist[shapeLine.get('Color')]
            except KeyError:
                nucColor = 0.0
            if not nucRange:continue
            # RefIDs is a comma list of single ids and 'a-b' ranges (1-based)
            for i in nucRange.split(','):
                if len(i.split('-'))==1:
                    try:shape[int(i)-1]=nucColor
                    except:pass
                else:
                    line = i.split('-')
                    line = list(map(int,line))
                    for j in range(line[0],line[1]+1):
                        try:shape[j-1]=nucColor
                        except:pass
        shape = list(map(float,shape))
        period = 20
        #get pairing information
        ct = np.zeros(len(num))
        for pair in root.findall('./Complex/RNAMolecule/BasePairs'):
            pairStart,pairLength,pairEnd = pair.get('nucID'),pair.get('length'),pair.get('bpNucID')
            # A BasePairs element describes a helix: 'length' stacked pairs
            # walking inward from (nucID, bpNucID).
            for nt in range(int(pairLength)):
                p5 = int(pairStart)+nt
                p3 = int(pairEnd)-nt
                try:
                    ct[p5-1] = p3
                    ct[p3-1] = p5
                except:pass
        ct = list(map(int,ct))
        return num,coord,basemap,shape,ct,offset,period

    def readSHAPE(self,z):
        """Load per-nucleotide SHAPE reactivities from file z via RNAtools."""
        self.shape = RNAtools.readSHAPE(z)

    def parseCircleColorLine(self, line):
        """
        first column is nuc number, second and third columns are color specifications
        available color formats are as follows:
        0/1/2 : the values will be assigned to 'white', 'red, nofill', 'red, fill' respectively
        float [0/1] : the values will be converted to colors based on SHAPE or differential SHAPE color scheme
                      optional 0/1 indicates whether to fill (defaults to 0 -- nofill)
        color [color] : Colorname must be SVG color. Optional second color specifies the fill (defaults to white)
        r,g,b [r,g,b] : Input r,g,b values. Optional second color specifies the fill (defaults to white)
        """
        defaultmap = ['white', 'red', 'red']
        color = ['white', 'white']
        spl = line.split()
        try:
            cindex = int(spl[1])
            color = [defaultmap[cindex], defaultmap[cindex-1]]
        except ValueError:
            try:
                color[0] = float(spl[1])
                if len(spl) == 3 and spl[2]=='1':
                    color[1] = color[0]
            except ValueError:
                for i in range(1,len(spl)):
                    if ',' in spl[i]:
                        color[i-1] = 'rgb('+spl[i]+')'
                    else:
                        color[i-1] = spl[i]
        except:
            sys.exit("Improperly formatted circle color :: %s" % line)
        return tuple(color)

    def readCircleColors(self, z, ):
        """Read per-nucleotide circle color specs from file z; '#' lines
        are treated as comments."""
        self.circleColors = True
        # 'r' replaces the 'rU' mode removed in Python 3.11; universal
        # newlines are the default in Python 3 text mode anyway.
        with open(z, 'r') as f:
            for line in f:
                # parse out any headers
                if line[0] == '#':
                    continue
                self.colorIDs.append(self.parseCircleColorLine(line))

    def readExtraLines(self,z):
        """Read extra inter-nucleotide line data from
        'filename[,threshA,threshB]'; rows are (nuc_i, nuc_j, weight)."""
        self.extraLines = True
        splitZ = z.split(',')
        filename=splitZ[0]
        if len(splitZ)>1:
            self.threshA = float(splitZ[1])
            self.threshB = float(splitZ[2])
        extra = []
        # 'r' replaces the 'rU' mode removed in Python 3.11.
        with open(filename, 'r') as f:
            for line in f:
                spl = line.split()
                try:
                    extra.append((int(spl[0]), int(spl[1]), float(spl[2])))
                except (ValueError, IndexError):
                    pass
        self.extraLineData = extra

    def getSHAPEcolor(self, x, suppress=False):
        """
        return shape color if x is expected float, 'black' if x is None, and the
        same input string otherwise
        if suppress is True, will suppress black colors by setting them to white
        """
        if isinstance(x, str):
            try:
                x = float(x)
            except ValueError:
                return x
        elif x is None:
            return 'rgb(1,0,0)'
        if x < -4: col = '160,160,160'   # grey (no data)
        elif x > 0.85: col = '255,0,0'   # red (high reactivity)
        elif 0.85 >= x >0.4: col ='255,164,26'   # orange (medium)
        elif not suppress and 0.4 >= x > -4: col = '1,0,0'   # black (low)
        else: col = '255,255,255'
        return 'rgb(%s)' % col

    def getDiffSHAPEcolor(self, x, suppress=False):
        """
        return diff shape color if x is expected float, 'white' if x is None, and the
        and return the same input string otherwise
        """
        if isinstance(x, str):
            try:
                x = float(x)
            except ValueError:
                return x
        elif x is None:
            return 'rgb(1,0,0)'
        if x < -500: col = '160,160,160'   # grey (no data)
        elif x <= -0.2: col = '41,171,226' # blue (decreased reactivity)
        elif x >= 0.2: col = '0,210,0'     # green (increased reactivity)
        elif not suppress: col = '1,0,0'   # black (unchanged)
        else: col = '255,255,255'
        return 'rgb(%s)' % col
###############################################################################################
# Start Enzyme class
###############################################################################################
class Enzyme:
    """Enzymatic (e.g. T1) cleavage data plus SVG arrow-drawing helpers.

    Fixes applied: file opened with 'r' instead of the 'rU' mode removed in
    Python 3.11, and the file is closed via a with-statement even if a row
    fails to parse.
    """
    def __init__(self, filePath):
        """Load cleavage data from filePath; an empty path yields an
        empty object."""
        self.arrowSizes = []
        self.nums = []
        if filePath != "":
            self.nums, self.arrowSizes = self.parseEnzymeFile(filePath)

    def parseEnzymeFile(self,filePath):
        """Parse a whitespace-delimited file of (nuc number, arrow size)
        rows; the first line is treated as a header and skipped."""
        nums = []
        arrowSizes = []
        with open(filePath, "r") as fileIn:
            fileIn.readline()  # skip header line
            for line in fileIn:
                splitLine = line.strip().split()
                nums.append(int(splitLine[0]))
                arrowSizes.append(int(splitLine[1]))
        return nums, arrowSizes

    def rotatePoint(self, coords, theta):
        """Rotate point (x, y) about the origin by -theta radians."""
        x = coords[0]
        y = coords[1]
        xRot = x*math.cos(-theta)+ y*math.sin(-theta)
        yRot = -x*math.sin(-theta) + y*math.cos(-theta)
        return [xRot,yRot]

    def translatePoint(self, coords, offset):
        """Component-wise translation of a point by an offset vector."""
        return [coords[i] + offset[i] for i in range(len(coords))]

    def midpoint(self, c1, c2):
        """Midpoint of the segment from c1 to c2."""
        return [c1[0]+(c2[0]-c1[0])/2, c1[1]+(c2[1]-c1[1])/2]

    def calcArrowCoords(self, origin, h, w, theta):
        """Return the three corners of an arrow wedge of height h and width
        w, rotated by theta, with its tip placed at origin."""
        # define initial wedge V
        left = [-w/2,h]
        right = [w/2,h]
        # rotate initial wedge about 0,0
        leftRot = self.rotatePoint(left,theta)
        rightRot = self.rotatePoint(right,theta)
        # translate to given origin
        left = self.translatePoint(leftRot, origin)
        right = self.translatePoint(rightRot, origin)
        # return three coords
        return origin, left, right

    def arrowToString(self, pt1, pt2, pt3):
        """Render the wedge corners as an SVG <polygon> element."""
        svgString = '<polygon fill="rgb(80,100,255)" stroke="green" stroke-width="0" points="%f,%f %f,%f %f,%f" />'%(pt1[0],pt1[1],pt2[0],pt2[1],pt3[0],pt3[1])
        return svgString

    def drawArrows(self, varna):
        """Build an SVG fragment with one arrow per recorded cleavage site,
        placed 3' of the cleaved nucleotide, normal to the backbone."""
        heights = [0,20,40,80]  # indexed by arrow size: 0=no arrow, 1=low, 2=mid, 3=high
        width = 10
        arrowString = ""
        for i in range(len(self.nums)):
            num = self.nums[i]
            arrowSize = self.arrowSizes[i]
            loc1 = [varna.baseMap[num-1][1][0],4.0+varna.baseMap[num-1][1][1]]
            loc2 = [varna.baseMap[num][1][0],4.0+varna.baseMap[num][1][1]]
            loc = self.midpoint(loc1, loc2) # put arrow 3prime of cleaved nuc
            if arrowSize != 0:
                height = heights[arrowSize]
                # assuming clockwise nuc order, find normal angle
                diff = [loc2[n]-loc1[n] for n in [0,1]]
                theta = math.atan2(diff[1], diff[0]) + math.pi
                coords = self.calcArrowCoords(loc,height,width,theta)
                arrowString += self.arrowToString(coords[0],coords[1],coords[2])
        return arrowString
###############################################################################################
# General functions below
###############################################################################################
def evalStructures(rna1, rna2):
    """Compare two structures by their ct (pairing) tables.

    Returns (acceptedOnly, predictedOnly, shared, sensitivity, ppv) where
    the three lists hold (nucleotide number, partner) tuples counted once
    per pair, sensitivity = shared / accepted pairs and
    ppv = shared / predicted pairs.

    Side effect: rna1.ct and rna2.ct are coerced to lists in place.
    """
    rna1.ct = list(rna1.ct)
    rna2.ct = list(rna2.ct)
    accepted_n = 0   # paired positions in rna1 (two per base pair)
    predicted_n = 0  # paired positions in rna2 (two per base pair)
    shared, acceptedOnly, predictedOnly = [], [], []
    for idx in range(len(rna1.ct)):
        c1, c2 = rna1.ct[idx], rna2.ct[idx]
        # positions unpaired in both structures carry no information
        if c1 == 0 and c2 == 0:
            continue
        if c1 != 0:
            accepted_n += 1
        if c2 != 0:
            predicted_n += 1
        if c1 == c2:
            # identical pairing: record the pair once (partner > position)
            if c1 >= rna1.num[idx] and c2 >= rna2.num[idx]:
                shared.append((rna1.num[idx], c1))
            continue
        if c1 != 0 and c1 < rna1.num[idx]:
            acceptedOnly.append((rna1.num[idx], c1))
        if c2 != 0 and c2 < rna2.num[idx]:
            predictedOnly.append((rna2.num[idx], c2))
    sens = len(shared) / (float(accepted_n) / 2)
    ppv = len(shared) / (float(predicted_n) / 2)
    return acceptedOnly, predictedOnly, shared, sens, ppv
def offPoint(p, q, r):
    """Return the point a distance r from p along the direction p -> q."""
    start, end = np.array(list(p)), np.array(list(q))
    direction = end - start
    unit = direction / np.sqrt(np.sum(direction ** 2))
    return unit * r + start
def newLines(pointSet, locMap, r, struct=False):
    """Build a shortened line segment for each base pair in pointSet.

    Each (i, j) pair (1-based nucleotide numbers) yields a segment whose
    endpoints are pulled a distance r inward from the base coordinates in
    locMap, so drawn pair lines do not overlap the base glyphs.
    struct is accepted for interface compatibility but unused here.
    """
    segments = []
    for i, j in pointSet:
        start = offPoint(locMap[i - 1], locMap[j - 1], r)
        end = offPoint(locMap[j - 1], locMap[i - 1], r)
        segments.append((start, end))
    return segments
def drawBases(varna, fontsize=24):
    """Render each nucleotide letter as an SVG <text> element, colored by
    its chemical-probing value via varna.colorFunction.

    Returns (svg_text, max_x, max_y) where the maxima are the largest base
    coordinates seen plus a 60 px margin, used to size the SVG canvas.
    """
    template = ('<text x="%s" y="%s" text-anchor="middle" font-family="Sans-Serif" '
                'font-weight="bold" font-size="%d" fill="%s" >%s</text>')
    svg = ''
    max_x, max_y = 0.0, 0.0
    for entry, shape_val in zip(varna.baseMap, varna.shape):
        letter, coords = entry[0], entry[1]
        color = varna.colorFunction(shape_val)
        max_x = max(max_x, float(coords[0]))
        max_y = max(max_y, float(coords[1]))
        # letters are nudged 4 px down so they sit centered on the backbone
        svg += template % (coords[0], coords[1] + 4.0, fontsize, color, letter)
    return svg, max_x + 60, max_y + 60
def findFarPoint(pt, varna):
    """Pick a direction anchor for drawing nucleotide pt's number label.

    Computes the center of mass of all nucleotides whose squared distance
    from pt falls under a fixed cutoff (~63 px radius), so the label can
    later be pushed away from the local cluster of bases.
    """
    xs = np.array([coord[0] for coord in varna.pairMap])
    ys = np.array([coord[1] for coord in varna.pairMap])
    point = np.array((xs[pt], ys[pt]))
    # squared distance from pt to every nucleotide
    sq_dist = np.sum((point - np.transpose((xs, ys))) ** 2, axis=1)
    cutoff = 4000
    near = sq_dist < cutoff
    count = len(xs[near])
    centerMass = np.sum(xs[near]) / count, np.sum(ys[near]) / count
    return np.array(centerMass)
def findCWNormalPoint(pt, varna):
    """Return a point 20 units from nucleotide pt along the normal of the
    backbone segment pt -> pt+1 (direction rotated by +pi/2, which points
    outward assuming clockwise nucleotide order).

    For the last nucleotide the successor is approximated by stepping one
    unit in +x from the final coordinate.
    """
    xs = np.array([coord[0] for coord in varna.pairMap])
    ys = np.array([coord[1] for coord in varna.pairMap])
    point = np.array((xs[pt], ys[pt]))
    try:
        nxt = np.array((xs[pt + 1], ys[pt + 1]))
    except IndexError:
        # no successor: fake one directly to the right
        nxt = np.array((xs[pt] + 1, ys[pt]))
    dx, dy = nxt[0] - point[0], nxt[1] - point[1]
    theta = math.atan2(dy, dx) + math.pi / 2
    distance = 20
    return np.array((point[0] + math.cos(theta) * distance,
                     point[1] + math.sin(theta) * distance))
def drawNums(varna, offset):
    """Render nucleotide number labels plus short tick lines connecting
    each label to its base.

    Labels are placed at nucleotide 1, every `varna.period`-th nucleotide
    and the last one, pushed away from the structure either toward the
    local center of mass (XRNA input, which stores no center) or away from
    the stored center point.  `offset` is added to every printed number.

    Fix: removed the unused local `a` (a duplicate of `base`) present in
    the original.
    """
    period = varna.period
    nums = []
    lines = []
    # label positions: nucleotide 1, every period-th, and the final one
    for i in [1] + list(range(0, len(varna.num), period))[1:] + [len(varna.num)]:
        key = i - 1
        if varna.filetype == 'XRNA':
            # approximate an outward direction from the local center of mass
            center = findFarPoint(key, varna)
        else:
            center = np.array(varna.center[key])
        base = np.array(varna.pairMap[key])
        # push the label out along the center -> base direction
        norm = np.sum((base - center) ** 2) ** 0.5
        u_vec = (base - center) / norm * varna.scale * 7 + base
        label_pos = list(map(float, u_vec))
        base_pos = list(map(float, base))
        nums.append((str(i), label_pos))
        # short tick between label and base, shrunk from both ends
        p1 = offPoint(label_pos, base_pos, varna.scale * 2)
        p2 = offPoint(base_pos, label_pos, varna.scale * 2)
        lines.append((p1, p2))
    # lines connecting bases and their number labels
    line = processConnect(lines, (3, 0, 0), lineMap=True)
    # the numbering text itself (user-supplied offset applied here)
    for num_str, pos in nums:
        line += '<text x="%s" y="%s" text-anchor="middle" font-family="Sans-Serif" font-weight="bold" font-size="18" fill="rgb(0,1,0)" >%s</text>' \
            % (pos[0], pos[1] + varna.scale, str(int(num_str) + offset))
    return line
def drawOutline(varna, strokewidth=3):
    """Draw the backbone as one grey SVG <polyline> through every base
    coordinate (shifted 4 px up to match the letter positions)."""
    points = ''.join('%f,%f ' % (nuc[1][0], nuc[1][1] - 4) for nuc in varna.baseMap)
    return ('<polyline points= "' + points +
            '" stroke="rgb(200,200,200)" stroke-width="{0}.0" fill="none"/>'.format(strokewidth))
def drawCircles(varna):
    """Draw a colored circle behind each nucleotide that has an entry in
    varna.colorIDs (a pair of color ids: stroke then fill)."""
    svg = ''
    # zip stops at the shorter sequence, matching the original bounds check
    for nuc, ids in zip(varna.baseMap, varna.colorIDs):
        col = [varna.colorFunction(c, True) for c in ids]
        svg += '<circle cx="%f" cy="%f" r="14" stroke="%s" stroke-width="2" fill="%s"/>' % (
            nuc[1][0], nuc[1][1] - 4, col[0], col[1])
    return svg
def processConnect(pairlist, color, dashed=False, lineMap=False, strokewidth=4.0):
    """Render each ((x1, y1), (x2, y2)) segment in pairlist as an SVG
    <line> in the given RGB color.

    lineMap=True uses the thin number-tick style; dashed=True the thin
    dashed-pair style; otherwise a solid line of width strokewidth.
    """
    rgb = ','.join(str(c) for c in color)
    out = ''
    for start, end in pairlist:
        prefix = '<line x1="%s" y1="%s" x2="%s" y2="%s" ' % (start[0], start[1], end[0], end[1])
        if lineMap == True:
            suffix = 'stroke="rgb(%s)" stroke-width="0.9" opacity="0.95" />' % rgb
        elif dashed == True:
            suffix = 'stroke="rgb(%s)" stroke-width="1.2" opacity="0.85" />' % rgb
        else:
            suffix = 'stroke="rgb(%s)" stroke-width="%.1f" opacity="0.95" />' % (rgb, strokewidth)
        out += prefix + suffix
    return out
def drawExtraLines(varna):
    """Render extra correlation lines between nucleotides as colored SVG
    <line> elements.

    Each entry of varna.extraLineData is (from_nuc, to_nuc, coefficient),
    1-based.  The coefficient is binned against +/-threshA/threshB into a
    diverging blue/white/orange palette; the two intermediate bins are
    smoothly interpolated via colorGrad.  Returns "" when
    varna.extraLines is disabled.
    """
    out = ""
    if varna.extraLines == False:
        return out
    # bin edges from strong-negative through neutral to strong-positive
    bins = [-1.0, -varna.threshB, -varna.threshA, 0, varna.threshA, varna.threshB, 1.0]
    colorList = [(44,123,182), (44,123,182), (171,217,233), (255,255,255),
                 (253,174,97), (215,25,28), (215,25,28)]
    # True marks bins whose color is interpolated between its two endpoints
    gradient = [False, True, False, False, True, False]
    for fields in varna.extraLineData:
        fromNuc = fields[0]-1
        fromNucCoord = varna.baseMap[fromNuc][1]
        toNuc = fields[1]-1
        toNucCoord = varna.baseMap[toNuc][1]
        corrCoeff = fields[2]
        # filter out low correlations and short-range pairs (< 6 nt apart)
        if abs(corrCoeff) < varna.threshA or abs(fromNuc-toNuc) < 6:
            continue
        for i in range(len(bins)-1):
            # NOTE(review): values exactly on a bin edge (incl. +/-1.0) match
            # no bin and are silently skipped — confirm this is intended
            if bins[i]<corrCoeff<bins[i+1]:
                if gradient[i]:
                    col = colorGrad(corrCoeff, colorList[i], colorList[i+1], bins[i], bins[i+1])
                else:
                    col = colorList[i]
                line = processConnect([( ([fromNucCoord[0],fromNucCoord[1]]),
                                         ([toNucCoord[0],toNucCoord[1]]) )],
                                      col,strokewidth=5)
                out+=line
                break
    return out
def colorGrad(value, colorMin, colorMax, minValue, maxValue):
    """Linearly interpolate an RGB color for `value` between colorMin (at
    minValue) and colorMax (at maxValue), clamping out-of-range values to
    the endpoint colors.

    Fix: the original returned a one-shot `map` object (a Python 2
    leftover) that could only be consumed once and was not indexable; a
    tuple of ints is returned instead, which remains iterable for callers
    that join it into "r,g,b".  The unused alpha computation (dead code —
    it was never returned) has been removed.
    """
    c_range = abs(maxValue - minValue)
    v_pct = (value - min(maxValue, minValue)) / c_range
    # clamp before interpolating
    if value > maxValue:
        return colorMax
    if value < minValue:
        return colorMin
    # channel-wise linear interpolation
    return tuple(int(v_pct * (hi - lo) + lo) for lo, hi in zip(colorMin, colorMax))
def parseArgs():
    """Parse command-line options for the structure-drawing script.

    Fixes: typos in user-facing help text ('ofset' -> 'offset',
    'overide' -> 'override', '+=0.3' -> '+/-0.3'); removed the
    commented-out --xrna option.
    """
    import argparse
    prs = argparse.ArgumentParser(description='Colors and optionally compares a VARNA or XRNA file with a reference ct and .SHAPE file')
    prs.add_argument('input', action='store', type=str, help='input file')
    prs.add_argument('output', action='store', type=str, help='output .svg file')
    prs.add_argument('--ct', action='store', type=str, help='compare structure with a reference ct file')
    prs.add_argument('-e', '--enzyme', action='store', type=str, help='draw enzymatic cleavage data from file')
    prs.add_argument('-d', '--diff', action='store_true', default=False, help='changes chemical probing type to differential, coloring cutoffs +/-0.3')
    prs.add_argument('-c', '--colors', action='store', type=str, help='color behind nucs with custom colors')
    prs.add_argument('-l', '--lines', action='store', type=str, help='draw additional lines between certain nucs')
    prs.add_argument('-s', '--shape', action='store', type=str, help='override stored chemical probing values from varna')
    prs.add_argument('--offset', action='store', type=int, default=0, help='numbering offset, adds this to the numbering in the file')
    prs.add_argument('--switch', action='store_true', default=False, help='reverse the pairing coloring scheme')
    prs.add_argument('--onlyCircles', action='store_true', default=False, help="only draw the base positions as circles, needs colorfile input")
    return prs.parse_args()
def hasCT(correct, varna, switch=False):
    """Compare a reference CT structure against the drawn structure and
    return SVG markup for predicted-only, accepted-only and shared base
    pairs.  Prints PPV and sensitivity as a side effect.

    switch=True swaps the accepted/predicted colors.
    Fix: the original set `setScale` to the same value in both the XRNA
    and non-XRNA branches; the dead branch has been removed.
    """
    ac, pred, both, s, p = evalStructures(correct, varna)
    print('PPV: ', round(p, 2), 'SENS: ', round(s, 2))
    setScale = varna.scale * 2
    # shorten each pair line so it does not overlap the base glyphs
    both_pt = newLines(both, varna.pairMap, setScale)
    pred_pt = newLines(pred, varna.pairMap, setScale)
    ac_pt = newLines(ac, varna.pairMap, setScale)
    green, red, purple = (0, 50, 0), (100, 0, 0), (39, 17, 56)
    if switch:
        # switch the predicted and accepted colors
        red, purple = (39, 17, 56), (100, 0, 0)
    drawnLines = processConnect(pred_pt, purple) + processConnect(ac_pt, red, dashed=True) + processConnect(both_pt, green)
    return drawnLines
def noCT(varna):
    """Render the structure's own base pairs as plain black lines (no
    reference CT to compare against).

    Fix: the original set `setScale` to the same value in both the XRNA
    and non-XRNA branches; the dead branch has been removed.
    """
    # self-comparison: every pair comes back in the "shared" list
    ac, pred, both, s, p = evalStructures(varna, varna)
    setScale = varna.scale * 2
    both_pt = newLines(both, varna.pairMap, setScale, struct=varna)
    black = (0, 0, 0)
    return processConnect(both_pt, black)
def main():
    """Entry point: parse args, build the SVG layers (outline, enzyme
    arrows, circles, base letters, numbering, pair lines, extra lines) and
    write the combined document to the output file."""
    arg = parseArgs()
    # read input file (ssDiagram is defined elsewhere in this file)
    svgStruct = ssDiagram(arg.input)
    # overwrite stored chemical probing values
    if arg.shape:
        svgStruct.readSHAPE(arg.shape)
    if arg.diff:
        svgStruct.setdiff()
    # do custom colors if given
    if arg.colors:
        svgStruct.readCircleColors(arg.colors)
    # do extra lines
    if arg.lines:
        svgStruct.readExtraLines(arg.lines)
    # read enzymatic cleavage data
    if arg.enzyme:
        enzyme = Enzyme(arg.enzyme)
    # draw nucleotide outline (thicker when only circles are requested)
    outline = drawOutline(svgStruct)
    if arg.onlyCircles:
        outline = drawOutline(svgStruct, strokewidth=15)
    # draw circles behind nucleotides
    circles = drawCircles(svgStruct)
    # extra correlation lines
    extraLines = drawExtraLines(svgStruct)
    # draw pair lines, comparing against a reference structure if given
    if arg.ct:
        correct = RNAtools.CT(arg.ct)
        drawnLines = hasCT(correct,svgStruct,arg.switch)
    else:
        drawnLines = noCT(svgStruct)
    letters, max_x, max_y = drawBases(svgStruct)
    letters += drawNums(svgStruct,arg.offset)
    # construct the SVG header sized from the drawn extents
    header = '<svg width="{0}" height="{1}" version="1.1" xmlns="http://www.w3.org/2000/svg">'.format(max_x, max_y)
    background = '<rect x="0" y="0" width="100%" height="100%" style="fill:rgb(255,255,255)"/>'.format(max_x, max_y)
    if arg.enzyme:
        arrows = enzyme.drawArrows(svgStruct)
    else:
        arrows = ""
    # assemble and write the file
    out = header + background + outline + arrows + circles + letters + drawnLines + extraLines + '</svg>'
    # --onlyCircles replaces the full drawing with outline + circles only
    if arg.onlyCircles:
        out = header + background + outline + circles + '</svg>'
    w = open(arg.output,"w")
    w.write(out)
    w.close()
if __name__ == "__main__":
    main()
|
<filename>sazabi/__init__.py<gh_stars>0
import asyncio
import logging
import threading
import discord
import imgurpython
import twython
import yaml
from sazabi.plugins.twitch import Twitch
from sazabi.types import SazabiBotPlugin, LoggedObject
client = discord.Client()
class Sazabi(LoggedObject):
    """Discord bot: wires a YAML config, optional imgur/twitter clients and
    dynamically imported plugins onto the module-level discord `client`.
    """

    def __init__(self, session, config='config.yaml'):
        """session: SQLAlchemy session shared with plugins.
        config: path to the YAML configuration file."""
        self.session = session  # type: sqlalchemy.orm.session.Session
        self.logger.setLevel(logging.INFO)
        self._config = self._read_config(config)
        # third-party clients are only built when their plugin is enabled
        self.imgur_client = self._imgur_client() if 'imgur' in self._enabled_plugins else None
        self.twitter_client = self._twitter_client() if 'twitter' in self._enabled_plugins else None
        self._plugins = None
        self._configure_plugins()
        # TODO move these into decorators
        setattr(client, self.on_message.__name__, self.on_message)
        setattr(client, self.on_ready.__name__, self.on_ready)

    @staticmethod
    def _read_config(config):
        """Parse the YAML config file.

        Fix: yaml.load without an explicit Loader executes arbitrary YAML
        tags and raises TypeError on PyYAML >= 6; safe_load is the correct
        call for plain configuration data.
        """
        with open(config) as fp:
            parsed = yaml.safe_load(fp)
        return parsed

    @property
    def _discord_config(self):
        return self._config.get('discord')

    @property
    def _imgur_config(self):
        return self._config.get('imgur')

    @property
    def _twitch_config(self):
        return self._config.get('twitch')

    @property
    def _twitter_config(self):
        return self._config.get('twitter')

    @property
    def _enabled_plugins(self):
        # plugin module names under sazabi.plugins
        return self._config.get('plugins')

    def _imgur_client(self):
        """Build the imgur client; returns None (after an error log) when
        the imgur section is missing from the config."""
        try:
            return imgurpython.ImgurClient(
                self._imgur_config.get('client_id'),
                self._imgur_config.get('client_token'),
            )
        except AttributeError:
            self.logger.error(
                "Client id and token for imgur plugin must be specified. Imgur plugin disabled!")

    def _twitter_client(self):
        """Build an app-auth twitter client; returns None (after an error
        log) when the twitter section is missing from the config."""
        try:
            twitter = twython.Twython(
                self._twitter_config.get('consumer_key'),
                self._twitter_config.get('consumer_secret'),
                oauth_version=2
            )
            access_token = twitter.obtain_access_token()
            return twython.Twython(
                self._twitter_config.get('consumer_key'),
                access_token=access_token
            )
        except AttributeError:
            self.logger.error(
                "Consumer key and secret for twitter plugin must be specified. Twitter plugin disabled!")

    @property
    def _weather_config(self):
        return self._config.get('weather')

    def _configure_plugins(self):
        """Import every enabled plugin module (which registers its class as
        a SazabiBotPlugin subclass) and instantiate all registered plugins.

        Fix: the original also ran `_ = map(__import__, plugin_config)` over
        the already-imported classes — a lazy map that was never consumed,
        i.e. dead code — which has been removed.
        """
        if self._plugins is None:
            for name in self._config.get('plugins'):
                module = __import__('sazabi.plugins.' + name, fromlist=[name.title()])
                getattr(module, name.title())  # fail fast if the plugin class is missing
            self._plugins = [cls() for cls in SazabiBotPlugin.__subclasses__()]

    def launch(self):
        """Start the discord client plus the periodic background task and
        block on the event loop until shutdown."""
        self.logger.info("Launching...")
        f_stop = threading.Event()
        loop = asyncio.get_event_loop()
        try:
            tasks = [client.start(self._config.get('discord').get('token')),
                     self.background_tasks(f_stop)]
            # NOTE(review): the explicit loop= argument to gather was
            # deprecated in 3.8 and removed in 3.10 — revisit on upgrade
            gathered = asyncio.gather(*tasks, loop=loop)
            loop.run_until_complete(gathered)
        except RuntimeError as e:
            self.logger.error("Received fatal error: {}".format(e))
            self._handle_exit()
        except KeyboardInterrupt:
            # drain: log out, then cancel and await whatever is still pending
            loop.run_until_complete(client.logout())
            pending = asyncio.Task.all_tasks(loop=loop)
            gathered = asyncio.gather(*pending, loop=loop)
            try:
                gathered.cancel()
                loop.run_until_complete(gathered)
                gathered.exception()
            except BaseException:
                # best effort: CancelledError (a BaseException since 3.8)
                # and already-finished tasks are all expected here
                pass
        finally:
            loop.close()

    def _handle_exit(self):
        """Tear down the client and all tasks after a fatal error, then
        relaunch.  Fix: corrected the 'Handing' typo in the log message."""
        self.logger.error("Handling fatal error, restarting...")
        client.loop.run_until_complete(client.logout())
        for t in asyncio.Task.all_tasks(loop=client.loop):
            if t.done():
                t.exception()  # retrieve to avoid 'exception never retrieved'
                continue
            t.cancel()
            try:
                client.loop.run_until_complete(asyncio.wait_for(t, 5, loop=client.loop))
                t.exception()
            except asyncio.InvalidStateError:
                pass
            except asyncio.TimeoutError:
                pass
            except asyncio.CancelledError:
                pass
        self.launch()  # TODO may cause stack overflow

    async def background_tasks(self, f_stop):
        """Periodically poll Twitch for stream updates.

        f_stop is accepted for interface compatibility but never checked —
        the loop only ends with the process.
        """
        twitch = Twitch()
        while True:
            self.logger.info('Looking for stream updates...')
            await twitch.parse(client, None, None, **self._twitch_config)
            await asyncio.sleep(self._twitch_config.get('interval'))

    # Fix: dropped the @asyncio.coroutine decorators below — they were
    # redundant on async def and asyncio.coroutine was removed in 3.11,
    # which made class creation fail with AttributeError.
    async def on_ready(self):
        """Log the identity the client connected as."""
        self.logger.info(
            "Connected as: {} {}".format(client.user.name, client.user.id))

    async def on_message(self, message):
        """Dispatch every incoming message to all configured plugins."""
        self.logger.info("Got message: User: {}, Message: {}".format(message.author.name, message.content))
        for plugin in self._plugins:  # type: SazabiBotPlugin
            kwargs = {
                'config': self._config,
                'imgur': self.imgur_client,
                'twitter': self.twitter_client
            }
            await plugin.parse(client, message, **kwargs)
|
# Importing testing frameworks:
import unittest
# Importing 3rd party packages for testing:
import sqlite3
import sqlalchemy
import pandas as pd
import bs4
import numpy as np
# Importing velkoz web packages for testing:
from velkoz_web_packages.objects_stock_data.objects_fund_holdings.web_objects_fund_holdings import NASDAQFundHoldingsResponseObject
from velkoz_web_packages.objects_stock_data.objects_fund_holdings.ingestion_engines_fund_holdings import FundHoldingsDataIngestionEngine
from velkoz_web_packages.objects_stock_data.objects_stock_price.web_objects_stock_price import NASDAQStockPriceResponseObject
# Creating NASDAQFundHoldingsResponseObjects for testing:
# NOTE(review): these constructors appear to issue live HTTP requests to
# Yahoo Finance at import time, so merely importing this test module hits
# the network — confirm, and consider moving them into setUpClass fixtures.
icln_holdings = NASDAQFundHoldingsResponseObject("ICLN")
qcln_holdings = NASDAQFundHoldingsResponseObject("QCLN")
voo_holdings = NASDAQFundHoldingsResponseObject("VOO")
# Stock-price objects used to exercise the ingestion engine's rejection path:
aapl_price_obj = NASDAQStockPriceResponseObject('AAPL')
tsla_price_obj = NASDAQStockPriceResponseObject('TSLA')
# Mixed list: three supported holdings objects + two unsupported price objects.
mixed_web_obj_lst = [icln_holdings, qcln_holdings, voo_holdings, aapl_price_obj, tsla_price_obj]
class NASDAQFundHoldingsResponseObjectTest(unittest.TestCase):
    # NOTE(review): assertions below run against the module-level fixture
    # objects, which scrape live Yahoo Finance pages at import time, so the
    # outcomes depend on network availability and current page markup.
    def test_fund_holding_web_object_data_extraction(self):
        """Validate NASDAQFundHoldingsResponseObject's extraction pipeline.

        Asserts that the object:
        * builds the correct Yahoo Finance holdings URL from an input ticker,
        * parses the HTML response into the "Top 10 Holdings" table tag, and
        * converts that table into a pandas DataFrame with the expected
          column names and dtypes.
        """
        # Testing the accuracy of the url generator:
        self.assertEqual("https://finance.yahoo.com/quote/ICLN/holdings", icln_holdings._yhfinance_url)
        self.assertEqual("https://finance.yahoo.com/quote/ICLN/holdings?p=ICLN", icln_holdings._http_response.url)
        self.assertEqual("https://finance.yahoo.com/quote/QCLN/holdings", qcln_holdings._yhfinance_url)
        self.assertEqual("https://finance.yahoo.com/quote/QCLN/holdings?p=QCLN", qcln_holdings._http_response.url)
        self.assertEqual("https://finance.yahoo.com/quote/VOO/holdings", voo_holdings._yhfinance_url)
        self.assertEqual("https://finance.yahoo.com/quote/VOO/holdings?p=VOO", voo_holdings._http_response.url)
        # Type Checking the holdings_tbl BeautifulSoup object:
        self.assertIsInstance(icln_holdings._holdings_tbl, bs4.element.Tag)
        self.assertIsInstance(qcln_holdings._holdings_tbl, bs4.element.Tag)
        self.assertIsInstance(voo_holdings._holdings_tbl, bs4.element.Tag)
        # Validating the dataframe extracted from the html table:
        df_column_format = sorted(['name', 'percent_holdings'])
        icln_df_columns = sorted(icln_holdings._holdings_data.columns)
        qcln_df_columns = sorted(qcln_holdings._holdings_data.columns)
        voo_df_columns = sorted(voo_holdings._holdings_data.columns)
        self.assertEqual(icln_df_columns, df_column_format)
        self.assertEqual(qcln_df_columns, df_column_format)
        self.assertEqual(voo_df_columns, df_column_format)
        # Type-checking the dataframe columns (assertIs is valid here since
        # np.dtype instances are cached singletons):
        self.assertIs(icln_holdings._holdings_data.name.dtype, np.dtype("object"))
        self.assertIs(icln_holdings._holdings_data.percent_holdings.dtype, np.dtype("float64"))
        self.assertIs(icln_holdings._holdings_data.index.dtype, np.dtype('object'))
        self.assertIs(qcln_holdings._holdings_data.name.dtype, np.dtype("object"))
        self.assertIs(qcln_holdings._holdings_data.percent_holdings.dtype, np.dtype("float64"))
        self.assertIs(qcln_holdings._holdings_data.index.dtype, np.dtype('object'))
        self.assertIs(voo_holdings._holdings_data.name.dtype, np.dtype("object"))
        self.assertIs(voo_holdings._holdings_data.percent_holdings.dtype, np.dtype("float64"))
        self.assertIs(voo_holdings._holdings_data.index.dtype, np.dtype('object'))
class FundHoldingsDataIngestionEngineTest(unittest.TestCase):
    def test_fund_holding_data_ingestion(self):
        """Validate the FundHoldingsDataIngestionEngine's queue management.

        Covers:
        * manipulating the internal WebResponseObjects list (add, purge),
        * the validation method accepting supported holdings objects and
          rejecting unsupported price objects (status codes 20 vs 10 —
          presumably valid/invalid markers; confirm against the engine), and
        * creation of the database engine, sessionmaker and scoped session.
        """
        # Creating an Ingestion Engine instance backed by an in-memory db:
        ingestion_engine = FundHoldingsDataIngestionEngine(
            "sqlite:///:memory:")
        # Testing the connectivity of the ingestion engine to the database:
        self.assertIsInstance(ingestion_engine._sqlaengine, sqlalchemy.engine.Engine)
        self.assertIsInstance(ingestion_engine._db_session_maker, sqlalchemy.orm.session.sessionmaker)
        self.assertIsInstance(ingestion_engine._db_session, sqlalchemy.orm.scoped_session)
        # The queue should start empty:
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 0)
        # Adding WebPageResponse objects grows the queue one at a time:
        ingestion_engine._insert_web_obj(icln_holdings)
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 1)
        ingestion_engine._insert_web_obj(qcln_holdings)
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 2)
        ingestion_engine._insert_web_obj(voo_holdings)
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 3)
        # Purging empties the queue:
        ingestion_engine._purge_web_obj_que()
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 0)
        # Ingesting a mixed list of WebObjects to test validation status:
        for web_obj in mixed_web_obj_lst:
            ingestion_engine._insert_web_obj(web_obj)
        self.assertEqual(len(ingestion_engine._WebPageResponseObjs), 5)
        # Running the main data-type validation method directly:
        validation_dict = ingestion_engine._validate_args()
        # Holdings objects should be validated as compatible (code 20):
        self.assertEqual(validation_dict[icln_holdings], 20)
        self.assertEqual(validation_dict[qcln_holdings], 20)
        self.assertEqual(validation_dict[voo_holdings], 20)
        # Price objects should be validated as incompatible (code 10):
        self.assertEqual(validation_dict[aapl_price_obj], 10)
        self.assertEqual(validation_dict[tsla_price_obj], 10)
    def test_fund_holdings_ingestion_engine_database_update(self):
        """Validate the engine's database-write path.

        Asserts that writing the queued holdings objects creates one
        "<TICKER>_holdings_data" table per fund and that reading the tables
        back yields DataFrames equal to the originally extracted data.
        """
        # Creating a pre-populated ingestion engine for testing:
        ingestion_engine = FundHoldingsDataIngestionEngine("sqlite:///:memory:",
                                                           icln_holdings, qcln_holdings,
                                                           voo_holdings)
        # Performing Write Operations:
        ingestion_engine._write_web_objects()
        # Asserting that the correct tables were created:
        # NOTE(review): Engine.table_names() was removed in SQLAlchemy 2.0 —
        # migrate to sqlalchemy.inspect(engine).get_table_names() on upgrade.
        extracted_db_tbls_lst = sorted(ingestion_engine._sqlaengine.table_names())
        test_db_tbls_lst = sorted(["ICLN_holdings_data", "QCLN_holdings_data", "VOO_holdings_data"])
        self.assertEqual(extracted_db_tbls_lst, test_db_tbls_lst)
        # Reading the data back as DataFrames for comparison:
        icln_data = pd.read_sql_table("ICLN_holdings_data", con=ingestion_engine._sqlaengine, index_col='symbol')
        qcln_data = pd.read_sql_table("QCLN_holdings_data", con=ingestion_engine._sqlaengine, index_col='symbol')
        voo_data = pd.read_sql_table("VOO_holdings_data", con=ingestion_engine._sqlaengine, index_col='symbol')
        # Round-tripped data must equal the originally scraped data:
        self.assertEqual(icln_data.equals(icln_holdings._holdings_data), True)
        self.assertEqual(qcln_data.equals(qcln_holdings._holdings_data), True)
        self.assertEqual(voo_data.equals(voo_holdings._holdings_data), True)
|
<reponame>theEpsilon/slr-tool
import boto3
import json
from bson import json_util
from db_service import DBService
from event_validator import EventValidator
def handler(event, context):
    """AWS Lambda entry point for the SLR tool's project API.

    Validates the incoming event, authorizes the caller through a separate
    authorizer Lambda (with project scope when the path targets a specific
    project), then routes on the path segments and HTTP method to the
    matching DBService operation.

    Returns an API-Gateway-style dict with statusCode and body.
    Fix: the "collabs" branch used `if` where its siblings used `elif`;
    made consistent (behavior unchanged, since path is never mutated).
    """
    validator = EventValidator(event)
    if not validator.validate_event():
        return {
            "statusCode": 400,
            "body": "Bad Request"
        }
    # "/projects/<id>/results" -> ["projects", "<id>", "results"]
    path = event["request"]["path"].split("/")
    path.pop(0)  # drop the empty segment produced by the leading slash
    method = event["request"]["method"]
    auth_request = {
        "request": event["request"],
        "user-id": event["queryparams"]["user-id"]
    }
    if len(path) >= 2:
        # project-scoped request: let the authorizer check project membership
        auth_request["project-id"] = event["urlparams"]["project-id"]
    boto3_client = boto3.client('lambda', region_name='eu-central-1')
    auth_response = boto3_client.invoke(FunctionName='slrauthorizeuser-fabi', InvocationType='RequestResponse', Payload=json.dumps(auth_request))
    auth_response = json.loads(auth_response["Payload"].read().decode())
    # map authorizer statuses straight onto HTTP responses
    if auth_response["status"] == 403:
        return {
            "statusCode": 403,
            "body": "Permission denied"
        }
    elif auth_response["status"] == 404:
        return {
            "statusCode": 404,
            "body": "User not found"
        }
    elif auth_response["status"] == 500:
        return {
            "statusCode": 500,
            "body": "Server Error"
        }
    db_service = DBService()
    url_params = event["urlparams"]
    query_params = event["queryparams"]
    if "payload" in event:
        payload = event["payload"]
    else:
        payload = {}
    # NOTE(review): unmatched routes fall through and return this string
    # with statusCode 200 — presumably a 404/405 was intended; confirm.
    result = "Error"
    if len(path) == 1:
        if path[0] == "projects":
            if method == "POST":
                # Handled in different Lambda
                pass
            elif method == "GET":
                # Handled in different Lambda
                pass
        elif path[0] == "find_user":
            # search by username and/or display name, whichever is present
            if "username" in query_params and query_params["username"] != "":
                if "name" in query_params and query_params["name"] != "":
                    result = db_service.find_user(query_params["username"], query_params["name"])
                else:
                    result = db_service.find_user(username=query_params["username"])
            elif "name" in query_params and query_params["name"] != "":
                result = db_service.find_user(name=query_params["name"])
    elif len(path) == 3:
        # /projects/<project-id>/<collection>
        if path[2] == "results":
            if method == "GET":
                if "filter" in query_params and query_params["filter"] != "":
                    result = db_service.get_all_results_in_project(url_params["project-id"], query_params["filter"])
                else:
                    result = db_service.get_all_results_in_project(url_params["project-id"])
            elif method == "POST":
                result = db_service.add_result_to_project(url_params["project-id"], payload["result-id"])
        elif path[2] == "collabs":
            if method == "GET":
                result = db_service.get_collabs(url_params["project-id"])
            elif method == "POST":
                result = db_service.add_collab_to_project(url_params["project-id"], payload["user-id"])
            elif method == "DELETE":
                result = db_service.remove_collab_from_project(url_params["project-id"], query_params["del-id"])
        elif path[2] == "labels":
            if method == "GET":
                result = db_service.get_labels_in_project(url_params["project-id"])
            elif method == "POST":
                result = db_service.add_label_to_project(url_params["project-id"], payload)
            elif method == "DELETE":
                result = db_service.remove_label_from_project(url_params["project-id"], query_params["label-id"])
            elif method == "PUT":
                result = db_service.update_label_in_project(url_params["project-id"], query_params["label-id"], payload)
        elif path[2] == "searches":
            if method == "POST":
                result = db_service.add_search_to_project(url_params["project-id"], payload["search-id"], payload["add-results"])
            elif method == "DELETE":
                result = db_service.remove_search_from_project(url_params["project-id"], query_params["search-id"])
        elif path[2] == "meta":
            if method == "PUT":
                result = db_service.change_meta_info(url_params["project-id"], payload)
    elif len(path) > 3:
        # /projects/<project-id>/results/<result-id>[/<sub-collection>]
        if len(path) == 4 and path[2] == "results":
            if method == "DELETE":
                result = db_service.remove_result_from_project(url_params["project-id"], url_params["result-id"])
            elif method == "GET":
                result = db_service.get_result_in_project(url_params["project-id"], url_params["result-id"])
        elif len(path) > 4:
            if path[4] == "labels":
                if method == "GET":
                    # Not considered necessary
                    pass
                elif method == "POST":
                    result = db_service.add_label_to_result(url_params["project-id"], url_params["result-id"], payload["label-id"])
                elif method == "DELETE":
                    result = db_service.remove_label_from_result(url_params["project-id"], url_params["result-id"], query_params["label-id"])
            elif path[4] == "comments":
                if method == "GET":
                    result = db_service.get_comments_for_result(url_params["project-id"], url_params["result-id"])
                elif method == "POST":
                    result = db_service.add_comment_to_result(url_params["project-id"], url_params["result-id"], query_params["user-id"], payload)
                elif method == "DELETE":
                    result = db_service.delete_comment_from_result(url_params["project-id"], url_params["result-id"], query_params["comment-id"])
    # round-trip through bson's json_util so ObjectIds etc. serialize cleanly
    return {
        "statusCode": 200,
        "body": json.loads(json_util.dumps(result))
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.