code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import requests # pip install requests
import json
import execjs # pip install PyExecJS
import urllib3 # pip install urllib3
'''
author by Benji
date at 2018.12.07
实现: 模拟浏览器中Google翻译的url请求
不同于Baidu直接给出API, Google翻译需要调用其封装的lib
参考: https://www.jianshu.com/p/95cf6e73d6ee
https://cloud.google.com/translate/docs/apis
'''
class PyJsParams():
def __init__(self):
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
def getTk(self, text):
return self.ctx.call("TL", text)
def buildUrl(text, tk):
baseUrl = 'https://translate.google.com/translate_a/single?client=webapp&'
baseUrl += '&sl=auto&tl=' + toLang
baseUrl += '&hl=en&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&source=btn&ssel=0&tsel=0&kc=0&'
baseUrl += 'tk='+str(tk)+'&'
baseUrl += 'q='+text
return baseUrl
def translate(text, jsParas):
url = buildUrl(text, jsParas.getTk(text))
try:
# 添加headers, 模仿浏览器行为
headers = requests.utils.default_headers()
headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
urllib3.disable_warnings()
# solve: SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate
r = requests.get(url, headers=headers, verify=False)
result = json.loads(r.text)
res = str(result[0][0][0])
except Exception as e:
res = ''
print("翻译"+text+"失败")
print("错误信息:")
print(e)
finally:
return res
toLang = 'en'
if __name__ == '__main__':
jsParas = PyJsParams()
res = translate('小顺子给春宫娘娘请安了', jsParas)
print(res)
'''
output
Xiaoshun gave the Spring Palace girl an appointment.
''' | Python/Exercise/Exercise_2018/Translate/googleTranslate.py |
import requests # pip install requests
import json
import execjs # pip install PyExecJS
import urllib3 # pip install urllib3
'''
author by Benji
date at 2018.12.07
实现: 模拟浏览器中Google翻译的url请求
不同于Baidu直接给出API, Google翻译需要调用其封装的lib
参考: https://www.jianshu.com/p/95cf6e73d6ee
https://cloud.google.com/translate/docs/apis
'''
class PyJsParams():
def __init__(self):
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
def getTk(self, text):
return self.ctx.call("TL", text)
def buildUrl(text, tk):
baseUrl = 'https://translate.google.com/translate_a/single?client=webapp&'
baseUrl += '&sl=auto&tl=' + toLang
baseUrl += '&hl=en&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&source=btn&ssel=0&tsel=0&kc=0&'
baseUrl += 'tk='+str(tk)+'&'
baseUrl += 'q='+text
return baseUrl
def translate(text, jsParas):
url = buildUrl(text, jsParas.getTk(text))
try:
# 添加headers, 模仿浏览器行为
headers = requests.utils.default_headers()
headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
urllib3.disable_warnings()
# solve: SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate
r = requests.get(url, headers=headers, verify=False)
result = json.loads(r.text)
res = str(result[0][0][0])
except Exception as e:
res = ''
print("翻译"+text+"失败")
print("错误信息:")
print(e)
finally:
return res
toLang = 'en'
if __name__ == '__main__':
jsParas = PyJsParams()
res = translate('小顺子给春宫娘娘请安了', jsParas)
print(res)
'''
output
Xiaoshun gave the Spring Palace girl an appointment.
''' | 0.330471 | 0.189465 |
import os
from shutil import copyfile
from PyQt6.QtCore import Qt
from PyQt6.QtTest import QTest
from PyQt6.QtWidgets import QApplication
from password_manager.application_context import ApplicationContext
from password_manager.utils.file_helper import FileHelper
class SystemTestFixture:
def __init__(self, delay: int = 10):
self.delay = delay
self.app = QApplication([])
self.application_context = ApplicationContext()
self.application_context.run_server = False
self.application_context.save_preferences = False
self.application_context.initialize_integration_server(self.get_filepath('test_key.pem'),
self.get_filepath('test_cert.pem'), 20000)
self.application_context.create_database_controller.run_dialog()
self.db_file = None
QTest.qWaitForWindowExposed(self.application_context.create_database_controller.dialog)
@staticmethod
def get_filepath(name: str) -> str:
return f'{os.path.dirname(os.path.realpath(__file__))}/{name}'
def get_db_file(self, name: str) -> str:
self.db_file = self.get_filepath(name)
return self.db_file
def click_button(self, button):
QTest.mouseClick(button, Qt.MouseButton.LeftButton, delay=self.delay * 5)
def insert_text(self, widget, text):
QTest.keyClicks(widget, text, delay=self.delay)
def __enter__(self):
return self
def __exit__(self, _, __, ___):
if self.db_file and os.path.exists(self.db_file):
os.remove(self.db_file)
def open_main_window_with_temp_db(self, db_file: str, new_db: str, password: str):
file = self.get_db_file(new_db)
copyfile(self.get_filepath(db_file), file)
self.open_existing_database(file)
self.insert_text(self.application_context.login_controller.dialog.password_input, password)
self.click_button(self.application_context.login_controller.dialog.open_button)
QTest.qWaitForWindowExposed(self.application_context.main_window_controller.window)
def open_existing_database(self, filepath: str):
FileHelper.open_db_file = lambda: filepath
self.click_button(self.application_context.create_database_controller.dialog.open_existing_button)
def add_record(self, title: str, password: str):
self.click_button(self.application_context.main_window_controller.window.add_new_button)
self.insert_text(self.application_context.main_window_controller.window.title_input, title)
self.insert_text(self.application_context.main_window_controller.window.password_input, password)
self.click_button(self.application_context.main_window_controller.window.edit_save_button) | system_tests/fixture.py | import os
from shutil import copyfile
from PyQt6.QtCore import Qt
from PyQt6.QtTest import QTest
from PyQt6.QtWidgets import QApplication
from password_manager.application_context import ApplicationContext
from password_manager.utils.file_helper import FileHelper
class SystemTestFixture:
def __init__(self, delay: int = 10):
self.delay = delay
self.app = QApplication([])
self.application_context = ApplicationContext()
self.application_context.run_server = False
self.application_context.save_preferences = False
self.application_context.initialize_integration_server(self.get_filepath('test_key.pem'),
self.get_filepath('test_cert.pem'), 20000)
self.application_context.create_database_controller.run_dialog()
self.db_file = None
QTest.qWaitForWindowExposed(self.application_context.create_database_controller.dialog)
@staticmethod
def get_filepath(name: str) -> str:
return f'{os.path.dirname(os.path.realpath(__file__))}/{name}'
def get_db_file(self, name: str) -> str:
self.db_file = self.get_filepath(name)
return self.db_file
def click_button(self, button):
QTest.mouseClick(button, Qt.MouseButton.LeftButton, delay=self.delay * 5)
def insert_text(self, widget, text):
QTest.keyClicks(widget, text, delay=self.delay)
def __enter__(self):
return self
def __exit__(self, _, __, ___):
if self.db_file and os.path.exists(self.db_file):
os.remove(self.db_file)
def open_main_window_with_temp_db(self, db_file: str, new_db: str, password: str):
file = self.get_db_file(new_db)
copyfile(self.get_filepath(db_file), file)
self.open_existing_database(file)
self.insert_text(self.application_context.login_controller.dialog.password_input, password)
self.click_button(self.application_context.login_controller.dialog.open_button)
QTest.qWaitForWindowExposed(self.application_context.main_window_controller.window)
def open_existing_database(self, filepath: str):
FileHelper.open_db_file = lambda: filepath
self.click_button(self.application_context.create_database_controller.dialog.open_existing_button)
def add_record(self, title: str, password: str):
self.click_button(self.application_context.main_window_controller.window.add_new_button)
self.insert_text(self.application_context.main_window_controller.window.title_input, title)
self.insert_text(self.application_context.main_window_controller.window.password_input, password)
self.click_button(self.application_context.main_window_controller.window.edit_save_button) | 0.3295 | 0.075961 |
import math
print ('----Math functions loaded up---- \nEnter \'list_functions()\' to display available functions')
def list_functions():
print ('intoRadians()')
print ('intoDegrees()')
print ('LOS()')
print ('vectorProduct()')
print ('vectorMag()')
def intoRadians():
degrees = float(input('Enter degrees: '))
radians = (degrees * (pi/180))
print (radians)
return radians
def intoDegrees():
radians = float(input('Enter radians: '))
degrees = (radians * (180/pi))
print (degrees)
return degrees
def LOS():
choice = input('Enter \'c\' to solve for c. \nEnter \'theta\' to solve for theta.')
if choice == 'c':
LOS_c()
else:
LOS_theta()
def LOS_c():
a = float(input("Enter a value for A: "))
b = float(input("Enter a value for B: "))
theta = float(input("Enter a value for theta: "))
c = (math.sqrt(((a**2) + (b**2)) - ((2*a*b) * math.cos(theta))))
return c
def LOS_theta():
a = float(input("Enter a value for A: "))
b = float(input("Enter a value for B: "))
c = float(input("Enter a value for C: "))
theta = (math.asin(((c**2)-(b**2)-(a**2))/(-2*a*b)))
print (theta)
return theta
def vectorProduct():
vector_1_i = float(input('Enter vector 1\'s i value: '))
vector_1_j = float(input('Enter vector 1\'s j value: '))
vector_2_i = float(input('Enter vector 2\'s i value: '))
vector_2_j = float(input('Enter vector 2\'s j value: '))
vector_product = [(vector_1_i * vector_2_i) + (vector_1_j * vector_2_j)]
print (vector_product)
return vector_product
def vectorMag():
vector_1_i = float(input('Enter vector 1\'s i value: '))
vector_1_j = float(input('Enter vector 1\'s j value: '))
vec_mag = math.sqrt((vector_1_i ** 2) + (vector_1_j ** 2))
print (vec_mag)
return vec_mag
def coSolve():
adj = float(input('Enter length of adjacent side: '))
hypot = float(input('Enter length of hypotenuse: '))
coSolution = (math.acos(adj / hypot))
print (coSolution)
def sinSolve():
opp = float(input('Enter length of opposite side: '))
hypot = float(input('Enter length of hypotenuse: '))
sinSolution = (math.asin(opp / hypot))
print (sinSolution) | Math Functions/math functions.py | import math
print ('----Math functions loaded up---- \nEnter \'list_functions()\' to display available functions')
def list_functions():
print ('intoRadians()')
print ('intoDegrees()')
print ('LOS()')
print ('vectorProduct()')
print ('vectorMag()')
def intoRadians():
degrees = float(input('Enter degrees: '))
radians = (degrees * (pi/180))
print (radians)
return radians
def intoDegrees():
radians = float(input('Enter radians: '))
degrees = (radians * (180/pi))
print (degrees)
return degrees
def LOS():
choice = input('Enter \'c\' to solve for c. \nEnter \'theta\' to solve for theta.')
if choice == 'c':
LOS_c()
else:
LOS_theta()
def LOS_c():
a = float(input("Enter a value for A: "))
b = float(input("Enter a value for B: "))
theta = float(input("Enter a value for theta: "))
c = (math.sqrt(((a**2) + (b**2)) - ((2*a*b) * math.cos(theta))))
return c
def LOS_theta():
a = float(input("Enter a value for A: "))
b = float(input("Enter a value for B: "))
c = float(input("Enter a value for C: "))
theta = (math.asin(((c**2)-(b**2)-(a**2))/(-2*a*b)))
print (theta)
return theta
def vectorProduct():
vector_1_i = float(input('Enter vector 1\'s i value: '))
vector_1_j = float(input('Enter vector 1\'s j value: '))
vector_2_i = float(input('Enter vector 2\'s i value: '))
vector_2_j = float(input('Enter vector 2\'s j value: '))
vector_product = [(vector_1_i * vector_2_i) + (vector_1_j * vector_2_j)]
print (vector_product)
return vector_product
def vectorMag():
vector_1_i = float(input('Enter vector 1\'s i value: '))
vector_1_j = float(input('Enter vector 1\'s j value: '))
vec_mag = math.sqrt((vector_1_i ** 2) + (vector_1_j ** 2))
print (vec_mag)
return vec_mag
def coSolve():
adj = float(input('Enter length of adjacent side: '))
hypot = float(input('Enter length of hypotenuse: '))
coSolution = (math.acos(adj / hypot))
print (coSolution)
def sinSolve():
opp = float(input('Enter length of opposite side: '))
hypot = float(input('Enter length of hypotenuse: '))
sinSolution = (math.asin(opp / hypot))
print (sinSolution) | 0.452536 | 0.439206 |
import struct
def write_instruction(instruction):
pass
def calculate_file_size(numbers_data):
number_point = numbers_data[0]
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
#size of header adapted to obtain expected number, counted value was 139, need to understan where difference comes from
headers_size = 145
alphabet_size = 3 * 1 # only 1 entry is in alphabet
points_size = number_point * 11
instructions_size = (number_instructions +number_pois)* 44
difference_file_size = 16
size_attribute = headers_size + points_size + instructions_size - difference_file_size
return size_attribute
def write_points(fit_file,decoded_data):
latitude_data = decoded_data[0]
longitude_data = decoded_data[1]
altitude_data = decoded_data[2]
instructions_data = decoded_data[3]
for i in range (0,len(latitude_data)):
# 1 byte
byte=b'\x06'
fit_file.write(byte)
# 4 bytes
# point latitude
point_latitude = latitude_data[i]
byte = struct.pack('<i',point_latitude)
fit_file.write(byte)
# 4 bytes
# point longitude
point_longitude = longitude_data[i]
byte = struct.pack('<i',point_longitude)
fit_file.write(byte)
# 2 bytes
# point altitude
point_altitude = altitude_data[i]
byte = struct.pack('<H',point_altitude)
fit_file.write(byte)
def write_instructions(fit_file,instruction_data,instruction_distance,name_data,points_of_interest,numbers_data):
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
poi_name = points_of_interest[0]
poi_type = points_of_interest[1]
poi_distance = points_of_interest[2]
poi_identification = points_of_interest[3]
# starts in -1 since first instructions should be 0
instruction_identification = -1
for i in range (0,len(instruction_data)): #TBD Update number of points
# increase identification to make sure that each instruction is in the correct point
instruction_identification += 1
# steps without turn instructions are identified with 15
if instruction_data[i] != b'\xff':
# 1 byte
byte=b'\x04'
fit_file.write(byte)
# 2 bytes
# instruction identification
instruction_identification = instruction_identification
byte = struct.pack('<H',instruction_identification)
fit_file.write(byte)
# 1 bytes
# instruction direction
byte = instruction_data[i]
fit_file.write(byte)
# 4 bytes
# instruction distance
byte = int(float(instruction_distance[i]))
byte=struct.pack('<I', byte)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\xFF\xFF\xFF\xFF'
fit_file.write(byte)
# 32 bytes
# instruction description
byte = str.encode(name_data[i],'utf-8')
if len(byte) < 32:
byte += b'\x00' *(32-len(byte))
else:
byte = byte[:32]
fit_file.write(byte)
# write POI data
i=0
for i in range (0,number_pois): #TBD Update number of points
# 1 byte
byte=b'\x04'
fit_file.write(byte)
# 2 bytes
# instruction identification
poi = poi_identification[i]
byte = struct.pack('<H',poi)
fit_file.write(byte)
# 1 bytes
# instruction direction
byte = poi_type[i]
fit_file.write(byte)
# 4 bytes
# instruction distance
byte = int(float(poi_distance[i]))
byte=struct.pack('<I', byte)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\x00\x00\x00\x00'
fit_file.write(byte)
# 32 bytes
# instruction description
byte = str.encode(poi_name[i],'utf-8')
if len(byte) < 32:
byte += b'\x00' *(32-len(byte))
else:
byte = byte[:32]
fit_file.write(byte)
def write_alphabet(fit_file):
for i in range(0, 1500):
# 1 byte
byte=b'\x02'
fit_file.write(byte)
# 2 bytes
alphabet_instruction=i
byte = struct.pack('<H',alphabet_instruction)
fit_file.write(byte)
def encode_fit (fit_path,decoded_data,extracted_attributes):
fit_file = open(fit_path, 'wb')
latitude_data = decoded_data[0]
longitude_data = decoded_data[1]
altitude_data = decoded_data[2]
instruction_data = decoded_data[3]
name_data = decoded_data[4]
lat_lon_bounding_box = extracted_attributes[0]
lat_ne_bounding_box = lat_lon_bounding_box[0]
lat_sw_bounding_box = lat_lon_bounding_box[1]
lon_ne_bounding_box = lat_lon_bounding_box[2]
lon_sw_bounding_box = lat_lon_bounding_box[3]
total_distance = int(extracted_attributes[1])
alt_bounding_box = extracted_attributes[2]
maximum_altitude = alt_bounding_box[0]
minimum_altitude = alt_bounding_box[1]
numbers_data = extracted_attributes[3]
number_point = numbers_data[0]
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
instruction_distance = extracted_attributes[4]
points_of_interest = extracted_attributes[5]
# 4 bytes
# header
byte = b'\x0E\x10\x6C\x00'
fit_file.write(byte)
# 4 bytes
# file size
file_size = calculate_file_size(numbers_data)
byte = struct.pack('<i',file_size)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\x2E\x46\x49\x54'
fit_file.write(byte)
# 2 bytes
# checksum
byte=b'\x00\x00'
fit_file.write(byte)
# 32 bytes
# header
byte=b'\x41\x00\x00\xFE\x00\x08\x01\x02\x84\x02\x04\x85\x03\x04\x85\x04\x04\x85\x05\x04\x85\x06\x04\x86\x07\x02\x84\x08\x02\x84\x01'
fit_file.write(byte)
# 2 bytes
# number of points
number_points = number_point
byte=struct.pack('<H',number_points)
fit_file.write(byte)
# 4 bytes
# lat ne bounding box
lat_ne_bounding_box = lat_ne_bounding_box
byte = struct.pack('<i',lat_ne_bounding_box)
fit_file.write(byte)
# 4 bytes
# lat sw bounding box
lat_sw_bounding_box = lat_sw_bounding_box
byte = struct.pack('<i',lat_sw_bounding_box)
fit_file.write(byte)
# 4 bytes
# lon ne bounding box
lon_ne_bounding_box = lon_ne_bounding_box
byte = struct.pack('<i',lon_ne_bounding_box)
fit_file.write(byte)
# 4 bytes
# lon sw bounding box
lon_sw_bounding_box = lon_sw_bounding_box
byte = struct.pack('<i',lon_sw_bounding_box)
fit_file.write(byte)
# 4 bytes
# total distance in meters
total_distance = total_distance
byte=struct.pack('<I', total_distance)
fit_file.write(byte)
# 2 bytes
# maximum altitude
maximum_altitude = maximum_altitude
byte=struct.pack('<H',maximum_altitude)
fit_file.write(byte)
# 2 bytes
# minimum altitude
minimum_altitude = minimum_altitude
byte=struct.pack('<H',minimum_altitude)
fit_file.write(byte)
# 9 bytes
# header
byte=b'\x42\x00\x00\xFB\x00\x01\x01\x02\x84'
fit_file.write(byte)
# 3 bytes
# alphabet
byte=b'\x02\x00\x00'
fit_file.write(byte)
# empirical tests show that the presence of the alphabet does not affect the unit handling of the file
#write_alphabet(fit_file)
# 10 bytes
# header
byte=b'\x43\x00\x00\xFD\x00\x01\x01\x02\x84\x03'
fit_file.write(byte)
# 2 bytes
number_instructions = number_instructions + number_pois
byte=struct.pack('<H', number_instructions)
fit_file.write(byte)
# 21 bytes
# header
byte=b'\x44\x00\x00\xFA\x00\x05\x01\x02\x84\x02\x01\x00\x03\x04\x86\x04\x04\x86\x05\x20\x07'
fit_file.write(byte)
write_instructions(fit_file,instruction_data,instruction_distance,name_data,points_of_interest,numbers_data)
# 10 bytes
# header
byte=b'\x45\x00\x00\xFC\x00\x01\x01\x02\x84\x05'
fit_file.write(byte)
# 2 bytes
# number of points
number_points = number_points
byte=struct.pack('<H', number_points)
fit_file.write(byte)
# 15 bytes
# header
byte=b'\x46\x00\x00\xF9\x00\x03\x01\x04\x85\x02\x04\x85\x03\x02\x84'
fit_file.write(byte)
write_points(fit_file,decoded_data)
byte=b'\x00\x00'
fit_file.write(byte) | source/fit_encode.py | import struct
def write_instruction(instruction):
pass
def calculate_file_size(numbers_data):
number_point = numbers_data[0]
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
#size of header adapted to obtain expected number, counted value was 139, need to understan where difference comes from
headers_size = 145
alphabet_size = 3 * 1 # only 1 entry is in alphabet
points_size = number_point * 11
instructions_size = (number_instructions +number_pois)* 44
difference_file_size = 16
size_attribute = headers_size + points_size + instructions_size - difference_file_size
return size_attribute
def write_points(fit_file,decoded_data):
latitude_data = decoded_data[0]
longitude_data = decoded_data[1]
altitude_data = decoded_data[2]
instructions_data = decoded_data[3]
for i in range (0,len(latitude_data)):
# 1 byte
byte=b'\x06'
fit_file.write(byte)
# 4 bytes
# point latitude
point_latitude = latitude_data[i]
byte = struct.pack('<i',point_latitude)
fit_file.write(byte)
# 4 bytes
# point longitude
point_longitude = longitude_data[i]
byte = struct.pack('<i',point_longitude)
fit_file.write(byte)
# 2 bytes
# point altitude
point_altitude = altitude_data[i]
byte = struct.pack('<H',point_altitude)
fit_file.write(byte)
def write_instructions(fit_file,instruction_data,instruction_distance,name_data,points_of_interest,numbers_data):
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
poi_name = points_of_interest[0]
poi_type = points_of_interest[1]
poi_distance = points_of_interest[2]
poi_identification = points_of_interest[3]
# starts in -1 since first instructions should be 0
instruction_identification = -1
for i in range (0,len(instruction_data)): #TBD Update number of points
# increase identification to make sure that each instruction is in the correct point
instruction_identification += 1
# steps without turn instructions are identified with 15
if instruction_data[i] != b'\xff':
# 1 byte
byte=b'\x04'
fit_file.write(byte)
# 2 bytes
# instruction identification
instruction_identification = instruction_identification
byte = struct.pack('<H',instruction_identification)
fit_file.write(byte)
# 1 bytes
# instruction direction
byte = instruction_data[i]
fit_file.write(byte)
# 4 bytes
# instruction distance
byte = int(float(instruction_distance[i]))
byte=struct.pack('<I', byte)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\xFF\xFF\xFF\xFF'
fit_file.write(byte)
# 32 bytes
# instruction description
byte = str.encode(name_data[i],'utf-8')
if len(byte) < 32:
byte += b'\x00' *(32-len(byte))
else:
byte = byte[:32]
fit_file.write(byte)
# write POI data
i=0
for i in range (0,number_pois): #TBD Update number of points
# 1 byte
byte=b'\x04'
fit_file.write(byte)
# 2 bytes
# instruction identification
poi = poi_identification[i]
byte = struct.pack('<H',poi)
fit_file.write(byte)
# 1 bytes
# instruction direction
byte = poi_type[i]
fit_file.write(byte)
# 4 bytes
# instruction distance
byte = int(float(poi_distance[i]))
byte=struct.pack('<I', byte)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\x00\x00\x00\x00'
fit_file.write(byte)
# 32 bytes
# instruction description
byte = str.encode(poi_name[i],'utf-8')
if len(byte) < 32:
byte += b'\x00' *(32-len(byte))
else:
byte = byte[:32]
fit_file.write(byte)
def write_alphabet(fit_file):
for i in range(0, 1500):
# 1 byte
byte=b'\x02'
fit_file.write(byte)
# 2 bytes
alphabet_instruction=i
byte = struct.pack('<H',alphabet_instruction)
fit_file.write(byte)
def encode_fit (fit_path,decoded_data,extracted_attributes):
fit_file = open(fit_path, 'wb')
latitude_data = decoded_data[0]
longitude_data = decoded_data[1]
altitude_data = decoded_data[2]
instruction_data = decoded_data[3]
name_data = decoded_data[4]
lat_lon_bounding_box = extracted_attributes[0]
lat_ne_bounding_box = lat_lon_bounding_box[0]
lat_sw_bounding_box = lat_lon_bounding_box[1]
lon_ne_bounding_box = lat_lon_bounding_box[2]
lon_sw_bounding_box = lat_lon_bounding_box[3]
total_distance = int(extracted_attributes[1])
alt_bounding_box = extracted_attributes[2]
maximum_altitude = alt_bounding_box[0]
minimum_altitude = alt_bounding_box[1]
numbers_data = extracted_attributes[3]
number_point = numbers_data[0]
number_instructions = numbers_data[1]
number_pois = numbers_data[2]
instruction_distance = extracted_attributes[4]
points_of_interest = extracted_attributes[5]
# 4 bytes
# header
byte = b'\x0E\x10\x6C\x00'
fit_file.write(byte)
# 4 bytes
# file size
file_size = calculate_file_size(numbers_data)
byte = struct.pack('<i',file_size)
fit_file.write(byte)
# 4 bytes
# header
byte = b'\x2E\x46\x49\x54'
fit_file.write(byte)
# 2 bytes
# checksum
byte=b'\x00\x00'
fit_file.write(byte)
# 32 bytes
# header
byte=b'\x41\x00\x00\xFE\x00\x08\x01\x02\x84\x02\x04\x85\x03\x04\x85\x04\x04\x85\x05\x04\x85\x06\x04\x86\x07\x02\x84\x08\x02\x84\x01'
fit_file.write(byte)
# 2 bytes
# number of points
number_points = number_point
byte=struct.pack('<H',number_points)
fit_file.write(byte)
# 4 bytes
# lat ne bounding box
lat_ne_bounding_box = lat_ne_bounding_box
byte = struct.pack('<i',lat_ne_bounding_box)
fit_file.write(byte)
# 4 bytes
# lat sw bounding box
lat_sw_bounding_box = lat_sw_bounding_box
byte = struct.pack('<i',lat_sw_bounding_box)
fit_file.write(byte)
# 4 bytes
# lon ne bounding box
lon_ne_bounding_box = lon_ne_bounding_box
byte = struct.pack('<i',lon_ne_bounding_box)
fit_file.write(byte)
# 4 bytes
# lon sw bounding box
lon_sw_bounding_box = lon_sw_bounding_box
byte = struct.pack('<i',lon_sw_bounding_box)
fit_file.write(byte)
# 4 bytes
# total distance in meters
total_distance = total_distance
byte=struct.pack('<I', total_distance)
fit_file.write(byte)
# 2 bytes
# maximum altitude
maximum_altitude = maximum_altitude
byte=struct.pack('<H',maximum_altitude)
fit_file.write(byte)
# 2 bytes
# minimum altitude
minimum_altitude = minimum_altitude
byte=struct.pack('<H',minimum_altitude)
fit_file.write(byte)
# 9 bytes
# header
byte=b'\x42\x00\x00\xFB\x00\x01\x01\x02\x84'
fit_file.write(byte)
# 3 bytes
# alphabet
byte=b'\x02\x00\x00'
fit_file.write(byte)
# empirical tests show that the presence of the alphabet does not affect the unit handling of the file
#write_alphabet(fit_file)
# 10 bytes
# header
byte=b'\x43\x00\x00\xFD\x00\x01\x01\x02\x84\x03'
fit_file.write(byte)
# 2 bytes
number_instructions = number_instructions + number_pois
byte=struct.pack('<H', number_instructions)
fit_file.write(byte)
# 21 bytes
# header
byte=b'\x44\x00\x00\xFA\x00\x05\x01\x02\x84\x02\x01\x00\x03\x04\x86\x04\x04\x86\x05\x20\x07'
fit_file.write(byte)
write_instructions(fit_file,instruction_data,instruction_distance,name_data,points_of_interest,numbers_data)
# 10 bytes
# header
byte=b'\x45\x00\x00\xFC\x00\x01\x01\x02\x84\x05'
fit_file.write(byte)
# 2 bytes
# number of points
number_points = number_points
byte=struct.pack('<H', number_points)
fit_file.write(byte)
# 15 bytes
# header
byte=b'\x46\x00\x00\xF9\x00\x03\x01\x04\x85\x02\x04\x85\x03\x02\x84'
fit_file.write(byte)
write_points(fit_file,decoded_data)
byte=b'\x00\x00'
fit_file.write(byte) | 0.412648 | 0.501221 |
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from src.fake_news_detector.core.classification.models import ClassificationModel
def split_dataset(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Normalize
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test, y_train, y_test
def get_prediction(df, model_type,list_t, output):
y = df['fake']
X = df[list_t]
X_train, X_test, y_train, y_test = split_dataset(X, y)
model = ClassificationModel(model_type)
model.train(X_train, y_train)
model.test(X_test, y_test)
if output:
print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
.format(model.score(X_train, y_train)))
print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
.format(model.score(X_test, y_test)))
return model.predict_all(X)
def get_similarity_prediction(df, model_type, output):
X = df[['similarity_text_title','similarity_text_subtitle','similarity_title_subtitle']]
y = df['fake']
X_train, X_test, y_train, y_test = split_dataset(X, y)
model = ClassificationModel(model_type)
model.train(X_train, y_train)
model.test(X_test, y_test)
if output:
print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
.format(model.score(X_train, y_train)))
print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
.format(model.score(X_test, y_test)))
return model.predict_all(X)
def get_text_prediction(df, model_type, output):
X = df[['text_length','text_sentences','text_adj_words', 'text_verbs_words', 'text_modal_verbs', 'sentiment']]
y = df['fake']
X_train, X_test, y_train, y_test = split_dataset(X, y)
model = ClassificationModel(model_type)
model.train(X_train, y_train)
model.test(X_test, y_test)
if output:
print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
.format(model.score(X_train, y_train)))
print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
.format(model.score(X_test, y_test)))
return model.predict_all(X)
def get_main_prediction(df, model_type, output):
X = df[['title','similarity','text']]
y = df['fake']
X_train, X_test, y_train, y_test = split_dataset(X, y)
model = ClassificationModel(model_type)
model.train(X_train, y_train)
model.test(X_test, y_test)
if output:
print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
.format(model.score(X_train, y_train)))
print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
.format(model.score(X_test, y_test)))
return model.predict_all(X) | src/fake_news_detector/core/classification/sub_classifications.py | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from src.fake_news_detector.core.classification.models import ClassificationModel
def split_dataset(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Normalize
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test, y_train, y_test
def get_prediction(df, model_type,list_t, output):
y = df['fake']
X = df[list_t]
X_train, X_test, y_train, y_test = split_dataset(X, y)
model = ClassificationModel(model_type)
model.train(X_train, y_train)
model.test(X_test, y_test)
if output:
print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
.format(model.score(X_train, y_train)))
print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
.format(model.score(X_test, y_test)))
return model.predict_all(X)
def get_similarity_prediction(df, model_type, output):
    """Train and evaluate a classifier on the pairwise similarity features
    (text-title, text-subtitle, title-subtitle) and return predictions for
    the whole frame.

    df         : DataFrame with the similarity columns and the 'fake' label.
    model_type : identifier forwarded to ClassificationModel.
    output     : when truthy, print train/test accuracy to stdout.
    """
    # This was a verbatim copy of the get_prediction pipeline with a fixed
    # column list; delegate instead of duplicating split/train/test/report.
    return get_prediction(
        df,
        model_type,
        ['similarity_text_title', 'similarity_text_subtitle', 'similarity_title_subtitle'],
        output)
def get_text_prediction(df, model_type, output):
    """Train and evaluate a classifier on the text-statistics features
    (length, sentence count, POS counts, sentiment) and return predictions
    for the whole frame.

    df         : DataFrame with the text-feature columns and the 'fake' label.
    model_type : identifier forwarded to ClassificationModel.
    output     : when truthy, print train/test accuracy to stdout.
    """
    # This was a verbatim copy of the get_prediction pipeline with a fixed
    # column list; delegate instead of duplicating split/train/test/report.
    return get_prediction(
        df,
        model_type,
        ['text_length', 'text_sentences', 'text_adj_words', 'text_verbs_words', 'text_modal_verbs', 'sentiment'],
        output)
def get_main_prediction(df, model_type, output):
    """Train and evaluate a classifier on the aggregated 'title',
    'similarity' and 'text' feature columns of *df*.

    df         : DataFrame holding the feature columns and the 'fake' label.
    model_type : identifier forwarded to ClassificationModel.
    output     : when truthy, print train/test accuracy to stdout.
    """
    X = df[['title','similarity','text']]
    y = df['fake']
    # split_dataset also MinMax-scales X_train/X_test (fit on train only).
    X_train, X_test, y_train, y_test = split_dataset(X, y)
    model = ClassificationModel(model_type)
    model.train(X_train, y_train)
    model.test(X_test, y_test)
    if output:
        print('Accuracy of ' + model_type + ' classifier on training set: {:.2f}'
        .format(model.score(X_train, y_train)))
        print('Accuracy of ' + model_type + ' classifier on test set: {:.2f}'
        .format(model.score(X_test, y_test)))
return model.predict_all(X) | 0.812272 | 0.606644 |
from __future__ import absolute_import
import logging
import sys
import math
import os.path as op
from pbcommand.pb_io import write_pipeline_chunks
from pbcommand.models import FileTypes, PipelineChunk
from pbcoretools.datastore_utils import dataset_to_datastore, datastore_to_datastorefile_objs
from .align_json_to_svsig import Constants as BC
from .scatter_call import put_items_to_boxes
from ..basic import scatter_main, TCP_INPUT, TCP_OUTPUT, BaseScatterConstants
log = logging.getLogger(__name__)
class Constants(BaseScatterConstants):
    """Constants must be identical to align_json_to_svsig.

    Input:  idx 0 - DataStore json of AlignmentSet (or ConsensusAlignmentSet)
            idx 1 - TRF BED
    Output: idx 0 - FOFN of svsig.gz
    """
    # Derive the scatter tool's name from the wrapped tool's name.
    TOOL_NAME = 'scatter_{}'.format(BC.TOOL_NAME)
    # NOTE(review): this interpolates TOOL_NAME (already 'scatter_'-prefixed),
    # so the text reads "...pbsvtools.tasks.scatter_<name>"; confirm whether
    # BC.TOOL_NAME was intended instead.
    DESCRIPTION = "Scatter inputs for pbsvtools.tasks.{}".format(TOOL_NAME)
    # Per-chunk keys resolved by the pipeline engine for each PipelineChunk.
    CHUNK_KEYS = ('$chunk.datastore_id', '$chunk.bed_id')
    # Inputs mirror the wrapped tool; the single output is a CHUNK json.
    INPUT_FILES = BC.INPUT_FILES
    OUTPUT_FILES = [
        TCP_OUTPUT(FileTypes.CHUNK, "cjson_out", "Chunk sv datastore JSON",
                   "Chunk sv datastore JSON", "align.datastore.chunked")
    ]
    ALLOWED_TYPES = BC.ALLOWED_TYPES
def datastore_to_bam_files(i_datastore_fn):
    """Return the external-resource BAM files referenced by a datastore JSON.

    The datastore members are opened as one dataset object of the class
    reported by datastore_to_datastorefile_objs, restricted to the allowed
    dataset types.
    """
    ds_files, _, dataset_cls, _ = datastore_to_datastorefile_objs(
        i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
    member_paths = [ds_file.path for ds_file in ds_files]
    return dataset_cls(*member_paths).toExternalFiles()
def run_main(i_datastore_fn, i_trf_fn, o_json_fn, max_nchunks):
    """Chunk the input datastore into up to *max_nchunks* pieces and write a
    pipeline chunk.json describing them.

    Parameters:
      i_datastore_fn --- DataStore json of AlignmentSet or ConsensusAlignmentSet to chunk.
      i_trf_fn --- Tandem Repeats in BED
      o_json_fn -- Output json file
      max_nchunks --- upper bound on the number of chunks to emit
    """
    output_dir = op.dirname(o_json_fn)
    basename = 'chunk'
    # Chunk input datastore json, generate multiple chunked datastore.json, and
    # generate pbcommand.models.PipelineChunk objects
    _, _, readcls, ext = datastore_to_datastorefile_objs(
        i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
    bam_fns = datastore_to_bam_files(i_datastore_fn)
    # Put bam files into boxes: at least one chunk, and never more chunks
    # than there are BAM files.
    n_chunks = max(1, min(max_nchunks, len(bam_fns)))
    # Capacity per box; every item has weight 1 below, so this bounds box size.
    # NOTE(review): math.ceil returns a float on Python 2 -- confirm that
    # put_items_to_boxes accepts a float cutoff.
    cutoff = math.ceil(len(bam_fns)*1.0/n_chunks)
    boxes = put_items_to_boxes(bam_fns, [1 for _ in range(len(bam_fns))], n_chunks, cutoff)
    chunks = []
    for i, bam_fns_in_box in enumerate(boxes):
        # One dataset XML plus one datastore JSON per box of BAM files.
        out_xml = op.join(output_dir, '{}.{}.{}'.format(basename, i, ext))
        out_json = op.join(output_dir, '{}.{}.{}'.format(basename, i, 'datastore.json'))
        readcls(*bam_fns_in_box).write(out_xml)
        dataset_to_datastore(out_xml, out_json, Constants.TOOL_NAME)
        # Create a chunk: get $chunk.datastore_id from chunk,
        # use TandemRepeat masker bed as $chunk.bed_id
        d = {Constants.CHUNK_KEYS[0]: out_json,
             Constants.CHUNK_KEYS[1]: i_trf_fn}
        chunk_id = Constants.TOOL_NAME+'_chunk_{}'.format(i)  # chunks MUST have unique IDs
        chunk = PipelineChunk(chunk_id, **d)
        chunks.append(chunk)
    log.info("Writing chunk.json to %s", o_json_fn)
    write_pipeline_chunks(chunks, o_json_fn,
                          "created by %s" % Constants.TOOL_ID())
    return 0
def rtc_runner(rtc):
    """Resolved tool contract runner: unpack the RTC and delegate to run_main."""
    task = rtc.task
    # Fall back to the default chunk count when the task does not define one.
    if hasattr(task, 'max_nchunks'):
        n_chunks = task.max_nchunks
    else:
        n_chunks = Constants.DEFAULT_NCHUNKS
    return run_main(i_datastore_fn=task.input_files[0],
                    i_trf_fn=task.input_files[1],
                    o_json_fn=task.output_files[0],
                    max_nchunks=int(n_chunks))
if __name__ == '__main__':
sys.exit(scatter_main(
args=sys.argv[1:], const=Constants, rtc_runner=rtc_runner, alog=log)) | SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsvtools/tasks/scatter_align_json_to_svsig.py | from __future__ import absolute_import
import logging
import sys
import math
import os.path as op
from pbcommand.pb_io import write_pipeline_chunks
from pbcommand.models import FileTypes, PipelineChunk
from pbcoretools.datastore_utils import dataset_to_datastore, datastore_to_datastorefile_objs
from .align_json_to_svsig import Constants as BC
from .scatter_call import put_items_to_boxes
from ..basic import scatter_main, TCP_INPUT, TCP_OUTPUT, BaseScatterConstants
log = logging.getLogger(__name__)
class Constants(BaseScatterConstants):
"""Constants must be identical to align_json_to_svsig
Input: idx 0 - DataStore json of AlignmentSet (or ConsensusAlignmentSet)
idx 1 - TRF BED
Output: idx 0 - FOFN of svsig.gz
"""
TOOL_NAME = 'scatter_{}'.format(BC.TOOL_NAME)
DESCRIPTION = "Scatter inputs for pbsvtools.tasks.{}".format(TOOL_NAME)
CHUNK_KEYS = ('$chunk.datastore_id', '$chunk.bed_id')
INPUT_FILES = BC.INPUT_FILES
OUTPUT_FILES = [
TCP_OUTPUT(FileTypes.CHUNK, "cjson_out", "Chunk sv datastore JSON",
"Chunk sv datastore JSON", "align.datastore.chunked")
]
ALLOWED_TYPES = BC.ALLOWED_TYPES
def datastore_to_bam_files(i_datastore_fn):
    """Return all external-resource BAM files from the input datastore JSON file."""
    files, _, readcls, _ = datastore_to_datastorefile_objs(
        i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
    # Open every datastore member as a single dataset of the reported class.
    dataset_obj = readcls(*[f.path for f in files])
    return dataset_obj.toExternalFiles()
def run_main(i_datastore_fn, i_trf_fn, o_json_fn, max_nchunks):
"""
Parameters:
i_datastore_fn --- DataStore json of AlignmentSet or ConsensusAlignmentSet to chunk.
i_trf_fn --- Tandem Repeats in BED
o_json_fn -- Output json file
"""
output_dir = op.dirname(o_json_fn)
basename = 'chunk'
# Chunk input datastore json, generate multiple chunked datastore.json, and
# generate pbcommand.models.PipelineChunk objects
_, _, readcls, ext = datastore_to_datastorefile_objs(
i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
bam_fns = datastore_to_bam_files(i_datastore_fn)
# Put bam files into boxes
n_chunks = max(1, min(max_nchunks, len(bam_fns)))
cutoff = math.ceil(len(bam_fns)*1.0/n_chunks)
boxes = put_items_to_boxes(bam_fns, [1 for _ in range(len(bam_fns))], n_chunks, cutoff)
chunks = []
for i, bam_fns_in_box in enumerate(boxes):
out_xml = op.join(output_dir, '{}.{}.{}'.format(basename, i, ext))
out_json = op.join(output_dir, '{}.{}.{}'.format(basename, i, 'datastore.json'))
readcls(*bam_fns_in_box).write(out_xml)
dataset_to_datastore(out_xml, out_json, Constants.TOOL_NAME)
# Create a chunk: get $chunk.datastore_id from chunk,
# use TandemRepeat masker bed as $chunk.bed_id
d = {Constants.CHUNK_KEYS[0]: out_json,
Constants.CHUNK_KEYS[1]: i_trf_fn}
chunk_id = Constants.TOOL_NAME+'_chunk_{}'.format(i) # chunks MUST have unique IDs
chunk = PipelineChunk(chunk_id, **d)
chunks.append(chunk)
log.info("Writing chunk.json to %s", o_json_fn)
write_pipeline_chunks(chunks, o_json_fn,
"created by %s" % Constants.TOOL_ID())
return 0
def rtc_runner(rtc):
    """Resolved tool contract runner."""
    # Fall back to the default chunk count when the task does not define one.
    max_nchunks = rtc.task.max_nchunks if hasattr(
        rtc.task, 'max_nchunks') else Constants.DEFAULT_NCHUNKS
    return run_main(i_datastore_fn=rtc.task.input_files[0],
                    i_trf_fn=rtc.task.input_files[1],
                    o_json_fn=rtc.task.output_files[0],
                    max_nchunks=int(max_nchunks))
if __name__ == '__main__':
sys.exit(scatter_main(
args=sys.argv[1:], const=Constants, rtc_runner=rtc_runner, alog=log)) | 0.490724 | 0.279964 |
import binascii
import rsa
import base64
import requests
import re
import json
def prelogin():
    """Fetch Sina SSO pre-login parameters.

    The endpoint answers with a JSONP payload such as
    ``sinaSSOController.preloginCallBack({...})``; the JSON object is pulled
    out of the callback wrapper before parsing.

    Returns:
        (servertime, nonce, pubkey, rsakv) tuple used to build the login form.

    Raises:
        requests.RequestException: on network failure or timeout.
        IndexError: if the response carries no JSONP object.
    """
    url="https://login.sina.com.cn/sso/prelogin.php?entry=account&callback=sinaSSOController.preloginCallBack&su=MTU2MjAxNTE0NzU%3D&rsakt=mod&client=ssologin.js(v1.4.15)&_=1476186181803"
    # A bounded timeout keeps the login flow from hanging forever on a
    # stalled connection (requests has no default timeout).
    html = requests.get(url, timeout=10).text
    # Extract the {...} object from the JSONP callback wrapper.
    jsonStr = re.findall(r'\((\{.*?\})\)', html)[0]
    data = json.loads(jsonStr)
    servertime = data["servertime"]
    nonce = data["nonce"]
    pubkey = data["pubkey"]
    rsakv = data["rsakv"]
    return servertime, nonce, pubkey, rsakv
def getSu(username):
    """Base64-encode the login user name, as expected by the ``su`` form field."""
    encoded_bytes = base64.b64encode(username.encode('utf-8'))
    return encoded_bytes.decode('utf-8')
def getSp(password, servertime, nonce, pubkey):
    """RSA-encrypt the password for the ``sp`` form field.

    The plaintext is ``"<servertime>\\t<nonce>\\n<password>"``; it is encrypted
    with the server-supplied modulus (hex string) and the fixed public
    exponent 65537, and the ciphertext is returned hex-encoded.
    """
    modulus = int(pubkey, 16)
    public_key = rsa.PublicKey(modulus, 65537)
    plaintext = '{0}\t{1}\n{2}'.format(servertime, nonce, password)
    ciphertext = rsa.encrypt(plaintext.encode('utf-8'), public_key)
    return binascii.b2a_hex(ciphertext)
def main():
    """Log in to Sina Weibo SSO and persist the session cookies to ./cookies.

    SECURITY NOTE(review): the account name and password are hard-coded
    below and the login URL uses plain http -- move the credentials out of
    source control and prefer https.
    """
    servertime, nonce, pubkey, rsakv = prelogin()
    su = getSu("15802252189")
    sp = getSp("kobe81", servertime, nonce, pubkey)
    # Form fields expected by the ssologin.js v1.4.18 endpoint.
    postData = {
        'entry': 'weibo',
        'gateway': '1',
        'from': '',
        'savestate': '7',
        'userticket': '1',
        "pagerefer": "http://open.weibo.com/wiki/2/statuses/home_timeline",
        "vsnf": "1",
        "su": su,
        "service": "miniblog",
        "servertime": servertime,
        "nonce": nonce,
        "pwencode": "rsa2",
        "rsakv": rsakv,
        "sp": sp,
        "sr": "1440*900",
        "encoding": "UTF-8",
        "prelt": "126",
        "url": "http://open.weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack",
        "returntype": "META",
    }
    loginURL = r'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
    session = requests.Session()
    res = session.post(loginURL, data=postData)
    # Serialize the session cookies as "k=v;" pairs for later reuse.
    lst= res.cookies.items()
    cookie=''
    for each in lst:
        cookie+= each[0]+'='+each[1]+';'
    with open('cookies','w') as f:
        f.write(cookie)
print 'cookies have refreshed' | weibo_mine_hot/Refresh_cookie.py | import binascii
import rsa
import base64
import requests
import re
import json
def prelogin():
url="https://login.sina.com.cn/sso/prelogin.php?entry=account&callback=sinaSSOController.preloginCallBack&su=MTU2MjAxNTE0NzU%3D&rsakt=mod&client=ssologin.js(v1.4.15)&_=1476186181803"
html=requests.get(url).text
jsonStr = re.findall(r'\((\{.*?\})\)', html)[0]
data = json.loads(jsonStr)
servertime = data["servertime"]
nonce = data["nonce"]
pubkey = data["pubkey"]
rsakv = data["rsakv"]
return servertime, nonce, pubkey, rsakv
def getSu(username):
    # Base64-encode the user name for the SSO form's ``su`` field.
    su = base64.b64encode(username.encode('utf-8')).decode('utf-8')
    return su
def getSp(password, servertime, nonce, pubkey):
pubkey = int(pubkey, 16)
key = rsa.PublicKey(pubkey, 65537)
message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)
message = message.encode('utf-8')
sp = rsa.encrypt(message, key)
sp = binascii.b2a_hex(sp)
return sp
def main():
servertime, nonce, pubkey, rsakv = prelogin()
su = getSu("15802252189")
sp = getSp("kobe81", servertime, nonce, pubkey)
postData = {
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'userticket': '1',
"pagerefer": "http://open.weibo.com/wiki/2/statuses/home_timeline",
"vsnf": "1",
"su": su,
"service": "miniblog",
"servertime": servertime,
"nonce": nonce,
"pwencode": "rsa2",
"rsakv": rsakv,
"sp": sp,
"sr": "1440*900",
"encoding": "UTF-8",
"prelt": "126",
"url": "http://open.weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack",
"returntype": "META",
}
loginURL = r'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
session = requests.Session()
res = session.post(loginURL, data=postData)
lst= res.cookies.items()
cookie=''
for each in lst:
cookie+= each[0]+'='+each[1]+';'
with open('cookies','w') as f:
f.write(cookie)
print 'cookies have refreshed' | 0.199932 | 0.124719 |
from kubernetes.client import CoreV1Api
from app.models.user import User
from app.models.route import RouteItem
dashboard_route = RouteItem(**{
'path': '/dashboard',
'name': 'Dashboard',
'component': 'LAYOUT',
'redirect': '/dashboard/overview',
'meta': {
"title": 'routes.adminDashboard.dashboard',
"icon": 'bx:bx-home',
},
"children": [
{
'path': 'overview',
'name': 'nodeOverview',
'component': '/admin/dashboard/nodeOverview',
'meta': {
'title': 'routes.adminDashboard.nodeOverview'
}
},
{
'path': 'cluster-alerts',
'name': 'clusterAlerts',
'component': '/admin/dashboard/clusterAlerts',
'meta': {
'title': 'routes.adminDashboard.clusterAlerts'
}
},
{
'path': 'ceph-overview',
'name': 'cephOverview',
'component': '/admin/dashboard/cephOverview',
'meta': {
'title': 'routes.adminDashboard.cephOverview'
}
}
]
})
terminal_route = RouteItem(**{
'path': '/terminal',
'name': 'Terminal',
'component': 'LAYOUT',
'redirect': '/terminal/ceph',
'meta': {
"title": 'routes.adminTerminal.terminal',
"icon": 'bx:bx-terminal',
},
"children": [
{
'path': 'system',
'name': 'systemTerminal',
'component': '/admin/terminal/webTerminal',
'props': {
'podNamespace': 'kubezephyr',
'podName': 'kubectl',
},
'meta': {
'title': 'routes.adminTerminal.systemTerminal'
}
},
{
'path': 'ceph',
'name': 'cephTerminal',
'component': '/admin/terminal/webTerminal',
'props': {
'podNamespace': 'rook-ceph',
'podName': 'rook-ceph-tools',
},
'meta': {
'title': 'routes.adminTerminal.cephTerminal'
}
},
]
})
containers_route = RouteItem(**{
'path': '/containers',
'name': 'Containers',
'component': '/admin/containers/index',
'meta': {
"title": 'routes.adminContainers.manage',
"icon": 'simple-icons:kubernetes',
},
})
users_route = RouteItem(**{
'path': '/users',
'name': 'Users',
'component': '/admin/users/index',
'meta': {
"title": 'routes.adminUsers.manage',
"icon": 'bx:bx-user',
},
})
alert_channel_route = RouteItem(**{
'path': '/alert-channel',
'name': 'AlertChannel',
'component': '/admin/alertChannel/index',
'meta': {
"title": 'routes.adminAlertChannel.manage',
"icon": 'carbon:notification',
},
})
account_route = RouteItem(**{
'path': '/account',
'name': 'Account',
'component': 'LAYOUT',
'redirect': '/account/change-password',
'meta': {
"title": 'routes.account.account',
"icon": 'mdi:card-account-details-outline',
},
"children": [
{
'path': 'change-password',
'name': 'ChangePassword',
'component': '/account/changePassword/index',
'meta': {
"title": 'routes.account.password',
"icon": 'carbon:password',
},
},
]
})
def generate_admin_route(user: User, core_v1_api: CoreV1Api):
return([dashboard_route, terminal_route, containers_route, users_route, alert_channel_route, account_route]) | app/core/route_admin.py | from kubernetes.client import CoreV1Api
from app.models.user import User
from app.models.route import RouteItem
dashboard_route = RouteItem(**{
'path': '/dashboard',
'name': 'Dashboard',
'component': 'LAYOUT',
'redirect': '/dashboard/overview',
'meta': {
"title": 'routes.adminDashboard.dashboard',
"icon": 'bx:bx-home',
},
"children": [
{
'path': 'overview',
'name': 'nodeOverview',
'component': '/admin/dashboard/nodeOverview',
'meta': {
'title': 'routes.adminDashboard.nodeOverview'
}
},
{
'path': 'cluster-alerts',
'name': 'clusterAlerts',
'component': '/admin/dashboard/clusterAlerts',
'meta': {
'title': 'routes.adminDashboard.clusterAlerts'
}
},
{
'path': 'ceph-overview',
'name': 'cephOverview',
'component': '/admin/dashboard/cephOverview',
'meta': {
'title': 'routes.adminDashboard.cephOverview'
}
}
]
})
terminal_route = RouteItem(**{
'path': '/terminal',
'name': 'Terminal',
'component': 'LAYOUT',
'redirect': '/terminal/ceph',
'meta': {
"title": 'routes.adminTerminal.terminal',
"icon": 'bx:bx-terminal',
},
"children": [
{
'path': 'system',
'name': 'systemTerminal',
'component': '/admin/terminal/webTerminal',
'props': {
'podNamespace': 'kubezephyr',
'podName': 'kubectl',
},
'meta': {
'title': 'routes.adminTerminal.systemTerminal'
}
},
{
'path': 'ceph',
'name': 'cephTerminal',
'component': '/admin/terminal/webTerminal',
'props': {
'podNamespace': 'rook-ceph',
'podName': 'rook-ceph-tools',
},
'meta': {
'title': 'routes.adminTerminal.cephTerminal'
}
},
]
})
containers_route = RouteItem(**{
'path': '/containers',
'name': 'Containers',
'component': '/admin/containers/index',
'meta': {
"title": 'routes.adminContainers.manage',
"icon": 'simple-icons:kubernetes',
},
})
users_route = RouteItem(**{
'path': '/users',
'name': 'Users',
'component': '/admin/users/index',
'meta': {
"title": 'routes.adminUsers.manage',
"icon": 'bx:bx-user',
},
})
alert_channel_route = RouteItem(**{
'path': '/alert-channel',
'name': 'AlertChannel',
'component': '/admin/alertChannel/index',
'meta': {
"title": 'routes.adminAlertChannel.manage',
"icon": 'carbon:notification',
},
})
account_route = RouteItem(**{
'path': '/account',
'name': 'Account',
'component': 'LAYOUT',
'redirect': '/account/change-password',
'meta': {
"title": 'routes.account.account',
"icon": 'mdi:card-account-details-outline',
},
"children": [
{
'path': 'change-password',
'name': 'ChangePassword',
'component': '/account/changePassword/index',
'meta': {
"title": 'routes.account.password',
"icon": 'carbon:password',
},
},
]
})
def generate_admin_route(user: User, core_v1_api: CoreV1Api):
return([dashboard_route, terminal_route, containers_route, users_route, alert_channel_route, account_route]) | 0.412648 | 0.130009 |
import unittest
import libyang
import asyncio
import logging
import time
import itertools
from multiprocessing import Process, Queue
from goldstone.lib.connector.sysrepo import Connector
from goldstone.lib.server_connector import create_server_connector
from goldstone.lib.errors import *
from goldstone.lib.util import call
from goldstone.lib.core import *
from goldstone.xlate.openconfig.interfaces import InterfaceServer
class MockGSInterfaceServer(ServerBase):
    """Minimal goldstone-interfaces server used as a test double.

    Registers no-op configuration handlers and serves a fixed pair of
    interfaces from its operational callback.
    """

    def __init__(self, conn):
        super().__init__(conn, "goldstone-interfaces")
        # Accept (and ignore) any configuration written under these nodes.
        self.handlers = {
            "interfaces": {
                "interface": {
                    "name": NoOp,
                    "config": {
                        "admin-status": NoOp,
                        "name": NoOp,
                        "description": NoOp,
                        "loopback-mode": NoOp,
                        "prbs-mode": NoOp,
                    },
                    "ethernet": NoOp,
                    "switched-vlan": NoOp,
                    "component-connection": NoOp,
                }
            }
        }

    def oper_cb(self, xpath, priv):
        # Operational-datastore callback: always report the same two
        # interfaces as administratively and operationally up.
        interfaces = [
            {
                "name": "Ethernet1_1",
                "state": {"admin-status": "UP", "oper-status": "UP"},
            },
            {
                "name": "Ethernet2_1",
                "state": {"admin-status": "UP", "oper-status": "UP"},
            },
        ]
        return {"interfaces": {"interface": interfaces}}
def run_mock_gs_server(q):
    """Run a MockGSInterfaceServer until an item appears on *q*.

    Intended as a multiprocessing.Process target: serves the mock
    goldstone-interfaces model and treats any queued item as the
    shutdown signal.
    """
    from queue import Empty  # exception raised by q.get(block=False)

    conn = Connector()
    server = MockGSInterfaceServer(conn)

    async def _main():
        tasks = await server.start()

        async def evloop():
            # Poll the shutdown queue once a second.
            while True:
                await asyncio.sleep(1)
                try:
                    q.get(False)
                except Empty:
                    # Nothing queued yet -- keep serving.  The original bare
                    # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
                    continue
                else:
                    return

        tasks.append(evloop())
        tasks = [
            t if isinstance(t, asyncio.Task) else asyncio.create_task(t) for t in tasks
        ]
        # Stop as soon as either the server or the shutdown watcher finishes,
        # re-raising any task exception so the test process fails loudly.
        done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            e = task.exception()
            if e:
                raise e

    asyncio.run(_main())
    conn.stop()
class TestInterfaceServer(unittest.IsolatedAsyncioTestCase):
    """Tests for the OpenConfig InterfaceServer translation layer.

    A MockGSInterfaceServer runs in a separate process to answer the
    goldstone-interfaces side of each translation; blocking sysrepo calls
    are pushed onto worker threads via asyncio.to_thread.
    """

    async def asyncSetUp(self):
        logging.basicConfig(level=logging.DEBUG)
        self.conn = Connector()
        # Start every test from clean running datastores for both models.
        self.conn.delete_all("goldstone-interfaces")
        self.conn.delete_all("openconfig-interfaces")
        self.conn.apply()
        # The queue is only a shutdown signal for the mock-server process.
        self.q = Queue()
        self.process = Process(target=run_mock_gs_server, args=(self.q,))
        self.process.start()
        self.server = InterfaceServer(self.conn, reconciliation_interval=1)
        self.tasks = list(asyncio.create_task(c) for c in await self.server.start())

    async def test_get_ifname(self):
        # Interface names served by the mock must surface through the
        # OpenConfig operational tree.
        def test():
            time.sleep(2)  # wait for the mock server
            conn = Connector()
            data = conn.get_operational(
                "/openconfig-interfaces:interfaces/interface/name"
            )
            self.assertEqual(data, ["Ethernet1_1", "Ethernet2_1"])

        await asyncio.create_task(asyncio.to_thread(test))

    async def test_set_admin_status(self):
        # openconfig 'enabled' must be translated to goldstone 'admin-status'.
        def test():
            time.sleep(2)  # wait for the mock server
            conn = Connector()
            name = "Ethernet1_1"
            # Creating an enabled OpenConfig interface -> admin-status UP.
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/name",
                name,
            )
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/type",
                "iana-if-type:ethernetCsmacd",
            )
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
                "true",
            )
            conn.apply()
            xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
            data = conn.get_operational(xpath, one=True)
            self.assertEqual(data, "UP")
            # Disabling the interface -> admin-status DOWN.
            name = "Ethernet1_1"
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
                "false",
            )
            conn.apply()
            xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
            data = conn.get_operational(xpath, one=True)
            # NOTE(review): the sibling get_operational(one=True) calls compare
            # the result to a string directly; this extra xpath_get looks
            # redundant -- confirm what get_operational returns here.
            data = libyang.xpath_get(data, xpath)
            self.assertEqual(data, "DOWN")
            # Removing 'enabled' falls back to its default of "true" -> UP.
            name = "Ethernet1_1"
            conn.delete(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
            )
            conn.apply()
            xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
            data = conn.get_operational(xpath, one=True)
            self.assertEqual(data, "UP")  # the default value of 'enabled' is "true"

        await asyncio.create_task(asyncio.to_thread(test))

    async def test_reconcile(self):
        # The server reconciles the primitive model back to the translated
        # OpenConfig configuration every reconciliation_interval second.
        def test():
            time.sleep(2)  # wait for the mock server
            conn = Connector()
            name = "Ethernet1_1"
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/name",
                name,
            )
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/type",
                "iana-if-type:ethernetCsmacd",
            )
            conn.set(
                f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
                "true",
            )
            conn.apply()
            xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
            data = conn.get_operational(xpath, one=True)
            self.assertEqual(data, "UP")
            conn.set(xpath, "DOWN")  # make the configuration inconsistent
            conn.apply()
            # Wait at least one reconciliation interval.
            time.sleep(2)
            data = conn.get_operational(xpath, one=True)
            self.assertEqual(
                data, "UP"
            )  # the primitive model configuration must become consistent again

        await asyncio.create_task(asyncio.to_thread(test))

    async def asyncTearDown(self):
        await call(self.server.stop)
        [t.cancel() for t in self.tasks]
        self.conn.stop()
        # Signal the mock-server process to exit, then reap it.
        self.q.put(True)
        self.process.join()
if __name__ == "__main__":
unittest.main() | src/xlate/openconfig/tests/test.py | import unittest
import libyang
import asyncio
import logging
import time
import itertools
from multiprocessing import Process, Queue
from goldstone.lib.connector.sysrepo import Connector
from goldstone.lib.server_connector import create_server_connector
from goldstone.lib.errors import *
from goldstone.lib.util import call
from goldstone.lib.core import *
from goldstone.xlate.openconfig.interfaces import InterfaceServer
class MockGSInterfaceServer(ServerBase):
def __init__(self, conn):
super().__init__(conn, "goldstone-interfaces")
self.handlers = {
"interfaces": {
"interface": {
"name": NoOp,
"config": {
"admin-status": NoOp,
"name": NoOp,
"description": NoOp,
"loopback-mode": NoOp,
"prbs-mode": NoOp,
},
"ethernet": NoOp,
"switched-vlan": NoOp,
"component-connection": NoOp,
}
}
}
def oper_cb(self, xpath, priv):
interfaces = [
{
"name": "Ethernet1_1",
"state": {"admin-status": "UP", "oper-status": "UP"},
},
{
"name": "Ethernet2_1",
"state": {"admin-status": "UP", "oper-status": "UP"},
},
]
return {"interfaces": {"interface": interfaces}}
def run_mock_gs_server(q):
conn = Connector()
server = MockGSInterfaceServer(conn)
async def _main():
tasks = await server.start()
async def evloop():
while True:
await asyncio.sleep(1)
try:
q.get(False)
except:
pass
else:
return
tasks.append(evloop())
tasks = [
t if isinstance(t, asyncio.Task) else asyncio.create_task(t) for t in tasks
]
done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
for task in done:
e = task.exception()
if e:
raise e
asyncio.run(_main())
conn.stop()
class TestInterfaceServer(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
logging.basicConfig(level=logging.DEBUG)
self.conn = Connector()
self.conn.delete_all("goldstone-interfaces")
self.conn.delete_all("openconfig-interfaces")
self.conn.apply()
self.q = Queue()
self.process = Process(target=run_mock_gs_server, args=(self.q,))
self.process.start()
self.server = InterfaceServer(self.conn, reconciliation_interval=1)
self.tasks = list(asyncio.create_task(c) for c in await self.server.start())
async def test_get_ifname(self):
def test():
time.sleep(2) # wait for the mock server
conn = Connector()
data = conn.get_operational(
"/openconfig-interfaces:interfaces/interface/name"
)
self.assertEqual(data, ["Ethernet1_1", "Ethernet2_1"])
await asyncio.create_task(asyncio.to_thread(test))
async def test_set_admin_status(self):
def test():
time.sleep(2) # wait for the mock server
conn = Connector()
name = "Ethernet1_1"
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/name",
name,
)
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/type",
"iana-if-type:ethernetCsmacd",
)
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
"true",
)
conn.apply()
xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
data = conn.get_operational(xpath, one=True)
self.assertEqual(data, "UP")
name = "Ethernet1_1"
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
"false",
)
conn.apply()
xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
data = conn.get_operational(xpath, one=True)
data = libyang.xpath_get(data, xpath)
self.assertEqual(data, "DOWN")
name = "Ethernet1_1"
conn.delete(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
)
conn.apply()
xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
data = conn.get_operational(xpath, one=True)
self.assertEqual(data, "UP") # the default value of 'enabled' is "true"
await asyncio.create_task(asyncio.to_thread(test))
async def test_reconcile(self):
def test():
time.sleep(2) # wait for the mock server
conn = Connector()
name = "Ethernet1_1"
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/name",
name,
)
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/type",
"iana-if-type:ethernetCsmacd",
)
conn.set(
f"/openconfig-interfaces:interfaces/interface[name='{name}']/config/enabled",
"true",
)
conn.apply()
xpath = f"/goldstone-interfaces:interfaces/interface[name='{name}']/config/admin-status"
data = conn.get_operational(xpath, one=True)
self.assertEqual(data, "UP")
conn.set(xpath, "DOWN") # make the configuration inconsistent
conn.apply()
time.sleep(2)
data = conn.get_operational(xpath, one=True)
self.assertEqual(
data, "UP"
) # the primitive model configuration must become consistent again
await asyncio.create_task(asyncio.to_thread(test))
async def asyncTearDown(self):
await call(self.server.stop)
[t.cancel() for t in self.tasks]
self.conn.stop()
self.q.put(True)
self.process.join()
if __name__ == "__main__":
unittest.main() | 0.428712 | 0.135919 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP_Module
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
norm_layer = nn.BatchNorm2d
class _ConvBNReLU(nn.Module):
    """Conv2d -> normalization -> ReLU(6) block.

    Constructor parameters mirror nn.Conv2d; ``relu6`` selects ReLU6 instead
    of ReLU, and ``norm_layer`` selects the normalization class
    (nn.BatchNorm2d when omitted or None).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu6=False, norm_layer=None):
        super(_ConvBNReLU, self).__init__()
        # Resolve the default lazily instead of binding the module-level
        # ``norm_layer`` object at class-definition time; omitting the
        # argument (or passing None) still yields BatchNorm2d, so existing
        # callers are unaffected.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Bias is omitted because the following norm layer supplies the shift.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
class ASPPPlusHead(nn.Module):
    """DeepLabV3+-style decoder head: ASPP on the deep feature map, fused
    with a projected low-level feature map, followed by a classifier.

    num_classes : number of output channels of the final 1x1 conv.
    in_channels : channel count of the deepest backbone feature map; the
                  low-level branch is assumed to carry in_channels // 8
                  channels -- TODO confirm against the backbone.
    in_index    : indices selecting (low-level, deep) maps from the inputs.
                  NOTE(review): mutable default argument; it is never mutated
                  here, but a tuple would be safer.
    """

    def __init__(self, num_classes, in_channels, norm_layer=norm_layer, up_kwargs=up_kwargs, in_index=[0, 3]):
        super(ASPPPlusHead, self).__init__()
        self._up_kwargs = up_kwargs
        self.in_index = in_index
        self.aspp = ASPP_Module(in_channels, [12, 24, 36], norm_layer=norm_layer, up_kwargs=up_kwargs)
        self.c1_block = _ConvBNReLU(in_channels // 8, in_channels // 8, 3, padding=1, norm_layer=norm_layer)
        self.block = nn.Sequential(
            _ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout(0.5),
            _ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout(0.1),
            nn.Conv2d(in_channels // 4, num_classes, 1))

    def _transform_inputs(self, inputs):
        # Select the configured feature maps (list/tuple of indices) or a
        # single map (int index) from the backbone outputs.
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs

    def forward(self, inputs):
        inputs = self._transform_inputs(inputs)
        c1, x = inputs
        size = c1.size()[2:]
        c1 = self.c1_block(c1)
        x = self.aspp(x)
        # Upsample the ASPP output to the low-level map's spatial size
        # before channel-wise concatenation.
        x = F.interpolate(x, size, **self._up_kwargs)
return self.block(torch.cat([x, c1], dim=1)) | models/head/aspp_plus.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP_Module
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
norm_layer = nn.BatchNorm2d
class _ConvBNReLU(nn.Module):
    """Conv2d -> normalization -> ReLU(6) block; the conv is bias-free
    because the following norm layer supplies the affine shift."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu6=False, norm_layer=norm_layer):
        super(_ConvBNReLU, self).__init__()
        # NOTE(review): the default binds the module-level ``norm_layer``
        # object at class-definition time (early binding) -- confirm intended.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
class ASPPPlusHead(nn.Module):
def __init__(self, num_classes, in_channels, norm_layer=norm_layer, up_kwargs=up_kwargs, in_index=[0, 3]):
super(ASPPPlusHead, self).__init__()
self._up_kwargs = up_kwargs
self.in_index = in_index
self.aspp = ASPP_Module(in_channels, [12, 24, 36], norm_layer=norm_layer, up_kwargs=up_kwargs)
self.c1_block = _ConvBNReLU(in_channels // 8, in_channels // 8, 3, padding=1, norm_layer=norm_layer)
self.block = nn.Sequential(
_ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
nn.Dropout(0.5),
_ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
nn.Dropout(0.1),
nn.Conv2d(in_channels // 4, num_classes, 1))
def _transform_inputs(self, inputs):
if isinstance(self.in_index, (list, tuple)):
inputs = [inputs[i] for i in self.in_index]
elif isinstance(self.in_index, int):
inputs = inputs[self.in_index]
return inputs
def forward(self, inputs):
inputs = self._transform_inputs(inputs)
c1, x = inputs
size = c1.size()[2:]
c1 = self.c1_block(c1)
x = self.aspp(x)
x = F.interpolate(x, size, **self._up_kwargs)
return self.block(torch.cat([x, c1], dim=1)) | 0.928878 | 0.360433 |
import time
import unittest
from inoft_vocal_framework.audio_editing.audioclip import AudioBlock
class MyTestCase(unittest.TestCase):
    """Smoke test for AudioBlock track/sound composition and MP3 rendering."""

    def test_something(self):
        # Build an audio block with one (empty) secondary track; the
        # primary track below drives the overall render length.
        # (Removed: unused timing variable and large blocks of commented-out
        # scratch code referencing machine-specific sample paths.)
        audio_block_1 = AudioBlock()
        audio_block_1.create_track()
        background_music_track = audio_block_1.create_track(primary=True)
        # First 20 seconds of ambiance...
        background_music_track.create_sound(
            engine_file_key="ambiance",
            player_start_time=background_music_track.player_start_time,
            player_end_time=background_music_track.player_start_time + 20
        )
        # ...overlapped by the main music starting 10 seconds in.
        background_music = background_music_track.create_sound(
            engine_file_key="output_final",
            player_start_time=background_music_track.player_start_time + 10
        )
        background_music.volume = 45
        file_url = audio_block_1.render_2(out_filepath="F:/Sons utiles/tests/test_python_1.mp3", format_type="mp3")
        print(file_url)
if __name__ == '__main__':
unittest.main() | tests/audio_engine/test_audio_editing.py | import time
import unittest
from inoft_vocal_framework.audio_editing.audioclip import AudioBlock
class MyTestCase(unittest.TestCase):
def test_something(self):
start = time.time()
audio_block_1 = AudioBlock()
audio_block_1.create_track()
"""river_track = Track(is_primary=False, loop_until_primary_tracks_finish=True)
river_background = Sound(local_filepath="F:/Sons utiles/2009/LucasFilm Sound Effects Library/LucasFilm Sound Effects Library CD2 - Elements/track 43.mp3")
river_background.change_volume(-6.0)
river_track.append_sound(river_background)
forest_track = Track(is_primary=False, loop_until_primary_tracks_finish=True)
forest_background = Sound(local_filepath="F:/Sons utiles/2009/LucasFilm Sound Effects Library/LucasFilm Sound Effects Library CD1 - Animal Sounds/track 95.mp3")
forest_background.change_volume(-6.0)
forest_track.append_sound(forest_background)"""
"""from inoft_vocal_engine.speech_synthesis.polly.client import PollyClient
from inoft_vocal_engine.speech_synthesis.polly import VOICES"""
# track_voice = audio_block_1.create_track(primary=True, loop=False)
"""voice_sound = track_voice.create_sound(local_filepath=PollyClient().synthesize(
text="Je suis un test d'audio dynamique ?",
voice_id=VOICES.French_France_Female_CELINE.id,
filepath_to_save_to="F:/Sons utiles/test_synthesised_dialogue.mp3"
), custom_key="voice", player_start_time=track_voice.start_time)"""
"""rifle_shots = track_voice.create_sound(
local_filepath="F:/Sons utiles/Sound Effects/Guns/Automatic/238916__qubodup__rifle-shooting.flac",
player_start_time=voice_sound.player_end_time + 20, player_end_time=voice_sound.player_end_time + 40
)"""
background_music_track = audio_block_1.create_track(primary=True)
"""background_music = background_music_track.create_sound(
local_filepath="F:/Sons utiles/Musics/Vintage (1940s) French Music/Pour Vous J'Avais Fait Cette Chanson - Jean Sablon.wav",
player_start_time=background_music_track.start_time
)"""
background_music_track.create_sound(
engine_file_key="ambiance",
player_start_time=background_music_track.player_start_time,
player_end_time=background_music_track.player_start_time + 20
)
background_music = background_music_track.create_sound(
engine_file_key="output_final",
player_start_time=background_music_track.player_start_time + 10
)
background_music.volume = 45
file_url = audio_block_1.render_2(out_filepath="F:/Sons utiles/tests/test_python_1.mp3", format_type="mp3")
print(file_url)
if __name__ == '__main__':
unittest.main() | 0.291384 | 0.239744 |
r'''A data-parallel Mean metric.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import weights_broadcast_ops
from hybridbackend.tensorflow.distribute.communicator import CollectiveOps
from hybridbackend.tensorflow.distribute.communicator_pool import \
CommunicatorPool
def _allreduce_mean(comm, inputs, inputs_deps):
r'''Communicator call to reduce mean across workers.
'''
with ops.control_dependencies(inputs_deps):
if isinstance(inputs, (list, tuple)):
inputs = inputs[0]
sum_inputs = comm.allreduce(inputs, CollectiveOps.SUM)
return sum_inputs, None
def mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
r'''Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
'''
with vs.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metrics_impl.metric_variable([], dtypes.float32, name='total')
count = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
values_sum = math_ops.reduce_sum(values)
else:
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
values_sum = math_ops.reduce_sum(values)
num_values = math_ops.reduce_sum(weights)
stacked = array_ops.stack([values_sum, num_values])
sum_stacked = CommunicatorPool.get().call(
_allreduce_mean, stacked, trainable=False)
if isinstance(sum_stacked, (list, tuple)):
sum_stacked = sum_stacked[0]
values_sum, num_values = array_ops.unstack(sum_stacked)
update_total_op = state_ops.assign_add(total, values_sum)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
# pylint: disable=protected-access
metric_op = (
metrics_impl._safe_scalar_div(total, count, 'value')
if hasattr(metrics_impl, '_safe_scalar_div')
else metrics_impl._safe_div(total, count, 'value'))
if metrics_collections:
ops.add_to_collections(metrics_collections, metric_op)
# pylint: disable=protected-access
update_op = (
metrics_impl._safe_scalar_div(
update_total_op, update_count_op, 'update_op')
if hasattr(metrics_impl, '_safe_scalar_div')
else metrics_impl._safe_div(
update_total_op, update_count_op, 'update_op'))
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return metric_op, update_op | hybridbackend/tensorflow/metrics/mean.py |
r'''A data-parallel Mean metric.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import weights_broadcast_ops
from hybridbackend.tensorflow.distribute.communicator import CollectiveOps
from hybridbackend.tensorflow.distribute.communicator_pool import \
CommunicatorPool
def _allreduce_mean(comm, inputs, inputs_deps):
r'''Communicator call to reduce mean across workers.
'''
with ops.control_dependencies(inputs_deps):
if isinstance(inputs, (list, tuple)):
inputs = inputs[0]
sum_inputs = comm.allreduce(inputs, CollectiveOps.SUM)
return sum_inputs, None
def mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
r'''Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
'''
with vs.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metrics_impl.metric_variable([], dtypes.float32, name='total')
count = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
values_sum = math_ops.reduce_sum(values)
else:
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
values_sum = math_ops.reduce_sum(values)
num_values = math_ops.reduce_sum(weights)
stacked = array_ops.stack([values_sum, num_values])
sum_stacked = CommunicatorPool.get().call(
_allreduce_mean, stacked, trainable=False)
if isinstance(sum_stacked, (list, tuple)):
sum_stacked = sum_stacked[0]
values_sum, num_values = array_ops.unstack(sum_stacked)
update_total_op = state_ops.assign_add(total, values_sum)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
# pylint: disable=protected-access
metric_op = (
metrics_impl._safe_scalar_div(total, count, 'value')
if hasattr(metrics_impl, '_safe_scalar_div')
else metrics_impl._safe_div(total, count, 'value'))
if metrics_collections:
ops.add_to_collections(metrics_collections, metric_op)
# pylint: disable=protected-access
update_op = (
metrics_impl._safe_scalar_div(
update_total_op, update_count_op, 'update_op')
if hasattr(metrics_impl, '_safe_scalar_div')
else metrics_impl._safe_div(
update_total_op, update_count_op, 'update_op'))
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return metric_op, update_op | 0.938738 | 0.510802 |
Copyright (c) 2017, WinQuant Information and Technology Co. Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
    * Neither the name of WinQuant Information and Technology Co. Ltd. nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL WINQUANT INFORMATION AND TECHNOLOGY CO. LTD. BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# built-in modules
import datetime as dt
import functools
# third-party modules
import numpy as np
import pandas as pd
# customized modules
import data.api.base as base
from data.config import *
from data.driver import mongodb
from data.driver import mysql
from data.driver import sqlite3
import util.calendar as uc
# Get RIC exchange code given the Datayes ones.
RIC_EXCHANGE_CODE = { 'XSHG': 'SH',
'XSHE': 'SZ' }
# Get the Datayes ones given the RIC ones.
DATAYES_EXCHANGE_CODE = { 'SH': 'XSHG',
'SZ': 'XSHE' }
DEFAULT_START_DATE = dt.date( 2016, 1, 4 )
TICK_DEFAULT_START_DATE = '20160104'
TICK_DATE_FORMAT = '%Y%m%d'
WIND_DEFAULT_START_DATE = '20160104'
WIND_DATE_FORMAT = '%Y%m%d'
# @functools.lru_cache( maxsize=32 )
def _getUniverse( asOfDate, country='CN' ):
'''Get stock universe as of the given date.
Parameters
----------
asOfDate : datetime.date
Data date of the stock classification or
None if not found;
country : str
Country identifier, currently, only CN supported.
Returns
-------
universeInfo : dict
Universe information with `Sector` universe sector and `Stocks` stock information.
Exceptions
----------
raise Exception when duplicated records found.
'''
username, password = <PASSWORD>DB_CRED
db = mongodb.getAuthenticatedConnection( MONGODB_URL,
MONGODB_PORT,
username,
password,
'universe' )
universe = db.stocks.find_one( { 'Date': { '$lte': dt.datetime.combine( asOfDate,
dt.datetime.min.time() ) },
'Country': country },
sort=[ ( 'Date', mongodb.pymongo.DESCENDING ) ] )
return universe
def getStockClassification( asOfDate, exch=None, country='CN', alive=True ):
'''Get stock classification.
Parameters
----------
asOfDate : datetime.date
Data date to get all available stock names;
exch : str
Exchange name of the stocks expected or None to get all names in the country;
country : str
Country of the stocks traded in;
alive : boolean
An indicator whether return alive stocks only or include all stocks on the exchange.
Returns
-------
industryClassification : pandas.DataFrame
Industry classification.
Exceptions
----------
raise Exception when duplicated records found.
'''
universe = _getUniverse( asOfDate, country )
industryClassification = pd.read_json( universe[ 'Sectors' ] )
industryClassification.sort_index( inplace=True )
if exch is not None:
validExch = DATAYES_EXCHANGE_CODE.get( exch, exch )
industryClassification = industryClassification[
industryClassification.exchangeCD == validExch ]
if alive:
# Get listed stocks
stockInfo = getStockInformation( asOfDate )
stockInfo = stockInfo[ stockInfo.listStatusCD == 'L' ]
industryClassification = industryClassification[
industryClassification.isNew == 1 ]
industryClassification = industryClassification[
industryClassification.secID.isin( stockInfo.secID ) ]
return industryClassification
def getStockInformation( asOfDate, exch=None, country='CN' ):
'''Get stock fundamental information, e.g. listing date, non-rest floting, etc.
Parameters
----------
asOfDate : datetime.date
Data date to get all available stock names;
exch : str or None
Exchange name of the stocks expected or None to get all names in the country;
country : str
Country of the stocks traded in.
Returns
-------
stockInformation : pandas.DataFrame
Stock information.
Exceptions
----------
raise Exception when duplicated records found.
'''
universe = _getUniverse( asOfDate, country )
stockInfo = pd.read_json( universe[ 'Stocks' ] )
stockInfo.sort_index( inplace=True )
if exch is not None:
validExch = DATAYES_EXCHANGE_CODE.get( exch, exch )
stockInfo = stockInfo[ stockInfo.exchangeCD == validExch ]
return stockInfo
def getExchangeStockNames( asOfDate, exch=None, country='CN', alive=True ):
'''Get all stock names in the given exchange.
Parameters
----------
asOfDate : datetime.date
Data date to get all available stock names;
exch : str
Exchange name of the stocks expected or None to get all names under the country;
country : str
Country of the stocks traded on;
alive : boolean
An indicator whether return alive stocks only or include all stocks on the exchange.
Returns
-------
stocks : list of str
Names of all stocks traded on the exchange.
Exceptions
----------
raise Exception when duplicated records found.
'''
classification = getStockClassification( asOfDate, exch=exch,
country=country, alive=alive )
if classification is None:
stocks = None
else:
stocks = list( classification.secID )
return stocks
def getDailyData( secId, startDate=DEFAULT_START_DATE, endDate=dt.date.today() ):
'''Get daily data for the given stocks during the date range.
Parameters
----------
secId : str
Security ID of the stock;
startDate : datetime.date
Start date of the daily data queried inclusively;
endDate : datetime.date
End date of the daily data queried inclusively.
Returns
-------
dailyData : pandas.DataFrame
Requested daily data in pandas.DataFrame.
Exceptions
----------
raise Exception when duplicated records found on the given stock name.
'''
# Get authenticated MongoDB connection
username, password = <PASSWORD>
db = mongodb.getAuthenticatedConnection( MONGODB_URL, MONGODB_PORT,
username, password, '<PASSWORD>Data' )
# Query data
cursor = db.stocks.find( { 'SecID': secId } )
# Sanity check
nRecords = cursor.count()
if nRecords == 0:
dailyData = None
elif nRecords > 1:
raise Exception( 'Duplicated {n:d} records found for stock {s:s}.'.format(
n=nRecords, s=secId ) )
else:
data = cursor.next()
dailyData = pd.read_json( data[ 'Data' ] )
dailyData.sort_index( inplace=True )
# Filtered by date
startDateStr = startDate.strftime( '%Y-%m-%d' )
endDateStr = endDate.strftime( '%Y-%m-%d' )
greaterDates = dailyData.tradeDate >= startDateStr
smallerDates = dailyData.tradeDate <= endDateStr
dailyData = dailyData[ np.logical_and( greaterDates, smallerDates ) ]
return dailyData
def getBinData( secId, startDate=DEFAULT_START_DATE, endDate=dt.date.today() ):
'''Get minute-by-minute data for the given stock during the date range.
Parameters
----------
secId : str
Security ID of the stock;
startDate : datetime.date
Start date of the bin data required inclusively,
endDate : datetime.date
End date of the bin data required inclusively.
Returns
-------
binData : pandas.Panel
Requested bin data in pandas.Panel.
Exceptoins
----------
raise Exception when duplicated records found on the given stock name.
'''
# Get authenticated MongoDB connection
username, password = MONGODB_CRED
db = mongodb.getAuthenticatedConnection( MONGODB_URL, MONGODB_PORT,
username, password, 'binData' )
# Query data
cursor = db.stocks.find( { 'SecID': secId, 'Date': {
'$gte': { 'Date': dt.datetime.combine( startDate, dt.datetime.min.time() ) },
'$lte': { 'Date': dt.datetime.combine( endDate, dt.datetime.min.time() ) } } } )
data = {}
for item in cursor:
# Build DataFrame's to convert to a Panel.
date = item[ 'Date' ].date()
if date in data:
raise Exception( 'Duplicated records on {d:s} found.'.format(
d=str( date ) ) )
else:
dayBinData = pd.read_json( item[ 'Data' ] )
dayBinData.sort_index( inplace=True )
data[ date ] = dayBinData
return pd.Panel( data )
class BinDataSource( base.BinDataSource ):
'''Get stocks bin data from the database.
'''
def __init__( self ):
'''Initialize a BinDataSource for stocks.
'''
super( BinDataSource, self ).__init__( 'indicator_stock' )
class WindDataSource( object ):
'''Get data from Wind.
'''
def __init__( self ):
'''Initialize a WindDataSource object.
'''
username, password = MYSQL_WIND_CRED
dbname = 'wind'
self.conn = mysql.getAuthenticatedConnection( MYSQL_WIND_URL, MYSQL_WIND_PORT,
username, password, dbname, encoding='gbk' )
def getStockDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get daily data for the given instrument in the specified date range.
Parameters
----------
secId : str
Wind stock code; if None, get the dailyData on all the stocks;
startDate : str
start date of the data in the format %Y%m%d;
endDate : str
end date of the data in the format %Y%m%d.
Returns
-------
dailyData : pandas.DataFrame
daily data for the specified stock. The order of the rows is not guaranteed.
Exceptions
----------
raise Exception when error occurs reading the daily data.
'''
tableName = 'ashareeodprices'
sql = mysql.buildSql( tableName, secId, startDate, endDate )
df = pd.read_sql( sql, self.conn )
return df
def getIndexDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get daily data for the given index in the specified date range.
Parameters
----------
secId : str
Wind stock code; if None, get the dailyData on all the stocks;
startDate : str
start date of the data in the format %Y%m%d;
endDate : str
end date of the data in the format %Y%m%d.
Returns
-------
dailyData : pandas.DataFrame
daily data for the specified index. The order of the rows is not guaranteed.
Exceptions
----------
raise Exception when error occurs reading the daily data.
'''
tableName = 'aindexeodprices'
sql = mysql.buildSql( tableName, secId, startDate, endDate )
df = pd.read_sql( sql, self.conn )
return df
def getFundamentals( self, tableName, secIds=None, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ), dateColName='REPORT_PERIOD' ):
'''Get fundamentals for the given stocks in the specific date range.
Parameters
----------
tableName : str
data source table;
secIds : list of str or None
Wind stock codes whose fundamental data to retrieve; if None, get the
fundamentals on all the stocks;
startDate : str
start date of the data in the format %Y%m%d;
endDate : str
end date of the data in the format %Y%m%d;
dateColName : str
the name of the date column.
Returns
-------
fundamentals : pandas.DataFrame
fundamental data for the stocks specified. The order of the rows is not
guaranteed.
Exceptions
----------
raise Exception when error occurs reading the data.
'''
if secIds is None:
sql = mysql.buildSql( tableName, secIds, startDate, endDate,
dateColumn=dateColName )
df = pd.read_sql( sql, self.conn )
else:
dfs = []
# batch the query 100 per query
step = 100
index = 0
total = len( secIds )
while index < total:
sql = mysql.buildSqlWithSecIds( tableName,
secIds[ index : index + step ], startDate=startDate,
endDate=endDate, dateColumn=dateColName )
dfs.append( pd.read_sql( sql, self.conn ) )
index += step
df = pd.concat( dfs )
return df
def getDailyDataOnDate( self, secIds, dataDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get daily data for all instruments on the given date.
Parameters
----------
secIds : list of str
All instruments to read;
dataDate : str
data date in the format %Y%m%d.
Returns
-------
dailyData : pandas.DataFrame
daily data for the specific trading date.
'''
tableName = 'ashareeodprices'
dfs = []
step = 100
index = 0
total = len( secIds )
while index < total:
sql = mysql.buildSqlWithSecIds( tableName, secIds[ index : index + step ],
startDate=dataDate, endDate=dataDate )
dfs.append( pd.read_sql( sql, self.conn ) )
index += step
df = pd.concat( dfs ).drop_duplicates( [ 'S_INFO_WINDCODE' ] )
# sort by securities identifier
df.set_index( [ 'S_INFO_WINDCODE' ], inplace=True )
return df
def getDailyDataWithFields( self, secIds, fields, startDate, endDate, tableName='ashareeodprices' ):
'''Get selected data on the given date.
Parameters
----------
secIds : list of str
All securities concerned;
fields : list of str
fields concerned, if not specified (None), all columns are extracted;
startDate : str
start data date in the format %Y%m%d;
endDate : str
end data date in the format %Y%m%d;
tableName : str
name of the data table, by default, ashareeodprices.
Returns
-------
pivot : pandas.DataFrame
panel with three dimension
1. data filed;
2. data date, sorted by date ascendingly;
3. sec id.
'''
dfs = []
step = 100
index = 0
total = len( secIds )
while index < total:
sql = mysql.buildSqlWithSecIds( tableName, secIds[ index : index + step ],
startDate=startDate, endDate=endDate, dataColumns=fields )
dfs.append( pd.read_sql( sql, self.conn ) )
index += step
df = pd.concat( dfs ).drop_duplicates( [ 'TRADE_DT', 'S_INFO_WINDCODE' ] )
df.sort_values( 'TRADE_DT', inplace=True, ascending=True )
return df.pivot( 'TRADE_DT', 'S_INFO_WINDCODE' )
def getDividendInformation( self, secId=None, startDate=DEFAULT_START_DATE, endDate=dt.date.today(),
realizedOnly=True ):
'''Get dividend information from Wind database.
Parameters
----------
secId : str
Wind stock code;
startDate : datetime.date
start date of the dividend data;
endDate : datetime.date
end date of the dividend data.
Returns
-------
dividendInfo : pandas.DataFrame
All dividend info in pandas DataFrame.
'''
tableName = 'asharedividend'
sql = mysql.buildSql( tableName, secId, startDate, endDate, dateColumn='EX_DT' )
if realizedOnly:
# modify the SQL to constraint the row range.
sql += " AND S_DIV_PROGRESS='3'"
df = pd.read_sql( sql, self.conn )
return df
def getRightIssueInformation( self, secId=None, startDate=DEFAULT_START_DATE, endDate=dt.date.today(),
realizedOnly=True ):
'''Get dividend information from Wind database.
Parameters
----------
secId : str
Wind stock code;
startDate : datetime.date
start date of the dividend data;
endDate : datetime.date
end date of the dividend data.
Returns
-------
dividendInfo : pandas.DataFrame
All dividend info in pandas DataFrame.
'''
tableName = 'asharerightissue'
sql = mysql.buildSql( tableName, secId, startDate, endDate, dateColumn='S_RIGHTSISSUE_EXDIVIDENDDATE' )
if realizedOnly:
# modify the SQL to constraint the row range.
sql += " AND S_RIGHTSISSUE_PROGRESS='3'"
df = pd.read_sql( sql, self.conn )
return df
def getBusinessDates( self, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get business dates during the given date range.
Parameters
----------
startDate : str
Start date of the business date range in the format '%Y%m%d';
endDate : str
end date of the business dates range in the format '%Y%m%d'.
Returns
-------
businessDates : pandas.Series
All business dates during the date range.
'''
sql = "SELECT * FROM asharecalendar WHERE TRADE_DAYS >= '{sd:s}' AND TRADE_DAYS <= '{ed:s}'".format(
sd=startDate, ed=endDate )
df = pd.read_sql( sql, self.conn )
# since 2012-01-04, Shanghai stock exchange and Shenzhen stock exchange share the
# same trading calendar.
return df[ df.S_INFO_EXCHMARKET == 'SSE' ].TRADE_DAYS.sort_values()
def getDelistedStocks( self, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get delisted stocks from the given start date to now.
Parameters
----------
startDate : str
Start date of the business date range in the format %Y%m%d;
endDate : datetime.date
end date of the business dates range in the format %Y%m%d.
Returns
-------
delistedStocks : pandas.Series
delisted stocks in a pandas Series with S_INFO_DELISTDATE indexed by stock codes.
'''
sql = "SELECT S_INFO_WINDCODE, S_INFO_DELISTDATE FROM asharedescription WHERE S_INFO_DELISTDATE >= '{sd:s}' AND S_INFO_DELISTDATE <= '{ed:s}'".format( sd=startDate, ed=endDate )
df = pd.read_sql( sql, self.conn )
delistedStocks = df.S_INFO_DELISTDATE
delistedStocks.index = df.S_INFO_WINDCODE
return delistedStocks
def getSuspensionDates( self, startDate=WIND_DEFAULT_START_DATE,
endDate=dt.date.today().strftime( WIND_DATE_FORMAT ) ):
'''Get suspending dates during the given date range.
Parameters
----------
startDate : str
Start date of the business date range in the format %Y%m%d;
endDate : datetime.date
end date of the business dates range in the format %Y%m%d.
Returns
-------
df : pandas.DataFrame
All suspending dates during the date range.
'''
sql = "SELECT S_INFO_WINDCODE, S_DQ_SUSPENDDATE FROM asharetradingsuspension WHERE S_DQ_SUSPENDDATE >= '{sd:s}' AND S_DQ_SUSPENDDATE <= '{ed:s}'".format(
sd=startDate, ed=endDate )
df = pd.read_sql( sql, self.conn )
return df
class CachedWindSource( WindDataSource ):
    '''In-memory Wind data source.

    Loads all daily price data needed by a backtest in one batch at
    construction time and answers subsequent queries from the cached
    pandas.DataFrame instead of hitting the database.
    '''

    def __init__( self, secIds, startDate, endDate ):
        '''Initialize an in-memory data source.

        Parameters
        ----------
        secIds : list of str
            All securities concerned;
        startDate : str
            backtest start date in the format %Y%m%d;
        endDate : str
            backtest end date in the format %Y%m%d.
        '''
        super( CachedWindSource, self ).__init__()
        # Widen the cached window beyond the backtest range: 100 trading days
        # of history before the start (for lookback computations) and one
        # trading day after the end.
        stockCalendar = uc.AShareTradingCalendar( self,
                startDate=uc.DEFAULT_TS_START_DATE )
        try:
            dataStartDate = stockCalendar.prevTradingDate( startDate, n=100 )
        except Exception:
            # calendar lookup failed -- fall back to the raw backtest date
            dataStartDate = startDate
        try:
            dataEndDate = stockCalendar.nextTradingDate( endDate )
        except Exception:
            dataEndDate = endDate
        # Load all daily prices into memory, batching 100 securities per SQL
        # statement to keep each query to a reasonable size.
        step = 100
        tableName = 'ashareeodprices'
        dfs = []
        for index in range( 0, len( secIds ), step ):
            sql = mysql.buildSqlWithSecIds( tableName, secIds[ index : index + step ],
                    startDate=dataStartDate, endDate=dataEndDate )
            dfs.append( pd.read_sql( sql, self.conn ) )
        if dfs:
            df = pd.concat( dfs )
        else:
            # guard: pd.concat( [] ) raises on an empty securities list
            df = pd.DataFrame( columns=[ 'S_INFO_WINDCODE', 'TRADE_DT' ] )
        df.sort_values( 'TRADE_DT', inplace=True, ascending=True )
        self.data = df

    def getStockDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
            endDate=None ):
        '''Get daily data for the given instrument in the specified date range.

        Parameters
        ----------
        secId : str
            Wind stock code; if None, get the daily data on all the stocks;
        startDate : str
            start date of the data in the format %Y%m%d inclusively;
        endDate : str or None
            end date of the data in the format %Y%m%d inclusively; None means
            today. Resolved at call time -- a dt.date.today() default argument
            would be evaluated once at class-definition time and go stale in
            long-running processes.

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specified stock. The order of the rows is not
            guaranteed.
        '''
        if endDate is None:
            endDate = dt.date.today().strftime( WIND_DATE_FORMAT )
        dataFilter = ( self.data.TRADE_DT >= startDate ) & ( self.data.TRADE_DT <= endDate )
        dailyData = self.data[ dataFilter ]
        if secId is not None:
            dailyData = dailyData[ dailyData.S_INFO_WINDCODE == secId ]
        return dailyData

    def getDailyDataOnDate( self, secIds, dataDate=None ):
        '''Get daily data for all instruments on the given date.

        Parameters
        ----------
        secIds : list of str
            All instruments to read;
        dataDate : str or None
            data date in the format %Y%m%d; None means today, resolved at
            call time (see getStockDailyData).

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specific trading date, indexed by Wind code.
        '''
        if dataDate is None:
            dataDate = dt.date.today().strftime( WIND_DATE_FORMAT )
        dataFilter = self.data.S_INFO_WINDCODE.isin( secIds ) & \
                ( self.data.TRADE_DT == dataDate )
        # copy() so set_index does not mutate (or warn about) a filtered view
        df = self.data[ dataFilter ].copy()
        df.set_index( [ 'S_INFO_WINDCODE' ], inplace=True )
        return df

    def getDailyDataWithFields( self, secIds, fields, startDate, endDate, tableName='ashareeodprices' ):
        '''Get selected fields for the given securities and date range.

        Parameters
        ----------
        secIds : list of str
            All securities concerned;
        fields : list of str
            fields concerned;
        startDate : str
            start data date in the format %Y%m%d;
        endDate : str
            end data date in the format %Y%m%d;
        tableName : str
            unused here -- kept only for interface compatibility with
            WindDataSource.getDailyDataWithFields; the cached data always
            comes from ashareeodprices.

        Returns
        -------
        data : pandas.DataFrame
            requested data pivoted with TRADE_DT as index and
            S_INFO_WINDCODE as columns.
        '''
        dataWithField = self.data[ fields + [ 'S_INFO_WINDCODE', 'TRADE_DT' ] ]
        dataFilter = ( dataWithField.TRADE_DT >= startDate ) & \
                ( dataWithField.TRADE_DT <= endDate )
        dataWithField = dataWithField[ dataFilter ]
        dataWithField = dataWithField[ dataWithField.S_INFO_WINDCODE.isin( secIds ) ]
        return dataWithField.pivot( 'TRADE_DT', 'S_INFO_WINDCODE' )
'''
Copyright (c) 2017, WinQuant Information and Technology Co. Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# built-in modules
import datetime as dt
import functools
# third-party modules
import numpy as np
import pandas as pd
# customized modules
import data.api.base as base
from data.config import *
from data.driver import mongodb
from data.driver import mysql
from data.driver import sqlite3
import util.calendar as uc
# Map Datayes exchange codes to RIC (Reuters Instrument Code) suffixes.
RIC_EXCHANGE_CODE = { 'XSHG': 'SH',
                      'XSHE': 'SZ' }
# Inverse mapping: RIC exchange suffixes back to the Datayes codes.
DATAYES_EXCHANGE_CODE = { 'SH': 'XSHG',
                          'SZ': 'XSHE' }
# Earliest supported data date (first A-share trading day of 2016),
# as a date object for the Datayes/Mongo APIs and as %Y%m%d strings
# for the tick and Wind sources.
DEFAULT_START_DATE = dt.date( 2016, 1, 4 )
TICK_DEFAULT_START_DATE = '20160104'
TICK_DATE_FORMAT = '%Y%m%d'
WIND_DEFAULT_START_DATE = '20160104'
WIND_DATE_FORMAT = '%Y%m%d'
# NOTE: caching is intentionally disabled; re-enable once universe snapshots
# are confirmed immutable for a given asOfDate.
# @functools.lru_cache( maxsize=32 )
def _getUniverse( asOfDate, country='CN' ):
    '''Get stock universe as of the given date.

    Parameters
    ----------
    asOfDate : datetime.date
        Data date of the stock classification;
    country : str
        Country identifier, currently, only CN supported.

    Returns
    -------
    universeInfo : dict or None
        Latest universe snapshot dated on or before asOfDate, with `Sectors`
        (universe sector) and `Stocks` (stock information) payloads, or None
        when no snapshot exists.
    '''
    # credential constant reconstructed from the sibling usage in getBinData;
    # this copy of the line was mangled.
    username, password = MONGODB_CRED
    db = mongodb.getAuthenticatedConnection( MONGODB_URL,
            MONGODB_PORT,
            username,
            password,
            'universe' )
    # newest document whose Date is <= asOfDate for the requested country
    universe = db.stocks.find_one( { 'Date': { '$lte': dt.datetime.combine( asOfDate,
            dt.datetime.min.time() ) },
            'Country': country },
            sort=[ ( 'Date', mongodb.pymongo.DESCENDING ) ] )
    return universe
def getStockClassification( asOfDate, exch=None, country='CN', alive=True ):
    '''Get the industry classification of the stock universe.

    Parameters
    ----------
    asOfDate : datetime.date
        Data date to get all available stock names;
    exch : str
        Exchange name of the stocks expected or None to get all names in the country;
    country : str
        Country of the stocks traded in;
    alive : boolean
        An indicator whether return alive stocks only or include all stocks on the exchange.

    Returns
    -------
    industryClassification : pandas.DataFrame
        Industry classification.

    Exceptions
    ----------
    raise Exception when duplicated records found.
    '''
    universe = _getUniverse( asOfDate, country )
    classification = pd.read_json( universe[ 'Sectors' ] )
    classification.sort_index( inplace=True )
    if exch is not None:
        # accept either a RIC suffix or a native Datayes exchange code
        exchCode = DATAYES_EXCHANGE_CODE.get( exch, exch )
        classification = classification[ classification.exchangeCD == exchCode ]
    if alive:
        # restrict to the current (isNew) classification of stocks whose
        # listing status is 'L' (listed)
        listed = getStockInformation( asOfDate )
        listed = listed[ listed.listStatusCD == 'L' ]
        classification = classification[ classification.isNew == 1 ]
        classification = classification[ classification.secID.isin( listed.secID ) ]
    return classification
def getStockInformation( asOfDate, exch=None, country='CN' ):
    '''Get stock fundamental information, e.g. listing date, non-rest floating, etc.

    Parameters
    ----------
    asOfDate : datetime.date
        Data date to get all available stock names;
    exch : str or None
        Exchange name of the stocks expected or None to get all names in the country;
    country : str
        Country of the stocks traded in.

    Returns
    -------
    stockInformation : pandas.DataFrame
        Stock information.

    Exceptions
    ----------
    raise Exception when duplicated records found.
    '''
    universe = _getUniverse( asOfDate, country )
    info = pd.read_json( universe[ 'Stocks' ] )
    info.sort_index( inplace=True )
    if exch is None:
        return info
    # accept either a RIC suffix or a native Datayes exchange code
    exchCode = DATAYES_EXCHANGE_CODE.get( exch, exch )
    return info[ info.exchangeCD == exchCode ]
def getExchangeStockNames( asOfDate, exch=None, country='CN', alive=True ):
    '''Get all stock names on the given exchange.

    Parameters
    ----------
    asOfDate : datetime.date
        Data date to get all available stock names;
    exch : str
        Exchange name of the stocks expected or None to get all names under the country;
    country : str
        Country of the stocks traded on;
    alive : boolean
        An indicator whether return alive stocks only or include all stocks on the exchange.

    Returns
    -------
    stocks : list of str or None
        Names of all stocks traded on the exchange, or None when no
        classification is available.

    Exceptions
    ----------
    raise Exception when duplicated records found.
    '''
    classification = getStockClassification( asOfDate, exch=exch,
            country=country, alive=alive )
    return None if classification is None else list( classification.secID )
def getDailyData( secId, startDate=DEFAULT_START_DATE, endDate=None ):
    '''Get daily data for the given stock during the date range.

    Parameters
    ----------
    secId : str
        Security ID of the stock;
    startDate : datetime.date
        Start date of the daily data queried inclusively;
    endDate : datetime.date or None
        End date of the daily data queried inclusively; None means today.
        Resolved at call time -- a dt.date.today() default argument would be
        evaluated once at import and go stale in long-running processes.

    Returns
    -------
    dailyData : pandas.DataFrame or None
        Requested daily data, or None when no record exists for the stock.

    Exceptions
    ----------
    raise Exception when duplicated records found on the given stock name.
    '''
    if endDate is None:
        endDate = dt.date.today()
    # Get authenticated MongoDB connection.
    # Credential constant and database name reconstructed from the sibling
    # getBinData implementation; this copy was mangled. NOTE(review): confirm
    # the database name is 'dailyData' against the deployment.
    username, password = MONGODB_CRED
    db = mongodb.getAuthenticatedConnection( MONGODB_URL, MONGODB_PORT,
            username, password, 'dailyData' )
    # Query data -- one document per security holding a JSON-encoded frame
    cursor = db.stocks.find( { 'SecID': secId } )
    # Sanity check
    nRecords = cursor.count()
    if nRecords == 0:
        dailyData = None
    elif nRecords > 1:
        raise Exception( 'Duplicated {n:d} records found for stock {s:s}.'.format(
                n=nRecords, s=secId ) )
    else:
        data = cursor.next()
        dailyData = pd.read_json( data[ 'Data' ] )
        dailyData.sort_index( inplace=True )
        # Filter by date; tradeDate is stored as %Y-%m-%d strings
        startDateStr = startDate.strftime( '%Y-%m-%d' )
        endDateStr = endDate.strftime( '%Y-%m-%d' )
        greaterDates = dailyData.tradeDate >= startDateStr
        smallerDates = dailyData.tradeDate <= endDateStr
        dailyData = dailyData[ np.logical_and( greaterDates, smallerDates ) ]
    return dailyData
def getBinData( secId, startDate=DEFAULT_START_DATE, endDate=None ):
    '''Get minute-by-minute data for the given stock during the date range.

    Parameters
    ----------
    secId : str
        Security ID of the stock;
    startDate : datetime.date
        Start date of the bin data required inclusively;
    endDate : datetime.date or None
        End date of the bin data required inclusively; None means today.
        Resolved at call time -- a dt.date.today() default argument would be
        evaluated once at import and go stale in long-running processes.

    Returns
    -------
    binData : pandas.Panel
        Requested bin data in pandas.Panel, one DataFrame per date.

    Exceptions
    ----------
    raise Exception when duplicated records found on the given stock name.
    '''
    if endDate is None:
        endDate = dt.date.today()
    # Get authenticated MongoDB connection
    username, password = MONGODB_CRED
    db = mongodb.getAuthenticatedConnection( MONGODB_URL, MONGODB_PORT,
            username, password, 'binData' )
    # Query data. BUG FIX: the original nested the field name inside the
    # comparison value ({'$gte': {'Date': d}}), which compares the Date field
    # against a sub-document instead of a datetime; the range operators must
    # wrap the datetimes directly.
    cursor = db.stocks.find( { 'SecID': secId, 'Date': {
            '$gte': dt.datetime.combine( startDate, dt.datetime.min.time() ),
            '$lte': dt.datetime.combine( endDate, dt.datetime.min.time() ) } } )
    data = {}
    for item in cursor:
        # Build DataFrame's keyed by date to convert to a Panel.
        date = item[ 'Date' ].date()
        if date in data:
            raise Exception( 'Duplicated records on {d:s} found.'.format(
                    d=str( date ) ) )
        else:
            dayBinData = pd.read_json( item[ 'Data' ] )
            dayBinData.sort_index( inplace=True )
            data[ date ] = dayBinData
    return pd.Panel( data )
class BinDataSource( base.BinDataSource ):
    '''Bin (minute-bar) data source for stocks.

    Thin specialization of the shared base.BinDataSource bound to the
    stock indicator data set.
    '''

    def __init__( self ):
        '''Bind the data source to the stock indicator table.'''
        super( BinDataSource, self ).__init__( 'indicator_stock' )
class WindDataSource( object ):
    '''Data source backed by the Wind MySQL database.

    Unless noted otherwise, all date parameters are strings in the %Y%m%d
    format and date ranges are inclusive on both ends. All endDate-style
    parameters now default to None, resolved to "today" at call time: the
    previous dt.date.today() defaults were evaluated once at class-definition
    time and went stale in long-running processes.
    '''

    def __init__( self ):
        '''Open an authenticated connection to the wind database.'''
        username, password = MYSQL_WIND_CRED
        dbname = 'wind'
        self.conn = mysql.getAuthenticatedConnection( MYSQL_WIND_URL, MYSQL_WIND_PORT,
                username, password, dbname, encoding='gbk' )

    @staticmethod
    def _today():
        '''Return today's date formatted %Y%m%d, evaluated at call time.'''
        return dt.date.today().strftime( WIND_DATE_FORMAT )

    def getStockDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
            endDate=None ):
        '''Get daily data for the given stock in the specified date range.

        Parameters
        ----------
        secId : str
            Wind stock code; if None, get the daily data on all the stocks;
        startDate : str
            start date of the data in the format %Y%m%d;
        endDate : str or None
            end date of the data in the format %Y%m%d; None means today.

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specified stock. The order of the rows is not
            guaranteed.

        Exceptions
        ----------
        raise Exception when error occurs reading the daily data.
        '''
        if endDate is None:
            endDate = self._today()
        sql = mysql.buildSql( 'ashareeodprices', secId, startDate, endDate )
        return pd.read_sql( sql, self.conn )

    def getIndexDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
            endDate=None ):
        '''Get daily data for the given index in the specified date range.

        Parameters
        ----------
        secId : str
            Wind index code; if None, get the daily data on all the indices;
        startDate : str
            start date of the data in the format %Y%m%d;
        endDate : str or None
            end date of the data in the format %Y%m%d; None means today.

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specified index. The order of the rows is not
            guaranteed.

        Exceptions
        ----------
        raise Exception when error occurs reading the daily data.
        '''
        if endDate is None:
            endDate = self._today()
        sql = mysql.buildSql( 'aindexeodprices', secId, startDate, endDate )
        return pd.read_sql( sql, self.conn )

    def getFundamentals( self, tableName, secIds=None, startDate=WIND_DEFAULT_START_DATE,
            endDate=None, dateColName='REPORT_PERIOD' ):
        '''Get fundamentals for the given stocks in the specific date range.

        Parameters
        ----------
        tableName : str
            data source table;
        secIds : list of str or None
            Wind stock codes whose fundamental data to retrieve; if None, get
            the fundamentals on all the stocks;
        startDate : str
            start date of the data in the format %Y%m%d;
        endDate : str or None
            end date of the data in the format %Y%m%d; None means today;
        dateColName : str
            the name of the date column.

        Returns
        -------
        fundamentals : pandas.DataFrame
            fundamental data for the stocks specified. The order of the rows
            is not guaranteed.

        Exceptions
        ----------
        raise Exception when error occurs reading the data.
        '''
        if endDate is None:
            endDate = self._today()
        if secIds is None:
            sql = mysql.buildSql( tableName, secIds, startDate, endDate,
                    dateColumn=dateColName )
            df = pd.read_sql( sql, self.conn )
        else:
            # batch the query, 100 securities per SQL statement
            step = 100
            dfs = []
            for index in range( 0, len( secIds ), step ):
                sql = mysql.buildSqlWithSecIds( tableName,
                        secIds[ index : index + step ], startDate=startDate,
                        endDate=endDate, dateColumn=dateColName )
                dfs.append( pd.read_sql( sql, self.conn ) )
            df = pd.concat( dfs )
        return df

    def getDailyDataOnDate( self, secIds, dataDate=None ):
        '''Get daily data for all instruments on the given date.

        Parameters
        ----------
        secIds : list of str
            All instruments to read;
        dataDate : str or None
            data date in the format %Y%m%d; None means today.

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specific trading date, indexed by Wind code.
        '''
        if dataDate is None:
            dataDate = self._today()
        step = 100
        dfs = []
        for index in range( 0, len( secIds ), step ):
            sql = mysql.buildSqlWithSecIds( 'ashareeodprices', secIds[ index : index + step ],
                    startDate=dataDate, endDate=dataDate )
            dfs.append( pd.read_sql( sql, self.conn ) )
        df = pd.concat( dfs ).drop_duplicates( [ 'S_INFO_WINDCODE' ] )
        # index by securities identifier
        df.set_index( [ 'S_INFO_WINDCODE' ], inplace=True )
        return df

    def getDailyDataWithFields( self, secIds, fields, startDate, endDate, tableName='ashareeodprices' ):
        '''Get selected fields for the given securities and date range.

        Parameters
        ----------
        secIds : list of str
            All securities concerned;
        fields : list of str
            fields concerned, if not specified (None), all columns are extracted;
        startDate : str
            start data date in the format %Y%m%d;
        endDate : str
            end data date in the format %Y%m%d;
        tableName : str
            name of the data table, by default, ashareeodprices.

        Returns
        -------
        pivot : pandas.DataFrame
            pivoted frame keyed by
            1. data field;
            2. data date, sorted by date ascendingly;
            3. sec id.
        '''
        step = 100
        dfs = []
        for index in range( 0, len( secIds ), step ):
            sql = mysql.buildSqlWithSecIds( tableName, secIds[ index : index + step ],
                    startDate=startDate, endDate=endDate, dataColumns=fields )
            dfs.append( pd.read_sql( sql, self.conn ) )
        df = pd.concat( dfs ).drop_duplicates( [ 'TRADE_DT', 'S_INFO_WINDCODE' ] )
        df.sort_values( 'TRADE_DT', inplace=True, ascending=True )
        return df.pivot( 'TRADE_DT', 'S_INFO_WINDCODE' )

    def getDividendInformation( self, secId=None, startDate=DEFAULT_START_DATE, endDate=None,
            realizedOnly=True ):
        '''Get dividend information from the Wind database.

        Parameters
        ----------
        secId : str
            Wind stock code;
        startDate : datetime.date
            start date of the dividend data;
        endDate : datetime.date or None
            end date of the dividend data; None means today;
        realizedOnly : boolean
            when True, only dividends whose progress status is '3'
            (realized) are returned.

        Returns
        -------
        dividendInfo : pandas.DataFrame
            All dividend info in pandas DataFrame.
        '''
        if endDate is None:
            endDate = dt.date.today()
        sql = mysql.buildSql( 'asharedividend', secId, startDate, endDate, dateColumn='EX_DT' )
        if realizedOnly:
            # constrain to realized dividend events
            sql += " AND S_DIV_PROGRESS='3'"
        return pd.read_sql( sql, self.conn )

    def getRightIssueInformation( self, secId=None, startDate=DEFAULT_START_DATE, endDate=None,
            realizedOnly=True ):
        '''Get right-issue information from the Wind database.

        Parameters
        ----------
        secId : str
            Wind stock code;
        startDate : datetime.date
            start date of the right-issue data;
        endDate : datetime.date or None
            end date of the right-issue data; None means today;
        realizedOnly : boolean
            when True, only right issues whose progress status is '3'
            (realized) are returned.

        Returns
        -------
        rightIssueInfo : pandas.DataFrame
            All right-issue info in pandas DataFrame.
        '''
        if endDate is None:
            endDate = dt.date.today()
        sql = mysql.buildSql( 'asharerightissue', secId, startDate, endDate, dateColumn='S_RIGHTSISSUE_EXDIVIDENDDATE' )
        if realizedOnly:
            # constrain to realized right-issue events
            sql += " AND S_RIGHTSISSUE_PROGRESS='3'"
        return pd.read_sql( sql, self.conn )

    def getBusinessDates( self, startDate=WIND_DEFAULT_START_DATE, endDate=None ):
        '''Get business dates during the given date range.

        Parameters
        ----------
        startDate : str
            Start date of the business date range in the format '%Y%m%d';
        endDate : str or None
            end date of the business dates range in the format '%Y%m%d';
            None means today.

        Returns
        -------
        businessDates : pandas.Series
            All business dates during the date range, sorted ascendingly.
        '''
        if endDate is None:
            endDate = self._today()
        sql = "SELECT * FROM asharecalendar WHERE TRADE_DAYS >= '{sd:s}' AND TRADE_DAYS <= '{ed:s}'".format(
                sd=startDate, ed=endDate )
        df = pd.read_sql( sql, self.conn )
        # since 2012-01-04, Shanghai stock exchange and Shenzhen stock exchange
        # share the same trading calendar, so SSE alone is sufficient.
        return df[ df.S_INFO_EXCHMARKET == 'SSE' ].TRADE_DAYS.sort_values()

    def getDelistedStocks( self, startDate=WIND_DEFAULT_START_DATE, endDate=None ):
        '''Get stocks delisted within the given date range.

        Parameters
        ----------
        startDate : str
            Start date of the range in the format %Y%m%d;
        endDate : str or None
            end date of the range in the format %Y%m%d; None means today.

        Returns
        -------
        delistedStocks : pandas.Series
            delisting dates (S_INFO_DELISTDATE) indexed by stock codes.
        '''
        if endDate is None:
            endDate = self._today()
        sql = "SELECT S_INFO_WINDCODE, S_INFO_DELISTDATE FROM asharedescription WHERE S_INFO_DELISTDATE >= '{sd:s}' AND S_INFO_DELISTDATE <= '{ed:s}'".format( sd=startDate, ed=endDate )
        df = pd.read_sql( sql, self.conn )
        delistedStocks = df.S_INFO_DELISTDATE
        delistedStocks.index = df.S_INFO_WINDCODE
        return delistedStocks

    def getSuspensionDates( self, startDate=WIND_DEFAULT_START_DATE, endDate=None ):
        '''Get suspending dates during the given date range.

        Parameters
        ----------
        startDate : str
            Start date of the range in the format %Y%m%d;
        endDate : str or None
            end date of the range in the format %Y%m%d; None means today.

        Returns
        -------
        df : pandas.DataFrame
            All suspending dates during the date range.
        '''
        if endDate is None:
            endDate = self._today()
        sql = "SELECT S_INFO_WINDCODE, S_DQ_SUSPENDDATE FROM asharetradingsuspension WHERE S_DQ_SUSPENDDATE >= '{sd:s}' AND S_DQ_SUSPENDDATE <= '{ed:s}'".format(
                sd=startDate, ed=endDate )
        return pd.read_sql( sql, self.conn )
class CachedWindSource( WindDataSource ):
    '''In-memory Wind data source.

    Loads all daily price data needed by a backtest in one batch at
    construction time and answers subsequent queries from the cached
    pandas.DataFrame instead of hitting the database.
    '''

    def __init__( self, secIds, startDate, endDate ):
        '''Initialize an in-memory data source.

        Parameters
        ----------
        secIds : list of str
            All securities concerned;
        startDate : str
            backtest start date in the format %Y%m%d;
        endDate : str
            backtest end date in the format %Y%m%d.
        '''
        super( CachedWindSource, self ).__init__()
        # Widen the cached window beyond the backtest range: 100 trading days
        # of history before the start (for lookback computations) and one
        # trading day after the end.
        stockCalendar = uc.AShareTradingCalendar( self,
                startDate=uc.DEFAULT_TS_START_DATE )
        try:
            dataStartDate = stockCalendar.prevTradingDate( startDate, n=100 )
        except Exception:
            # calendar lookup failed -- fall back to the raw backtest date
            dataStartDate = startDate
        try:
            dataEndDate = stockCalendar.nextTradingDate( endDate )
        except Exception:
            dataEndDate = endDate
        # Load all daily prices into memory, batching 100 securities per SQL
        # statement to keep each query to a reasonable size.
        step = 100
        tableName = 'ashareeodprices'
        dfs = []
        for index in range( 0, len( secIds ), step ):
            sql = mysql.buildSqlWithSecIds( tableName, secIds[ index : index + step ],
                    startDate=dataStartDate, endDate=dataEndDate )
            dfs.append( pd.read_sql( sql, self.conn ) )
        if dfs:
            df = pd.concat( dfs )
        else:
            # guard: pd.concat( [] ) raises on an empty securities list
            df = pd.DataFrame( columns=[ 'S_INFO_WINDCODE', 'TRADE_DT' ] )
        df.sort_values( 'TRADE_DT', inplace=True, ascending=True )
        self.data = df

    def getStockDailyData( self, secId=None, startDate=WIND_DEFAULT_START_DATE,
            endDate=None ):
        '''Get daily data for the given instrument in the specified date range.

        Parameters
        ----------
        secId : str
            Wind stock code; if None, get the daily data on all the stocks;
        startDate : str
            start date of the data in the format %Y%m%d inclusively;
        endDate : str or None
            end date of the data in the format %Y%m%d inclusively; None means
            today. Resolved at call time -- a dt.date.today() default argument
            would be evaluated once at class-definition time and go stale in
            long-running processes.

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specified stock. The order of the rows is not
            guaranteed.
        '''
        if endDate is None:
            endDate = dt.date.today().strftime( WIND_DATE_FORMAT )
        dataFilter = ( self.data.TRADE_DT >= startDate ) & ( self.data.TRADE_DT <= endDate )
        dailyData = self.data[ dataFilter ]
        if secId is not None:
            dailyData = dailyData[ dailyData.S_INFO_WINDCODE == secId ]
        return dailyData

    def getDailyDataOnDate( self, secIds, dataDate=None ):
        '''Get daily data for all instruments on the given date.

        Parameters
        ----------
        secIds : list of str
            All instruments to read;
        dataDate : str or None
            data date in the format %Y%m%d; None means today, resolved at
            call time (see getStockDailyData).

        Returns
        -------
        dailyData : pandas.DataFrame
            daily data for the specific trading date, indexed by Wind code.
        '''
        if dataDate is None:
            dataDate = dt.date.today().strftime( WIND_DATE_FORMAT )
        dataFilter = self.data.S_INFO_WINDCODE.isin( secIds ) & \
                ( self.data.TRADE_DT == dataDate )
        # copy() so set_index does not mutate (or warn about) a filtered view
        df = self.data[ dataFilter ].copy()
        df.set_index( [ 'S_INFO_WINDCODE' ], inplace=True )
        return df

    def getDailyDataWithFields( self, secIds, fields, startDate, endDate, tableName='ashareeodprices' ):
        '''Get selected fields for the given securities and date range.

        Parameters
        ----------
        secIds : list of str
            All securities concerned;
        fields : list of str
            fields concerned;
        startDate : str
            start data date in the format %Y%m%d;
        endDate : str
            end data date in the format %Y%m%d;
        tableName : str
            unused here -- kept only for interface compatibility with
            WindDataSource.getDailyDataWithFields; the cached data always
            comes from ashareeodprices.

        Returns
        -------
        data : pandas.DataFrame
            requested data pivoted with TRADE_DT as index and
            S_INFO_WINDCODE as columns.
        '''
        dataWithField = self.data[ fields + [ 'S_INFO_WINDCODE', 'TRADE_DT' ] ]
        dataFilter = ( dataWithField.TRADE_DT >= startDate ) & \
                ( dataWithField.TRADE_DT <= endDate )
        dataWithField = dataWithField[ dataFilter ]
        dataWithField = dataWithField[ dataWithField.S_INFO_WINDCODE.isin( secIds ) ]
        return dataWithField.pivot( 'TRADE_DT', 'S_INFO_WINDCODE' )
from datetime import timedelta
from unittest.mock import patch
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import override
from factory import create_batch
from libya_elections.constants import PHONE_NOT_ACTIVATED, NOT_WHITELISTED_NUMBER, \
POLLING_REPORT_CENTER_MISMATCH, INVALID_CENTER_ID, PHONE_ACTIVATED, POLLING_REPORT_INVALID, \
POLLING_NOT_OPEN, POLLING_REPORT_RECEIVED, CENTER_OPENING_NOT_AUTHORIZED, \
PRELIMINARY_VOTES_REPORT, CENTER_OPENED, CENTER_OPEN_INDICATOR, FIRST_PERIOD_NUMBER, \
LAST_PERIOD_NUMBER, POLLING_REPORT_RECEIVED_VERY_HIGH_TURNOUT, \
POLLING_REPORT_RECEIVED_NO_REGISTRANTS, RESPONSE_SERVER_ERROR, NO_SUCH_CENTER
from libya_elections.phone_numbers import get_random_phone_number
from polling_reports.handlers import ReportsShortCodeHandler
from polling_reports.models import StaffPhone, PollingReport, CenterOpen, PreliminaryVoteCount
from register.models import Whitelist, Registration, SMS
from register.tests.base import LibyaRapidTest
from register.tests.factories import WhitelistFactory, RegistrationCenterFactory, \
RegistrationFactory
from text_messages.models import MessageText
from text_messages.utils import get_message
from voting.tests.factories import ElectionFactory
# Unique sentinel: pass as an expected value to check_it_out() to indicate
# a don't-care condition, i.e. skip that particular check in the tests.
DONT_CARE = object()
def get_message_label(message_code):
    """Look up the MessageText row numbered *message_code* and return its label."""
    message = MessageText.objects.get(number=message_code)
    return message.label
class PollingReportLogicTestCase(LibyaRapidTest):
    """Shared harness for the polling-report SMS handler tests.

    Subclasses call check_it_out() to "send" an inbound SMS and verify both
    the response message and the database side effects.
    """

    def setUp(self):
        self.NUMBER = get_random_phone_number()
        # Most tests need the number whitelisted, so whitelist by default
        WhitelistFactory(phone_number=self.NUMBER)
        # An election whose polling window spans "now", so reports are
        # accepted unless a test explicitly disables the reporting periods.
        self.election = ElectionFactory(
            polling_start_time=now() - timedelta(hours=1),
            polling_end_time=now() + timedelta(hours=1),
        )

    def check_it_out(self,
                     message,
                     expected_response_code,
                     expected_msg_type,
                     expect_phone_activated,
                     expect_report_saved,
                     expect_center_opened,
                     expect_votes_saved=DONT_CARE,
                     # change the test environment:
                     activation_center_opening_period=True,
                     polling_report_period=True,
                     ):
        """
        "Send" the message and see if the response and side effects are what we expect.

        Any expectation argument may be the DONT_CARE sentinel, which skips
        that particular check. The two period flags patch the handler-module
        helpers that gate center opening and polling reports.
        """
        conn = self.lookup_connections(identities=[self.NUMBER])[0]
        self.assertEqual(self.NUMBER, conn.identity)
        fields = {'to_addr': settings.REPORTS_SHORT_CODE,
                  'from_addr': conn.identity}
        # These names are getting way long...
        opening_enabled_function = \
            'polling_reports.handlers.center_opening_enabled'
        # Patch the period gates so each test controls whether activation /
        # reporting is currently open, independent of the wall clock.
        with patch('polling_reports.handlers.polling_reports_enabled') as pr_enabled, \
                patch(opening_enabled_function) as ce_enabled:
            ce_enabled.return_value = activation_center_opening_period
            pr_enabled.return_value = polling_report_period
            self.receive(message, conn, fields=fields)
        actual_response_code = self.get_last_response_code()
        actual_msg_type = self.get_last_response().sms.msg_type
        if expected_response_code not in (DONT_CARE, actual_response_code):
            # Include both message labels in the failure for readability.
            expected_label = get_message_label(expected_response_code)
            actual_label = get_message_label(actual_response_code)
            self.fail("Expected response code was %s (%s), got %s (%s)" %
                      (expected_response_code, expected_label,
                       actual_response_code, actual_label))
        if expected_msg_type not in (DONT_CARE, actual_msg_type):
            self.fail("Expected msg_type was %s, got %s" % (expected_msg_type, actual_msg_type))
        # Side-effect checks: each model is asserted present or absent
        # depending on the corresponding expectation flag.
        if expect_phone_activated is not DONT_CARE:
            exists = StaffPhone.objects.filter(phone_number=self.NUMBER).exists()
            if expect_phone_activated:
                self.assertTrue(exists)
            else:
                self.assertFalse(exists)
        if expect_report_saved is not DONT_CARE:
            exists = PollingReport.objects.all().exists()
            if expect_report_saved:
                self.assertTrue(exists)
            else:
                self.assertFalse(exists)
        if expect_center_opened is not DONT_CARE:
            exists = CenterOpen.objects.all().exists()
            if expect_center_opened:
                self.assertTrue(exists)
            else:
                self.assertFalse(exists)
        if expect_votes_saved is not DONT_CARE:
            exists = PreliminaryVoteCount.objects.all().exists()
            if expect_votes_saved:
                self.assertTrue(exists)
            else:
                self.assertFalse(exists)
        # Also test that the message came back in Arabic, just to be safe,
        # by getting the code's message in arabic. Just look at the part up
        # to the first replaceable parameter, that's enough to make sure we
        # used the right language.
        with override(language='ar'):
            expected_message = get_message(expected_response_code).msg
        # Strip off everything from the first replaceable parameter
        if '{' in expected_message:
            offset = expected_message.find('{')
            expected_message = expected_message[:offset]
        if '%s' in expected_message:
            offset = expected_message.find('%s')
            expected_message = expected_message[:offset]
        actual_message = self.get_last_response_message()
        self.assertTrue(actual_message.startswith(expected_message),
                        msg="Expected %r to start with %r" % (actual_message, expected_message))
class PollingReportTestNotWhitelisted(PollingReportLogicTestCase):
    """Cases where the sender is not whitelisted or the handler itself fails."""

    def test_not_whitelisted(self):
        # Remove every whitelist entry, including the one setUp created.
        Whitelist.objects.all().delete()
        self.check_it_out("doesnt matter",
                          expected_response_code=NOT_WHITELISTED_NUMBER,
                          expected_msg_type=DONT_CARE,
                          expect_phone_activated=False,
                          expect_report_saved=False,
                          expect_center_opened=False)

    def test_exception_during_processing(self):
        # An unexpected exception inside the handler must surface as a
        # server-error reply rather than propagate.
        with patch.object(ReportsShortCodeHandler, 'is_addressed_to_us') as is_addressed:
            is_addressed.side_effect = ValueError
            self.check_it_out("anything",
                              expected_response_code=RESPONSE_SERVER_ERROR,
                              expected_msg_type=DONT_CARE,
                              expect_phone_activated=False,
                              expect_report_saved=False,
                              expect_center_opened=False)
class PollingReportTestPhoneNotActivated(PollingReportLogicTestCase):
    """
    Tests for when the phone has NOT been activated to a center already.
    """

    def test_bad_message_formats(self):
        # None of these shapes is a valid activation message; each should get
        # the not-activated reply and leave the database untouched.
        for bad_message in ("not numbers", "27", "27*23434*14"):
            self.check_it_out(bad_message,
                              expected_response_code=PHONE_NOT_ACTIVATED,
                              expected_msg_type=SMS.NOT_ACTIVATED,
                              expect_phone_activated=False,
                              expect_report_saved=False,
                              expect_center_opened=False)

    def test_outside_activation_period(self):
        self.election.delete()
        center = RegistrationCenterFactory()
        self.check_it_out("{0:d}*{0:d}".format(center.center_id),
                          expected_response_code=POLLING_NOT_OPEN,
                          expected_msg_type=DONT_CARE,
                          expect_phone_activated=False,
                          expect_report_saved=False,
                          expect_center_opened=False,
                          # Activation period not open:
                          activation_center_opening_period=False,
                          )

    def test_mismatched_numbers(self):
        # Activation requires the center id repeated; two different ids fail.
        center1 = RegistrationCenterFactory()
        center2 = RegistrationCenterFactory()
        self.check_it_out("{:d}*{:d}".format(center1.center_id, center2.center_id),
                          expected_response_code=POLLING_REPORT_CENTER_MISMATCH,
                          expected_msg_type=SMS.POLLING_REPORT_INVALID,
                          expect_phone_activated=False,
                          expect_report_saved=False,
                          expect_center_opened=False)

    def test_no_such_center(self):
        self.check_it_out("{0:d}*{0:d}".format(NO_SUCH_CENTER),
                          expected_response_code=INVALID_CENTER_ID,
                          expected_msg_type=SMS.POLLING_REPORT_INVALID,
                          expect_phone_activated=False,
                          expect_report_saved=False,
                          expect_center_opened=False)

    def test_valid_activation_message(self):
        center = RegistrationCenterFactory()
        # The LAST message we receive will be that the center was opened,
        # but we'll also check that we got a PHONE_ACTIVATED before that
        self.check_it_out("{0:d}*{0:d}".format(center.center_id),
                          expected_response_code=CENTER_OPENED,
                          expected_msg_type=SMS.ACTIVATE,
                          expect_phone_activated=True,
                          expect_report_saved=False,
                          expect_center_opened=True)
        self.assertIn(PHONE_ACTIVATED, self.get_all_response_codes())
        # The phone must now be bound to exactly this center.
        StaffPhone.objects.get(phone_number=self.NUMBER,
                               registration_center=center)
class CenterOpenTestPhoneActivated(PollingReportLogicTestCase):
    """Phone activated, one number in the message (center-open reports)."""

    def setUp(self):
        super(CenterOpenTestPhoneActivated, self).setUp()
        self.center = RegistrationCenterFactory()
        # Bind the sending phone to the center up front.
        StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)

    def test_valid_center_opening(self):
        # Center opening message for the center the phone is activated to.
        message = "{:d}*{:d}".format(CENTER_OPEN_INDICATOR, self.center.center_id)
        self.check_it_out(
            message,
            expected_response_code=CENTER_OPENED,
            expected_msg_type=SMS.ACTIVATE,
            expect_phone_activated=DONT_CARE,  # it already was
            expect_report_saved=False,
            expect_center_opened=True
        )
        # A CenterOpen record must exist for exactly this phone and center.
        CenterOpen.objects.get(phone_number=self.NUMBER,
                               registration_center=self.center)

    def test_wrong_center_center_opening(self):
        # Center opening message, not the center the phone is activated to.
        other_center = RegistrationCenterFactory()
        message = "{:d}*{:d}".format(CENTER_OPEN_INDICATOR, other_center.center_id)
        self.check_it_out(
            message,
            expected_response_code=CENTER_OPENING_NOT_AUTHORIZED,
            expected_msg_type=SMS.NOT_ACTIVATED,
            expect_phone_activated=DONT_CARE,  # it already was
            expect_report_saved=False,
            expect_center_opened=False
        )

    def test_invalid_center_center_opening(self):
        # Center opening message naming a center id that does not exist.
        self.check_it_out(
            "{:d}*99".format(CENTER_OPEN_INDICATOR),
            expected_response_code=INVALID_CENTER_ID,
            expected_msg_type=SMS.POLLING_REPORT_INVALID,
            expect_phone_activated=DONT_CARE,  # it already was
            expect_report_saved=False,
            expect_center_opened=False
        )

    def test_not_in_center_opening_period(self):
        # A valid message is still rejected when the opening period is closed.
        message = "{:d}*{:d}".format(CENTER_OPEN_INDICATOR, self.center.center_id)
        self.check_it_out(
            message,
            expected_response_code=POLLING_NOT_OPEN,
            expected_msg_type=DONT_CARE,
            expect_phone_activated=DONT_CARE,  # it already was
            expect_report_saved=False,
            expect_center_opened=False,
            activation_center_opening_period=False,
        )
class PollingReportTestPhoneActivated(PollingReportLogicTestCase):
"""
Tests for when the phone HAS been activated to a center already
and message has two numbers
"""
def setUp(self):
super(PollingReportTestPhoneActivated, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
# Create some registrations so that reports don't arrive for a center with no registrations.
create_batch(RegistrationFactory, 11, registration_center=self.center, archive_time=None)
def test_activated_poll_report_but_polling_period_not_open(self):
# Looks like a poll report but polling is not open
self.check_it_out(
"%d*2" % FIRST_PERIOD_NUMBER,
expected_response_code=POLLING_NOT_OPEN,
expected_msg_type=DONT_CARE,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False,
polling_report_period=False
)
def test_valid_report_period_first(self):
# A valid polling report
self.check_it_out(
"%d*2" % FIRST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=FIRST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_period_last(self):
# A valid polling report
self.check_it_out(
"%d*2" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_copy_center(self):
# A valid polling report to a copy center
copy_center = RegistrationCenterFactory(copy_of=self.center)
# Replace the StaffPhone with one registered to the copy center.
StaffPhone.objects.all().delete()
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=copy_center)
self.check_it_out(
"%d*2" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=copy_center,
period_number=LAST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_high_turnout(self):
# A valid polling report, but with high turnout
self.check_it_out(
"%d*10" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED_VERY_HIGH_TURNOUT,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=10)
def test_valid_report_with_no_registrations(self):
# A valid polling report, but to a center with 0 registrations
Registration.objects.filter(registration_center=self.center).update(archive_time=now())
self.check_it_out(
"%d*10" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED_NO_REGISTRANTS,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=10)
def test_bad_first_number_of_two(self):
# Not quite a polling report nor a center open message
self.check_it_out(
"%d*2" % (LAST_PERIOD_NUMBER + 1),
expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False
)
class PrelimVoteReportTestPhoneActivated(PollingReportLogicTestCase):
def setUp(self):
super(PrelimVoteReportTestPhoneActivated, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
self.election = ElectionFactory(
polling_start_time=now() - timedelta(hours=2),
polling_end_time=now() + timedelta(hours=2),
)
def test_prelim_vote_report(self):
self.check_it_out("5#3#2",
expected_response_code=PRELIMINARY_VOTES_REPORT,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE,
expect_report_saved=False,
expect_center_opened=False,
expect_votes_saved=True)
report = PreliminaryVoteCount.objects.get(
election=self.election,
option=3
)
self.assertEqual(2, report.num_votes)
class BadMessageTestPhoneActivatedNeitherOneNorTwoNumbers(PollingReportLogicTestCase):
def setUp(self):
super(BadMessageTestPhoneActivatedNeitherOneNorTwoNumbers, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
def test_bad_message_formats(self):
self.check_it_out("not numbers", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("27*23434*14", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False) | polling_reports/tests/test_logic.py | from datetime import timedelta
from unittest.mock import patch
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import override
from factory import create_batch
from libya_elections.constants import PHONE_NOT_ACTIVATED, NOT_WHITELISTED_NUMBER, \
POLLING_REPORT_CENTER_MISMATCH, INVALID_CENTER_ID, PHONE_ACTIVATED, POLLING_REPORT_INVALID, \
POLLING_NOT_OPEN, POLLING_REPORT_RECEIVED, CENTER_OPENING_NOT_AUTHORIZED, \
PRELIMINARY_VOTES_REPORT, CENTER_OPENED, CENTER_OPEN_INDICATOR, FIRST_PERIOD_NUMBER, \
LAST_PERIOD_NUMBER, POLLING_REPORT_RECEIVED_VERY_HIGH_TURNOUT, \
POLLING_REPORT_RECEIVED_NO_REGISTRANTS, RESPONSE_SERVER_ERROR, NO_SUCH_CENTER
from libya_elections.phone_numbers import get_random_phone_number
from polling_reports.handlers import ReportsShortCodeHandler
from polling_reports.models import StaffPhone, PollingReport, CenterOpen, PreliminaryVoteCount
from register.models import Whitelist, Registration, SMS
from register.tests.base import LibyaRapidTest
from register.tests.factories import WhitelistFactory, RegistrationCenterFactory, \
RegistrationFactory
from text_messages.models import MessageText
from text_messages.utils import get_message
from voting.tests.factories import ElectionFactory
DONT_CARE = object() # Unique object used to indicate a don't-care condition in the tests
def get_message_label(message_code):
"""Return the message label of the message with the given code"""
return MessageText.objects.get(number=message_code).label
class PollingReportLogicTestCase(LibyaRapidTest):
def setUp(self):
self.NUMBER = get_random_phone_number()
# Most tests need the number whitelisted, so whitelist by default
WhitelistFactory(phone_number=self.NUMBER)
self.election = ElectionFactory(
polling_start_time=now() - timedelta(hours=1),
polling_end_time=now() + timedelta(hours=1),
)
def check_it_out(self,
message,
expected_response_code,
expected_msg_type,
expect_phone_activated,
expect_report_saved,
expect_center_opened,
expect_votes_saved=DONT_CARE,
# change the test environment:
activation_center_opening_period=True,
polling_report_period=True,
):
"""
"Send" the message and see if the response and side effects are what we expect.
"""
conn = self.lookup_connections(identities=[self.NUMBER])[0]
self.assertEqual(self.NUMBER, conn.identity)
fields = {'to_addr': settings.REPORTS_SHORT_CODE,
'from_addr': conn.identity}
# These names are getting way long...
opening_enabled_function = \
'polling_reports.handlers.center_opening_enabled'
with patch('polling_reports.handlers.polling_reports_enabled') as pr_enabled, \
patch(opening_enabled_function) as ce_enabled:
ce_enabled.return_value = activation_center_opening_period
pr_enabled.return_value = polling_report_period
self.receive(message, conn, fields=fields)
actual_response_code = self.get_last_response_code()
actual_msg_type = self.get_last_response().sms.msg_type
if expected_response_code not in (DONT_CARE, actual_response_code):
expected_label = get_message_label(expected_response_code)
actual_label = get_message_label(actual_response_code)
self.fail("Expected response code was %s (%s), got %s (%s)" %
(expected_response_code, expected_label,
actual_response_code, actual_label))
if expected_msg_type not in (DONT_CARE, actual_msg_type):
self.fail("Expected msg_type was %s, got %s" % (expected_msg_type, actual_msg_type))
if expect_phone_activated is not DONT_CARE:
exists = StaffPhone.objects.filter(phone_number=self.NUMBER).exists()
if expect_phone_activated:
self.assertTrue(exists)
else:
self.assertFalse(exists)
if expect_report_saved is not DONT_CARE:
exists = PollingReport.objects.all().exists()
if expect_report_saved:
self.assertTrue(exists)
else:
self.assertFalse(exists)
if expect_center_opened is not DONT_CARE:
exists = CenterOpen.objects.all().exists()
if expect_center_opened:
self.assertTrue(exists)
else:
self.assertFalse(exists)
if expect_votes_saved is not DONT_CARE:
exists = PreliminaryVoteCount.objects.all().exists()
if expect_votes_saved:
self.assertTrue(exists)
else:
self.assertFalse(exists)
# Also test that the message came back in Arabic, just to be safe,
# by getting the code's message in arabic. Just look at the part up
# to the first replaceable parameter, that's enough to make sure we
# used the right language.
with override(language='ar'):
expected_message = get_message(expected_response_code).msg
# Strip off everything from the first replaceable parameter
if '{' in expected_message:
offset = expected_message.find('{')
expected_message = expected_message[:offset]
if '%s' in expected_message:
offset = expected_message.find('%s')
expected_message = expected_message[:offset]
actual_message = self.get_last_response_message()
self.assertTrue(actual_message.startswith(expected_message),
msg="Expected %r to start with %r" % (actual_message, expected_message))
class PollingReportTestNotWhitelisted(PollingReportLogicTestCase):
def test_not_whitelisted(self):
Whitelist.objects.all().delete()
self.check_it_out("doesnt matter", NOT_WHITELISTED_NUMBER, DONT_CARE, False, False, False)
def test_exception_during_processing(self):
with patch.object(ReportsShortCodeHandler, 'is_addressed_to_us') as is_addressed:
is_addressed.side_effect = ValueError
self.check_it_out("anything", RESPONSE_SERVER_ERROR, DONT_CARE, False, False, False)
class PollingReportTestPhoneNotActivated(PollingReportLogicTestCase):
"""
Tests for when the phone has NOT been activated to a center already.
"""
def test_bad_message_formats(self):
self.check_it_out("not numbers", expected_response_code=PHONE_NOT_ACTIVATED,
expected_msg_type=SMS.NOT_ACTIVATED,
expect_phone_activated=False, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("27", expected_response_code=PHONE_NOT_ACTIVATED,
expected_msg_type=SMS.NOT_ACTIVATED,
expect_phone_activated=False, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("27*23434*14", expected_response_code=PHONE_NOT_ACTIVATED,
expected_msg_type=SMS.NOT_ACTIVATED,
expect_phone_activated=False, expect_report_saved=False,
expect_center_opened=False)
def test_outside_activation_period(self):
self.election.delete()
center = RegistrationCenterFactory()
self.check_it_out("%d*%d" % (center.center_id, center.center_id),
POLLING_NOT_OPEN,
DONT_CARE,
expect_phone_activated=False,
expect_report_saved=False,
expect_center_opened=False,
# Activation period not open:
activation_center_opening_period=False,
)
def test_mismatched_numbers(self):
center1 = RegistrationCenterFactory()
center2 = RegistrationCenterFactory()
self.check_it_out("%d*%d" % (center1.center_id, center2.center_id),
POLLING_REPORT_CENTER_MISMATCH,
SMS.POLLING_REPORT_INVALID,
False, False,
expect_center_opened=False)
def test_no_such_center(self):
center_num = NO_SUCH_CENTER
self.check_it_out("%d*%d" % (center_num, center_num),
INVALID_CENTER_ID,
SMS.POLLING_REPORT_INVALID,
False, False,
expect_center_opened=False)
def test_valid_activation_message(self):
center = RegistrationCenterFactory()
# The LAST message we receive will be that the center was opened,
# but we'll also check that we got a PHONE_ACTIVATED before that
self.check_it_out("%d*%d" % (center.center_id, center.center_id),
CENTER_OPENED,
SMS.ACTIVATE,
expect_phone_activated=True,
expect_report_saved=False,
expect_center_opened=True)
self.assertIn(PHONE_ACTIVATED, self.get_all_response_codes())
StaffPhone.objects.get(phone_number=self.NUMBER,
registration_center=center)
class CenterOpenTestPhoneActivated(PollingReportLogicTestCase):
"""Phone activated, one number in the message"""
def setUp(self):
super(CenterOpenTestPhoneActivated, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
def test_valid_center_opening(self):
# Center opening message
self.check_it_out(
"%d*%d" % (CENTER_OPEN_INDICATOR, self.center.center_id),
expected_response_code=CENTER_OPENED,
expected_msg_type=SMS.ACTIVATE,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=True
)
CenterOpen.objects.get(phone_number=self.NUMBER,
registration_center=self.center)
def test_wrong_center_center_opening(self):
# Center opening message, not the center the phone is activated to
center2 = RegistrationCenterFactory()
self.check_it_out(
"%d*%d" % (CENTER_OPEN_INDICATOR, center2.center_id),
expected_response_code=CENTER_OPENING_NOT_AUTHORIZED,
expected_msg_type=SMS.NOT_ACTIVATED,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False
)
def test_invalid_center_center_opening(self):
# Center opening message, not a valid center
self.check_it_out(
"%d*99" % CENTER_OPEN_INDICATOR,
expected_response_code=INVALID_CENTER_ID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False
)
def test_not_in_center_opening_period(self):
self.check_it_out(
"%d*%d" % (CENTER_OPEN_INDICATOR, self.center.center_id),
expected_response_code=POLLING_NOT_OPEN,
expected_msg_type=DONT_CARE,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False,
activation_center_opening_period=False,
)
class PollingReportTestPhoneActivated(PollingReportLogicTestCase):
"""
Tests for when the phone HAS been activated to a center already
and message has two numbers
"""
def setUp(self):
super(PollingReportTestPhoneActivated, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
# Create some registrations so that reports don't arrive for a center with no registrations.
create_batch(RegistrationFactory, 11, registration_center=self.center, archive_time=None)
def test_activated_poll_report_but_polling_period_not_open(self):
# Looks like a poll report but polling is not open
self.check_it_out(
"%d*2" % FIRST_PERIOD_NUMBER,
expected_response_code=POLLING_NOT_OPEN,
expected_msg_type=DONT_CARE,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False,
polling_report_period=False
)
def test_valid_report_period_first(self):
# A valid polling report
self.check_it_out(
"%d*2" % FIRST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=FIRST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_period_last(self):
# A valid polling report
self.check_it_out(
"%d*2" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_copy_center(self):
# A valid polling report to a copy center
copy_center = RegistrationCenterFactory(copy_of=self.center)
# Replace the StaffPhone with one registered to the copy center.
StaffPhone.objects.all().delete()
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=copy_center)
self.check_it_out(
"%d*2" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=copy_center,
period_number=LAST_PERIOD_NUMBER,
num_voters=2)
def test_valid_report_high_turnout(self):
# A valid polling report, but with high turnout
self.check_it_out(
"%d*10" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED_VERY_HIGH_TURNOUT,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=10)
def test_valid_report_with_no_registrations(self):
# A valid polling report, but to a center with 0 registrations
Registration.objects.filter(registration_center=self.center).update(archive_time=now())
self.check_it_out(
"%d*10" % LAST_PERIOD_NUMBER,
expected_response_code=POLLING_REPORT_RECEIVED_NO_REGISTRANTS,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=True,
expect_center_opened=False
)
PollingReport.objects.get(registration_center=self.center,
period_number=LAST_PERIOD_NUMBER,
num_voters=10)
def test_bad_first_number_of_two(self):
# Not quite a polling report nor a center open message
self.check_it_out(
"%d*2" % (LAST_PERIOD_NUMBER + 1),
expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, # it already was
expect_report_saved=False,
expect_center_opened=False
)
class PrelimVoteReportTestPhoneActivated(PollingReportLogicTestCase):
def setUp(self):
super(PrelimVoteReportTestPhoneActivated, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
self.election = ElectionFactory(
polling_start_time=now() - timedelta(hours=2),
polling_end_time=now() + timedelta(hours=2),
)
def test_prelim_vote_report(self):
self.check_it_out("5#3#2",
expected_response_code=PRELIMINARY_VOTES_REPORT,
expected_msg_type=SMS.POLLING_REPORT,
expect_phone_activated=DONT_CARE,
expect_report_saved=False,
expect_center_opened=False,
expect_votes_saved=True)
report = PreliminaryVoteCount.objects.get(
election=self.election,
option=3
)
self.assertEqual(2, report.num_votes)
class BadMessageTestPhoneActivatedNeitherOneNorTwoNumbers(PollingReportLogicTestCase):
def setUp(self):
super(BadMessageTestPhoneActivatedNeitherOneNorTwoNumbers, self).setUp()
self.center = RegistrationCenterFactory()
# Activate the phone to the center:
StaffPhone.objects.create(phone_number=self.NUMBER, registration_center=self.center)
def test_bad_message_formats(self):
self.check_it_out("not numbers", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False)
self.check_it_out("27*23434*14", expected_response_code=POLLING_REPORT_INVALID,
expected_msg_type=SMS.POLLING_REPORT_INVALID,
expect_phone_activated=DONT_CARE, expect_report_saved=False,
expect_center_opened=False) | 0.633637 | 0.230627 |
from __future__ import print_function, division
import numpy as np
import operator
from qsrlib_qsrs.qsr_arg_relations_abstractclass import QSR_Arg_Relations_Abstractclass
from qsrlib_io.world_qsr_trace import *
class QSR_Arg_Relations_Distance(QSR_Arg_Relations_Abstractclass):
"""Argument distance relations.
.. note:: The relations are defined on the intervals of distance thresholds [d\ :sub:`k`, d\ :sub:`k+1`).
Values of the abstract properties
* **_unique_id** = "argd"
* **_all_possible_relations** = depends on what user has passed
* **_dtype** = "points"
QSR specific `dynamic_args`
* **'qsr_relations_and_values'**: A dictionary with keys being the relations labels and values the distance thresholds as an int or a float.
.. seealso:: For further details, refer to its :doc:`description. <../handwritten/qsrs/argd>`
"""
_unique_id = "argd"
"""str: Unique identifier of the QSR."""
_all_possible_relations = ()
"""tuple: All possible relations of the QSR."""
_dtype = "points"
"""str: Kind of data the QSR operates with, see self._dtype_map for possible values."""
def __init__(self):
"""Constructor."""
super(QSR_Arg_Relations_Distance, self).__init__()
# todo: should be private/protected
self.allowed_value_types = (int, float)
"""tuple: distance thresholds can only be int or float"""
# todo: should be private/protected
self.value_sort_key = operator.itemgetter(1)
"""operator.itemgetter: Sort keys/values by threshold value."""
def _process_qsr_parameters_from_request_parameters(self, req_params, **kwargs):
"""
:param req_params:
:type req_params:
:param kwargs: kwargs arguments.
:raises: KeyError
"""
try:
self._set_qsr_relations_and_values(qsr_relations_and_values=req_params["dynamic_args"][self._unique_id]["qsr_relations_and_values"])
except KeyError:
raise KeyError("qsr_relations_and_values not set")
def _compute_qsr(self, data1, data2, qsr_params, **kwargs):
"""
:param data1:
:type data1:
:param data2:
:type data2:
:param qsr_params:
:type qsr_params:
:param kwargs: kwargs arguments.
:return: argd relation.
:rtype: str
"""
if np.isnan(data1.z) or np.isnan(data2.z):
d = np.sqrt(np.square(data1.x - data2.x) + np.square(data1.y - data2.y))
else:
d = np.sqrt(np.square(data1.x - data2.x) + np.square(data1.y - data2.y) + np.square(data1.z - data2.z))
for thres, relation in zip(self.all_possible_values, self._all_possible_relations):
if d <= thres:
return relation
return self._all_possible_relations[-1] | qsrlib/src/qsrlib_qsrs/qsr_arg_relations_distance.py | from __future__ import print_function, division
import numpy as np
import operator
from qsrlib_qsrs.qsr_arg_relations_abstractclass import QSR_Arg_Relations_Abstractclass
from qsrlib_io.world_qsr_trace import *
class QSR_Arg_Relations_Distance(QSR_Arg_Relations_Abstractclass):
"""Argument distance relations.
.. note:: The relations are defined on the intervals of distance thresholds [d\ :sub:`k`, d\ :sub:`k+1`).
Values of the abstract properties
* **_unique_id** = "argd"
* **_all_possible_relations** = depends on what user has passed
* **_dtype** = "points"
QSR specific `dynamic_args`
* **'qsr_relations_and_values'**: A dictionary with keys being the relations labels and values the distance thresholds as an int or a float.
.. seealso:: For further details, refer to its :doc:`description. <../handwritten/qsrs/argd>`
"""
_unique_id = "argd"
"""str: Unique identifier of the QSR."""
_all_possible_relations = ()
"""tuple: All possible relations of the QSR."""
_dtype = "points"
"""str: Kind of data the QSR operates with, see self._dtype_map for possible values."""
def __init__(self):
"""Constructor."""
super(QSR_Arg_Relations_Distance, self).__init__()
# todo: should be private/protected
self.allowed_value_types = (int, float)
"""tuple: distance thresholds can only be int or float"""
# todo: should be private/protected
self.value_sort_key = operator.itemgetter(1)
"""operator.itemgetter: Sort keys/values by threshold value."""
def _process_qsr_parameters_from_request_parameters(self, req_params, **kwargs):
"""
:param req_params:
:type req_params:
:param kwargs: kwargs arguments.
:raises: KeyError
"""
try:
self._set_qsr_relations_and_values(qsr_relations_and_values=req_params["dynamic_args"][self._unique_id]["qsr_relations_and_values"])
except KeyError:
raise KeyError("qsr_relations_and_values not set")
def _compute_qsr(self, data1, data2, qsr_params, **kwargs):
"""
:param data1:
:type data1:
:param data2:
:type data2:
:param qsr_params:
:type qsr_params:
:param kwargs: kwargs arguments.
:return: argd relation.
:rtype: str
"""
if np.isnan(data1.z) or np.isnan(data2.z):
d = np.sqrt(np.square(data1.x - data2.x) + np.square(data1.y - data2.y))
else:
d = np.sqrt(np.square(data1.x - data2.x) + np.square(data1.y - data2.y) + np.square(data1.z - data2.z))
for thres, relation in zip(self.all_possible_values, self._all_possible_relations):
if d <= thres:
return relation
return self._all_possible_relations[-1] | 0.697609 | 0.408778 |
from mayavi import mlab
from BDSpace import Space
from BDSpace.Figure.Sphere import *
from BDSpace.Curve.Parametric import Arc
from BDSpace.Coordinates import Cartesian
import BDSpaceVis as Visual
solar_system = Space('Solar System')
sun = Sphere('Sun', r_outer=0.2)
mercury = Space('Mercury', Cartesian(origin=[0.5, 0.5, 0.5]))
venus = Space('Venus', Cartesian(origin=[1, 1, 1]))
earth_orbit = Arc(name='Earth orbit', a=1, b=2, start=0, stop=2*np.pi, right=True)
earth = Sphere(name='Earth', coordinate_system=Cartesian(origin=[1.5, 1.5, 1.5]), r_outer=0.02)
mars = Space('Mars', Cartesian(origin=[2, 2, 2]))
solar_system.add_element(sun)
#solar_system.add_element(mercury)
#solar_system.add_element(venus)
solar_system.add_element(earth_orbit)
solar_system.add_element(earth)
#solar_system.add_element(mars)
#moon = SphericalCone('Moon', Cartesian(origin=[0.2, 0.2, 0.2]),
# r_inner=0.5, r_outer=1.0, theta=np.pi/6)
#earth.add_element(moon)
#lunohod = SphericalWedge('Lunohod', Cartesian(origin=[0.1, 0.1, 0.1]),
# r_inner=0.05, r_outer=0.1, phi=np.pi/2, theta=np.pi/2)
#lunohod = SphericalSection('Lunohod', Cartesian(origin=[0.1, 0.1, 0.1]),
# r_inner=0.05, r_outer=0.1, h1=0.01, h2=0.03)
#moon.add_element(lunohod)
#phobos = Space('Phobos', Cartesian(origin=[0.2, 0.2, 0.2]))
#deimos = Space('Deimos', Cartesian(origin=[-0.2, 0, 0]))
#mars.add_element(phobos)
#mars.add_element(deimos)
fig = mlab.figure('CS demo', bgcolor=(0.5, 0.5, 0.5)) # Create the mayavi figure
views = Visual.gen_space_views(fig, solar_system)
views['Solar System'].set_cs_visible(False)
views['Sun'].set_cs_visible(False)
views['Earth'].set_cs_visible(True)
views['Sun'].set_color((1.0, 1.0, 0.2))
views['Earth'].set_color((0.0, 0.0, 0.5))
Visual.draw_space(views)
#views['Lunohod'].set_wireframe(True)
#views['Moon'].set_wireframe(True)
#views['Moon'].set_cs_visible(True)
@mlab.show
@mlab.animate(delay=100)
def anim():
while True:
Visual.draw_space(views)
earth.coordinate_system.rotate_axis_angle(np.array([1, 1, 1], dtype=np.double), np.deg2rad(1))
#moon.coordinate_system.rotate_axis_angle(np.array([0, 0, 1], dtype=np.double), np.deg2rad(1))
yield
anim() | demo/05_space_visualization_planets.py | from mayavi import mlab
from BDSpace import Space
from BDSpace.Figure.Sphere import *
from BDSpace.Curve.Parametric import Arc
from BDSpace.Coordinates import Cartesian
import BDSpaceVis as Visual
solar_system = Space('Solar System')
sun = Sphere('Sun', r_outer=0.2)
mercury = Space('Mercury', Cartesian(origin=[0.5, 0.5, 0.5]))
venus = Space('Venus', Cartesian(origin=[1, 1, 1]))
earth_orbit = Arc(name='Earth orbit', a=1, b=2, start=0, stop=2*np.pi, right=True)
earth = Sphere(name='Earth', coordinate_system=Cartesian(origin=[1.5, 1.5, 1.5]), r_outer=0.02)
mars = Space('Mars', Cartesian(origin=[2, 2, 2]))
solar_system.add_element(sun)
#solar_system.add_element(mercury)
#solar_system.add_element(venus)
solar_system.add_element(earth_orbit)
solar_system.add_element(earth)
#solar_system.add_element(mars)
#moon = SphericalCone('Moon', Cartesian(origin=[0.2, 0.2, 0.2]),
# r_inner=0.5, r_outer=1.0, theta=np.pi/6)
#earth.add_element(moon)
#lunohod = SphericalWedge('Lunohod', Cartesian(origin=[0.1, 0.1, 0.1]),
# r_inner=0.05, r_outer=0.1, phi=np.pi/2, theta=np.pi/2)
#lunohod = SphericalSection('Lunohod', Cartesian(origin=[0.1, 0.1, 0.1]),
# r_inner=0.05, r_outer=0.1, h1=0.01, h2=0.03)
#moon.add_element(lunohod)
#phobos = Space('Phobos', Cartesian(origin=[0.2, 0.2, 0.2]))
#deimos = Space('Deimos', Cartesian(origin=[-0.2, 0, 0]))
#mars.add_element(phobos)
#mars.add_element(deimos)
fig = mlab.figure('CS demo', bgcolor=(0.5, 0.5, 0.5)) # Create the mayavi figure
views = Visual.gen_space_views(fig, solar_system)
views['Solar System'].set_cs_visible(False)
views['Sun'].set_cs_visible(False)
views['Earth'].set_cs_visible(True)
views['Sun'].set_color((1.0, 1.0, 0.2))
views['Earth'].set_color((0.0, 0.0, 0.5))
Visual.draw_space(views)
#views['Lunohod'].set_wireframe(True)
#views['Moon'].set_wireframe(True)
#views['Moon'].set_cs_visible(True)
@mlab.show
@mlab.animate(delay=100)
def anim():
while True:
Visual.draw_space(views)
earth.coordinate_system.rotate_axis_angle(np.array([1, 1, 1], dtype=np.double), np.deg2rad(1))
#moon.coordinate_system.rotate_axis_angle(np.array([0, 0, 1], dtype=np.double), np.deg2rad(1))
yield
anim() | 0.546738 | 0.529203 |
import time
from core.utils.const import SQLTuning
import logging
from core.utils.engines import get_engine
from rest_framework.response import Response
from core.utils.sql_utils import extract_tables
CUSTOM_ERROR = logging.getLogger('SqlManager.core.sql_tuning')
import re
class SqlTuning(object):
def __init__(self, instance, db_name, sqltext):
    """Collect the context needed to tune one SQL statement.

    :param instance: instance descriptor handed to ``get_engine`` to build the query engine.
    :param db_name: schema the statement runs against.
    :param sqltext: the SQL statement to analyse.
    """
    query_engine = get_engine(instance=instance)
    self.engine = query_engine
    self.db_name = db_name
    self.sqltext = sqltext
    # Template fetching the server variables named in SQLTuning.SYS_PARM_FILTER.
    # NOTE(review): the names are spliced into the IN (...) list with %;
    # assumes SYS_PARM_FILTER holds only trusted constant identifiers — confirm.
    self.sql_variable = '''
select
lower(variable_name) variable_name,
variable_value
from performance_schema.global_variables
where upper(variable_name) in ('%s')
order by variable_name;''' % ('\',\''.join(SQLTuning.SYS_PARM_FILTER))
    # Raw optimizer_switch value ("flag=on,flag=off,..."); parsed by
    # optimizer_switch() below.
    self.sql_optimizer_switch = '''
select variable_value
from performance_schema.global_variables
where upper(variable_name) = 'OPTIMIZER_SWITCH';
'''
    # Per-table size/row statistics; later formatted with (db_name, table_name).
    self.sql_table_info = '''
select
table_name,
engine,
row_format as format,
table_rows,
avg_row_length as avg_row,
round((data_length + index_length) / 1024 / 1024, 2) as total_mb,
round((data_length) / 1024 / 1024, 2) as data_mb,
round((index_length) / 1024 / 1024, 2) as index_mb
from information_schema.tables
where table_schema = '%s' and table_name = '%s'
'''
    # Per-table index layout; later formatted with (db_name, table_name).
    self.sql_table_index = '''
select
table_name,
index_name,
non_unique,
seq_in_index,
column_name,
collation,
cardinality,
nullable,
index_type
from information_schema.statistics
where table_schema = '%s' and table_name = '%s'
order by 1, 3;
'''
def is_number(self, num):
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
result = pattern.match(num)
if result:
return True
else:
return False
def __extract_tables(self):
"""获取sql语句中的表名"""
return [i['name'].strip('`') for i in extract_tables(self.sqltext)]
def basic_information(self):
return self.engine.query(sql="select @@version", close_conn=False).to_sep_dict()
def sys_parameter(self):
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_variable.replace('performance_schema', 'information_schema')
else:
sql = self.sql_variable
return self.engine.query2(sql=sql, close_conn=False,cursorclass='pymysql.cursors.DictCursor')
def optimizer_switch(self):
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
else:
sql = self.sql_optimizer_switch
aa = self.engine.query(sql=sql, close_conn=True)
res = []
a = aa.rows[0][0].split(',')
for v in a:
b = v.split('=')
dic = {'v_name':b[0],'v_value':b[1]}
res.append(dic)
return res
def sqlplan(self):
plan = self.engine.query2(db_name=self.db_name, sql="explain " + self.sqltext, close_conn=False,cursorclass='pymysql.cursors.DictCursor')
optimizer_rewrite_sql = self.engine.query(sql="show warnings", close_conn=False).to_sep_dict()
return plan, optimizer_rewrite_sql
# 获取关联表信息存在缺陷,只能获取到一张表
def object_statistics(self):
object_statistics = []
for index, table_name in enumerate(self.__extract_tables()):
object_statistics.append({
"structure": self.engine.query(
db_name=self.db_name, sql=f"show create table `{table_name}`;",
close_conn=False).to_sep_dict(),
"table_info": self.engine.query(
sql=self.sql_table_info % (self.db_name, table_name),
close_conn=False).to_sep_dict(),
"index_info": self.engine.query(
sql=self.sql_table_index % (self.db_name, table_name),
close_conn=False).to_sep_dict()
})
return object_statistics
def exec_sql(self):
result = {"EXECUTE_TIME": 0,
"session": {},
"PROFILING_DETAIL": {},
"PROFILING_SUMMARY": {'column_list': [], 'rows': []}
}
sql_profiling = """select concat(upper(left(variable_name,1)),
substring(lower(variable_name),
2,
(length(variable_name)-1))) var_name,
variable_value var_value
from performance_schema.session_status order by 1"""
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = sql_profiling.replace('performance_schema', 'information_schema')
else:
sql = sql_profiling
self.engine.query(sql="set profiling=1", close_conn=False).to_sep_dict()
records = self.engine.query(sql="select ifnull(max(query_id),0) from INFORMATION_SCHEMA.PROFILING",
close_conn=False).to_sep_dict()
query_id = records['rows'][0][0] + 3 # skip next sql
# 获取执行前信息
bb = self.engine.query(sql=sql, close_conn=False).to_sep_dict()
# 执行查询语句,统计执行时间
t_start = time.time()
self.engine.query(sql=self.sqltext, close_conn=False).to_sep_dict()
t_end = time.time()
cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
result['EXECUTE_TIME'] = cost_time
# 获取执行后信息
aa = self.engine.query(sql=sql, close_conn=False).to_sep_dict()
# 获取PROFILING_DETAIL信息
result['PROFILING_DETAIL'] = self.engine.query2(
sql="select STATE,DURATION,CPU_USER,CPU_SYSTEM,BLOCK_OPS_IN,BLOCK_OPS_OUT ,MESSAGES_SENT ,MESSAGES_RECEIVED ,PAGE_FAULTS_MAJOR ,PAGE_FAULTS_MINOR ,SWAPS from INFORMATION_SCHEMA.PROFILING where query_id=" + str(
query_id) + " order by seq", close_conn=False,cursorclass='pymysql.cursors.DictCursor')
result['PROFILING_SUMMARY'] = self.engine.query2(
sql="SELECT STATE,SUM(DURATION) AS Total_R,ROUND(100*SUM(DURATION)/(SELECT SUM(DURATION) FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + "),2) AS Pct_R,COUNT(*) AS Calls,SUM(DURATION)/COUNT(*) AS R_Call FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + " GROUP BY STATE ORDER BY Total_R DESC", close_conn=False,cursorclass='pymysql.cursors.DictCursor')
# 处理执行前后对比信息
session = []
b = bb['rows']
a = aa['rows']
len = b.__len__()
for i in range (len):
dict = {'v_name': '', 'bef': '', 'aft': '', 'diff': ''}
dict['v_name'] = a[i][0]
dict['aft'] = a[i][1] if a[i][1] else 0
dict['bef'] = b[i][1] if b[i][1] else 0
if a[i][1] is not '' and self.is_number(str(dict['aft'])):
dict['diff'] = float(float(a[i][1]) - float(b[i][1]))
session.append(dict)
result['session'] = session
return result
def opt_parameter(self):
# 获取mysql执行计划参数
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
else:
sql = self.sql_optimizer_switch
res = []
a = self.get_connection.search(sql=sql)['data'][0]['v_name'].split(',')
for v in a:
b = v.split('=')
dic = {'v_name':b[0],'v_value':b[1]}
res.append(dic)
return res
class Instance(object):
    """Lightweight holder for one database instance's connection settings.

    *instance* is a mapping; the keys read below are all required
    (a missing key raises KeyError).
    """
    def __init__(self, instance):
        self.instance_name = instance['instance_name']
        self.db_type = instance['db_type']  # database engine type
        self.host = instance['host']
        self.port = instance['port']
        self.user = instance['user']
        self.password = instance['password']
self.charset = instance['charset'] | src/core/api/sql_tuning.py |
import time
from core.utils.const import SQLTuning
import logging
from core.utils.engines import get_engine
from rest_framework.response import Response
from core.utils.sql_utils import extract_tables
CUSTOM_ERROR = logging.getLogger('SqlManager.core.sql_tuning')
import re
class SqlTuning(object):
def __init__(self, instance, db_name, sqltext):
query_engine = get_engine(instance=instance)
self.engine = query_engine
self.db_name = db_name
self.sqltext = sqltext
self.sql_variable = '''
select
lower(variable_name) variable_name,
variable_value
from performance_schema.global_variables
where upper(variable_name) in ('%s')
order by variable_name;''' % ('\',\''.join(SQLTuning.SYS_PARM_FILTER))
self.sql_optimizer_switch = '''
select variable_value
from performance_schema.global_variables
where upper(variable_name) = 'OPTIMIZER_SWITCH';
'''
self.sql_table_info = '''
select
table_name,
engine,
row_format as format,
table_rows,
avg_row_length as avg_row,
round((data_length + index_length) / 1024 / 1024, 2) as total_mb,
round((data_length) / 1024 / 1024, 2) as data_mb,
round((index_length) / 1024 / 1024, 2) as index_mb
from information_schema.tables
where table_schema = '%s' and table_name = '%s'
'''
self.sql_table_index = '''
select
table_name,
index_name,
non_unique,
seq_in_index,
column_name,
collation,
cardinality,
nullable,
index_type
from information_schema.statistics
where table_schema = '%s' and table_name = '%s'
order by 1, 3;
'''
def is_number(self, num):
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
result = pattern.match(num)
if result:
return True
else:
return False
def __extract_tables(self):
"""获取sql语句中的表名"""
return [i['name'].strip('`') for i in extract_tables(self.sqltext)]
def basic_information(self):
return self.engine.query(sql="select @@version", close_conn=False).to_sep_dict()
def sys_parameter(self):
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_variable.replace('performance_schema', 'information_schema')
else:
sql = self.sql_variable
return self.engine.query2(sql=sql, close_conn=False,cursorclass='pymysql.cursors.DictCursor')
def optimizer_switch(self):
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
else:
sql = self.sql_optimizer_switch
aa = self.engine.query(sql=sql, close_conn=True)
res = []
a = aa.rows[0][0].split(',')
for v in a:
b = v.split('=')
dic = {'v_name':b[0],'v_value':b[1]}
res.append(dic)
return res
def sqlplan(self):
plan = self.engine.query2(db_name=self.db_name, sql="explain " + self.sqltext, close_conn=False,cursorclass='pymysql.cursors.DictCursor')
optimizer_rewrite_sql = self.engine.query(sql="show warnings", close_conn=False).to_sep_dict()
return plan, optimizer_rewrite_sql
# 获取关联表信息存在缺陷,只能获取到一张表
def object_statistics(self):
object_statistics = []
for index, table_name in enumerate(self.__extract_tables()):
object_statistics.append({
"structure": self.engine.query(
db_name=self.db_name, sql=f"show create table `{table_name}`;",
close_conn=False).to_sep_dict(),
"table_info": self.engine.query(
sql=self.sql_table_info % (self.db_name, table_name),
close_conn=False).to_sep_dict(),
"index_info": self.engine.query(
sql=self.sql_table_index % (self.db_name, table_name),
close_conn=False).to_sep_dict()
})
return object_statistics
def exec_sql(self):
result = {"EXECUTE_TIME": 0,
"session": {},
"PROFILING_DETAIL": {},
"PROFILING_SUMMARY": {'column_list': [], 'rows': []}
}
sql_profiling = """select concat(upper(left(variable_name,1)),
substring(lower(variable_name),
2,
(length(variable_name)-1))) var_name,
variable_value var_value
from performance_schema.session_status order by 1"""
# 获取mysql版本信息
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = sql_profiling.replace('performance_schema', 'information_schema')
else:
sql = sql_profiling
self.engine.query(sql="set profiling=1", close_conn=False).to_sep_dict()
records = self.engine.query(sql="select ifnull(max(query_id),0) from INFORMATION_SCHEMA.PROFILING",
close_conn=False).to_sep_dict()
query_id = records['rows'][0][0] + 3 # skip next sql
# 获取执行前信息
bb = self.engine.query(sql=sql, close_conn=False).to_sep_dict()
# 执行查询语句,统计执行时间
t_start = time.time()
self.engine.query(sql=self.sqltext, close_conn=False).to_sep_dict()
t_end = time.time()
cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
result['EXECUTE_TIME'] = cost_time
# 获取执行后信息
aa = self.engine.query(sql=sql, close_conn=False).to_sep_dict()
# 获取PROFILING_DETAIL信息
result['PROFILING_DETAIL'] = self.engine.query2(
sql="select STATE,DURATION,CPU_USER,CPU_SYSTEM,BLOCK_OPS_IN,BLOCK_OPS_OUT ,MESSAGES_SENT ,MESSAGES_RECEIVED ,PAGE_FAULTS_MAJOR ,PAGE_FAULTS_MINOR ,SWAPS from INFORMATION_SCHEMA.PROFILING where query_id=" + str(
query_id) + " order by seq", close_conn=False,cursorclass='pymysql.cursors.DictCursor')
result['PROFILING_SUMMARY'] = self.engine.query2(
sql="SELECT STATE,SUM(DURATION) AS Total_R,ROUND(100*SUM(DURATION)/(SELECT SUM(DURATION) FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + "),2) AS Pct_R,COUNT(*) AS Calls,SUM(DURATION)/COUNT(*) AS R_Call FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + " GROUP BY STATE ORDER BY Total_R DESC", close_conn=False,cursorclass='pymysql.cursors.DictCursor')
# 处理执行前后对比信息
session = []
b = bb['rows']
a = aa['rows']
len = b.__len__()
for i in range (len):
dict = {'v_name': '', 'bef': '', 'aft': '', 'diff': ''}
dict['v_name'] = a[i][0]
dict['aft'] = a[i][1] if a[i][1] else 0
dict['bef'] = b[i][1] if b[i][1] else 0
if a[i][1] is not '' and self.is_number(str(dict['aft'])):
dict['diff'] = float(float(a[i][1]) - float(b[i][1]))
session.append(dict)
result['session'] = session
return result
def opt_parameter(self):
# 获取mysql执行计划参数
server_version = self.engine.server_version
if server_version < (5, 7, 0):
sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
else:
sql = self.sql_optimizer_switch
res = []
a = self.get_connection.search(sql=sql)['data'][0]['v_name'].split(',')
for v in a:
b = v.split('=')
dic = {'v_name':b[0],'v_value':b[1]}
res.append(dic)
return res
class Instance(object):
def __init__(self, instance):
self.instance_name = instance['instance_name']
self.db_type = instance['db_type']
self.host = instance['host']
self.port = instance['port']
self.user = instance['user']
self.password = instance['password']
self.charset = instance['charset'] | 0.395835 | 0.121816 |
import os
import logging
from distutils import log
from distutils.util import byte_compile
from distutils.dir_util import remove_tree, mkpath, copy_tree
from distutils.file_util import copy_file
from distutils.sysconfig import get_python_version
from distutils.command.bdist import bdist
from . import COMMON_USER_OPTIONS, VERSION_TEXT, EDITION, LOGGER
from .utils import add_docs, write_info_src, write_info_bin
class DistBinary(bdist):
    """Create a generic binary distribution.

    DistBinary is meant to replace distutils.bdist.
    """
    description = "create a built (binary) distribution"
    # NOTE(review): both options declare the short flag "d"; confirm this is
    # intentional, otherwise one of them is unreachable via the short form.
    user_options = COMMON_USER_OPTIONS + [
        ("bdist-dir=", "d",
         "temporary directory for creating the distribution"),
        ("dist-dir=", "d",
         "directory to put final built distributions in"),
    ]
    boolean_options = ["debug", "byte-code-only", "keep-temp"]
    log = LOGGER

    def initialize_options(self):
        """Initialize the options."""
        bdist.initialize_options(self)
        self.bdist_dir = None          # temp build dir; derived in finalize_options
        self.byte_code_only = False    # ship .pyc files and drop the sources
        self.label = None              # optional extra tag in the dist name
        self.edition = EDITION
        self.debug = False
        self.keep_temp = False         # keep the build directory afterwards

    def finalize_options(self):
        """Finalize the options."""
        bdist.finalize_options(self)

        # Replace the distribution's get_fullname so archive names carry the
        # label, edition and (for byte-code-only builds) the Python version.
        # Defined as a closure so it reads self's options at call time.
        def _get_fullname():
            label = "-{}".format(self.label) if self.label else ""
            python_version = "-py{}".format(get_python_version()) \
                if self.byte_code_only else ""
            return "{name}{label}-{version}{edition}{pyver}".format(
                name=self.distribution.get_name(),
                label=label,
                version=self.distribution.get_version(),
                edition=self.edition or "",
                pyver=python_version)

        self.distribution.get_fullname = _get_fullname
        if self.bdist_dir is None:
            self.bdist_dir = os.path.join(self.dist_dir,
                                          "bdist.{}".format(self.plat_name))
        if self.debug:
            self.log.setLevel(logging.DEBUG)
            log.set_threshold(1)  # Set Distutils logging level to DEBUG

    def _remove_sources(self):
        """Remove Python source files from the build directory."""
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(base, filename)
                    self.log.info("Removing source '%s'", filepath)
                    os.unlink(filepath)

    def _copy_from_pycache(self, start_dir):
        """Move .pyc files out of __pycache__ into the package directory.

        The interpreter tag (e.g. ``mod.cpython-39.pyc``) is dropped so the
        file becomes ``mod.pyc``; emptied __pycache__ dirs are then removed.
        """
        for base, dirs, files in os.walk(start_dir):
            for filename in files:
                if filename.endswith(".pyc"):
                    filepath = os.path.join(base, filename)
                    # keep only the module name before the first dot
                    new_name = "{}.pyc".format(filename.split(".")[0])
                    os.rename(filepath, os.path.join(base, "..", new_name))
        # second walk: delete the now-empty __pycache__ directories
        for base, dirs, files in os.walk(start_dir):
            if base.endswith("__pycache__"):
                os.rmdir(base)

    def run(self):
        """Run the command: install, optionally byte-compile, then assemble
        the distribution directory with docs and info files."""
        self.log.info("Installing library code to %s", self.bdist_dir)
        self.log.info("Generating INFO_SRC and INFO_BIN files")
        write_info_src(VERSION_TEXT)
        write_info_bin()
        dist_name = self.distribution.get_fullname()
        self.dist_target = os.path.join(self.dist_dir, dist_name)
        self.log.info("Distribution will be available as '%s'",
                      self.dist_target)
        # build command: just to get the build_base
        cmdbuild = self.get_finalized_command("build")
        self.build_base = cmdbuild.build_base
        # install command
        install = self.reinitialize_command("install_lib",
                                            reinit_subcommands=1)
        install.compile = False
        install.warn_dir = 0
        install.install_dir = self.bdist_dir
        self.log.info("Installing to %s", self.bdist_dir)
        self.run_command("install_lib")
        # install_egg_info command
        cmd_egginfo = self.get_finalized_command("install_egg_info")
        cmd_egginfo.install_dir = self.bdist_dir
        self.run_command("install_egg_info")
        installed_files = install.get_outputs()
        # compile and remove sources
        if self.byte_code_only:
            byte_compile(installed_files, optimize=0, force=True,
                         prefix=install.install_dir)
            self._remove_sources()
            if get_python_version().startswith('3'):
                self.log.info("Copying byte code from __pycache__")
                self._copy_from_pycache(os.path.join(self.bdist_dir, "mysql"))
                self._copy_from_pycache(os.path.join(self.bdist_dir, "mysqlx"))
        # create distribution
        # NOTE(review): every dst below is non-None, so the dst-is-None branch
        # in the loop is currently dead — kept for future entries.
        info_files = [
            ("README.txt", "README.txt"),
            ("LICENSE.txt", "LICENSE.txt"),
            ("README.rst", "README.rst"),
            ("CONTRIBUTING.rst", "CONTRIBUTING.rst"),
            ("docs/INFO_SRC", "INFO_SRC"),
            ("docs/INFO_BIN", "INFO_BIN"),
        ]
        copy_tree(self.bdist_dir, self.dist_target)
        mkpath(os.path.join(self.dist_target))
        for src, dst in info_files:
            if dst is None:
                dest_name, _ = copy_file(src, self.dist_target)
            else:
                dest_name, _ = copy_file(src,
                                         os.path.join(self.dist_target, dst))
        add_docs(os.path.join(self.dist_target, "docs"))
if not self.keep_temp:
remove_tree(self.build_base, dry_run=self.dry_run) | cpydist/bdist.py | import os
import logging
from distutils import log
from distutils.util import byte_compile
from distutils.dir_util import remove_tree, mkpath, copy_tree
from distutils.file_util import copy_file
from distutils.sysconfig import get_python_version
from distutils.command.bdist import bdist
from . import COMMON_USER_OPTIONS, VERSION_TEXT, EDITION, LOGGER
from .utils import add_docs, write_info_src, write_info_bin
class DistBinary(bdist):
"""Create a generic binary distribution.
DistBinary is meant to replace distutils.bdist.
"""
description = "create a built (binary) distribution"
user_options = COMMON_USER_OPTIONS + [
("bdist-dir=", "d",
"temporary directory for creating the distribution"),
("dist-dir=", "d",
"directory to put final built distributions in"),
]
boolean_options = ["debug", "byte-code-only", "keep-temp"]
log = LOGGER
def initialize_options(self):
"""Initialize the options."""
bdist.initialize_options(self)
self.bdist_dir = None
self.byte_code_only = False
self.label = None
self.edition = EDITION
self.debug = False
self.keep_temp = False
def finalize_options(self):
"""Finalize the options."""
bdist.finalize_options(self)
def _get_fullname():
label = "-{}".format(self.label) if self.label else ""
python_version = "-py{}".format(get_python_version()) \
if self.byte_code_only else ""
return "{name}{label}-{version}{edition}{pyver}".format(
name=self.distribution.get_name(),
label=label,
version=self.distribution.get_version(),
edition=self.edition or "",
pyver=python_version)
self.distribution.get_fullname = _get_fullname
if self.bdist_dir is None:
self.bdist_dir = os.path.join(self.dist_dir,
"bdist.{}".format(self.plat_name))
if self.debug:
self.log.setLevel(logging.DEBUG)
log.set_threshold(1) # Set Distutils logging level to DEBUG
def _remove_sources(self):
"""Remove Python source files from the build directory."""
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(base, filename)
self.log.info("Removing source '%s'", filepath)
os.unlink(filepath)
def _copy_from_pycache(self, start_dir):
"""Copy .py files from __pycache__."""
for base, dirs, files in os.walk(start_dir):
for filename in files:
if filename.endswith(".pyc"):
filepath = os.path.join(base, filename)
new_name = "{}.pyc".format(filename.split(".")[0])
os.rename(filepath, os.path.join(base, "..", new_name))
for base, dirs, files in os.walk(start_dir):
if base.endswith("__pycache__"):
os.rmdir(base)
def run(self):
"""Run the command."""
self.log.info("Installing library code to %s", self.bdist_dir)
self.log.info("Generating INFO_SRC and INFO_BIN files")
write_info_src(VERSION_TEXT)
write_info_bin()
dist_name = self.distribution.get_fullname()
self.dist_target = os.path.join(self.dist_dir, dist_name)
self.log.info("Distribution will be available as '%s'",
self.dist_target)
# build command: just to get the build_base
cmdbuild = self.get_finalized_command("build")
self.build_base = cmdbuild.build_base
# install command
install = self.reinitialize_command("install_lib",
reinit_subcommands=1)
install.compile = False
install.warn_dir = 0
install.install_dir = self.bdist_dir
self.log.info("Installing to %s", self.bdist_dir)
self.run_command("install_lib")
# install_egg_info command
cmd_egginfo = self.get_finalized_command("install_egg_info")
cmd_egginfo.install_dir = self.bdist_dir
self.run_command("install_egg_info")
installed_files = install.get_outputs()
# compile and remove sources
if self.byte_code_only:
byte_compile(installed_files, optimize=0, force=True,
prefix=install.install_dir)
self._remove_sources()
if get_python_version().startswith('3'):
self.log.info("Copying byte code from __pycache__")
self._copy_from_pycache(os.path.join(self.bdist_dir, "mysql"))
self._copy_from_pycache(os.path.join(self.bdist_dir, "mysqlx"))
# create distribution
info_files = [
("README.txt", "README.txt"),
("LICENSE.txt", "LICENSE.txt"),
("README.rst", "README.rst"),
("CONTRIBUTING.rst", "CONTRIBUTING.rst"),
("docs/INFO_SRC", "INFO_SRC"),
("docs/INFO_BIN", "INFO_BIN"),
]
copy_tree(self.bdist_dir, self.dist_target)
mkpath(os.path.join(self.dist_target))
for src, dst in info_files:
if dst is None:
dest_name, _ = copy_file(src, self.dist_target)
else:
dest_name, _ = copy_file(src,
os.path.join(self.dist_target, dst))
add_docs(os.path.join(self.dist_target, "docs"))
if not self.keep_temp:
remove_tree(self.build_base, dry_run=self.dry_run) | 0.499756 | 0.106365 |
from django.conf.urls import url
from . import views
app_name = "containertemplates"

# URL routes for container templates.  Two scopes exist: "site" templates
# (global) and "project" templates (bound to one project).  The captured
# groups are hex/dash identifiers — presumably UUIDs; confirm against the
# view kwargs.
urlpatterns = [
    # --- site-wide template routes ---
    url(
        regex=r"^site$",
        view=views.ContainerTemplateSiteListView.as_view(),
        name="site-list",
    ),
    url(
        regex=r"^site/detail/(?P<containertemplatesite>[0-9a-f-]+)$",
        view=views.ContainerTemplateSiteDetailView.as_view(),
        name="site-detail",
    ),
    url(
        regex=r"^site/create$",
        view=views.ContainerTemplateSiteCreateView.as_view(),
        name="site-create",
    ),
    url(
        regex=r"^site/update/(?P<containertemplatesite>[0-9a-f-]+)$",
        view=views.ContainerTemplateSiteUpdateView.as_view(),
        name="site-update",
    ),
    url(
        regex=r"^site/delete/(?P<containertemplatesite>[0-9a-f-]+)$",
        view=views.ContainerTemplateSiteDeleteView.as_view(),
        name="site-delete",
    ),
    url(
        regex=r"^site/duplicate/(?P<containertemplatesite>[0-9a-f-]+)$",
        view=views.ContainerTemplateSiteDuplicateView.as_view(),
        name="site-duplicate",
    ),
    # --- project-scoped template routes ---
    url(
        regex=r"^project/(?P<project>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectListView.as_view(),
        name="project-list",
    ),
    url(
        regex=r"^project/detail/(?P<containertemplateproject>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectDetailView.as_view(),
        name="project-detail",
    ),
    url(
        regex=r"^project/create/(?P<project>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectCreateView.as_view(),
        name="project-create",
    ),
    url(
        regex=r"^project/update/(?P<containertemplateproject>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectUpdateView.as_view(),
        name="project-update",
    ),
    url(
        regex=r"^project/delete/(?P<containertemplateproject>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectDeleteView.as_view(),
        name="project-delete",
    ),
    url(
        regex=r"^project/duplicate/(?P<containertemplateproject>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectDuplicateView.as_view(),
        name="project-duplicate",
    ),
    url(
        regex=r"^project/copy/(?P<project>[0-9a-f-]+)$",
        view=views.ContainerTemplateProjectCopyView.as_view(),
        name="project-copy",
    ),
    # Ajax views
    url(
        regex=r"^ajax/get-containertemplate$",
        view=views.ContainerTemplateSelectorApiView.as_view(),
        name="ajax-get-containertemplate",
    ),
] | containertemplates/urls.py | from django.conf.urls import url
from . import views
app_name = "containertemplates"
urlpatterns = [
url(
regex=r"^site$",
view=views.ContainerTemplateSiteListView.as_view(),
name="site-list",
),
url(
regex=r"^site/detail/(?P<containertemplatesite>[0-9a-f-]+)$",
view=views.ContainerTemplateSiteDetailView.as_view(),
name="site-detail",
),
url(
regex=r"^site/create$",
view=views.ContainerTemplateSiteCreateView.as_view(),
name="site-create",
),
url(
regex=r"^site/update/(?P<containertemplatesite>[0-9a-f-]+)$",
view=views.ContainerTemplateSiteUpdateView.as_view(),
name="site-update",
),
url(
regex=r"^site/delete/(?P<containertemplatesite>[0-9a-f-]+)$",
view=views.ContainerTemplateSiteDeleteView.as_view(),
name="site-delete",
),
url(
regex=r"^site/duplicate/(?P<containertemplatesite>[0-9a-f-]+)$",
view=views.ContainerTemplateSiteDuplicateView.as_view(),
name="site-duplicate",
),
url(
regex=r"^project/(?P<project>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectListView.as_view(),
name="project-list",
),
url(
regex=r"^project/detail/(?P<containertemplateproject>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectDetailView.as_view(),
name="project-detail",
),
url(
regex=r"^project/create/(?P<project>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectCreateView.as_view(),
name="project-create",
),
url(
regex=r"^project/update/(?P<containertemplateproject>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectUpdateView.as_view(),
name="project-update",
),
url(
regex=r"^project/delete/(?P<containertemplateproject>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectDeleteView.as_view(),
name="project-delete",
),
url(
regex=r"^project/duplicate/(?P<containertemplateproject>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectDuplicateView.as_view(),
name="project-duplicate",
),
url(
regex=r"^project/copy/(?P<project>[0-9a-f-]+)$",
view=views.ContainerTemplateProjectCopyView.as_view(),
name="project-copy",
),
# Ajax views
url(
regex=r"^ajax/get-containertemplate$",
view=views.ContainerTemplateSelectorApiView.as_view(),
name="ajax-get-containertemplate",
),
] | 0.336222 | 0.192805 |
import re
import logging
from typing import TextIO
from typing import Optional
from typing import Sequence, Iterator, List, Dict
from gffpal.parsers.parsers import ParseError, LineParseError
from gffpal.parsers.parsers import MULTISPACE_REGEX
from gffpal.parsers.parsers import (
parse_int,
parse_float,
parse_string_not_empty
)
logger = logging.getLogger(__name__)
class TRNAScanRecord(object):
    """One row of tRNAscan-SE's tabular output."""

    # Column order of the whitespace-separated output; "note" is optional.
    column_order = [
        "seqid",
        "num",
        "start",
        "end",
        "trna_type",
        "anticodon",
        "intron_starts",
        "intron_ends",
        "infernal_score",
        "note",
    ]

    def __init__(
        self,
        seqid: str,
        start: int,
        end: int,
        trna_type: str,
        anticodon: str,
        num: Optional[int],
        intron_starts: Sequence[int],
        intron_ends: Sequence[int],
        infernal_score: Optional[float],
        note: Optional[str],
    ) -> None:
        self.seqid = seqid
        self.start = start
        self.end = end
        self.trna_type = trna_type
        self.anticodon = anticodon
        self.num = num
        self.intron_starts = intron_starts
        self.intron_ends = intron_ends
        self.infernal_score = infernal_score
        self.note = note
        return

    @classmethod
    def from_line(cls, line: str) -> "TRNAScanRecord":
        """Parse one whitespace-separated data line into a record.

        Raises LineParseError when the column count or a field value is
        invalid.
        """
        sline = MULTISPACE_REGEX.split(line.rstrip())
        # The trailing "note" column may be absent.
        if ((len(sline) != len(cls.column_order)) and
                (len(sline) != len(cls.column_order) - 1)):
            raise LineParseError(
                "Line had the wrong number of columns. "
                f"Expected {len(cls.column_order)} or "
                f"{len(cls.column_order) - 1} but got {len(sline)}."
            )
        record: Dict[str, str] = {
            k.strip(): v.strip()
            for k, v
            in zip(cls.column_order, sline)
        }
        start = parse_int(record["start"], "start")
        end = parse_int(record["end"], "end")
        num = parse_int(record["num"], "num")
        infernal_score = parse_float(
            record["infernal_score"],
            "infernal_score"
        )
        # "0"/"0" means "no introns", not an intron at position 0.
        if record["intron_starts"] == "0" and record["intron_ends"] == "0":
            intron_starts: List[int] = []
            intron_ends: List[int] = []
        else:
            intron_starts = [
                parse_int(i.strip(), "intron_starts")
                for i in record["intron_starts"].split(",")
            ]
            intron_ends = [
                parse_int(i.strip(), "intron_ends")
                for i in record["intron_ends"].split(",")
            ]
        return cls(
            seqid=parse_string_not_empty(record["seqid"], "seqid"),
            start=start,
            end=end,
            trna_type=parse_string_not_empty(record["trna_type"], "trna_type"),
            anticodon=parse_string_not_empty(record["anticodon"], "anticodon"),
            num=num,
            intron_starts=intron_starts,
            intron_ends=intron_ends,
            infernal_score=infernal_score,
            note=record.get("note", None),
        )

    @classmethod
    def from_file(cls, handle: TextIO) -> Iterator["TRNAScanRecord"]:
        """Yield records from a tRNAscan-SE output file.

        Header lines are skipped; data starts after the first line that
        begins with "-".  Line-level errors are re-raised as ParseError
        annotated with the filename (when available) and line number.
        """
        started = False
        for i, line in enumerate(handle, 1):
            if line.startswith("-"):
                started = True
                continue
            elif not started:
                continue
            try:
                yield cls.from_line(line)
            except LineParseError as e:
                if hasattr(handle, "name"):
                    filename: Optional[str] = handle.name
                else:
                    filename = None
                # Bug fix: chain the original LineParseError so its traceback
                # is preserved instead of being discarded (PEP 3134).
                raise ParseError(
                    filename,
                    i,
                    e.message
                ) from e
        return
class TRNAScanSS(object):
    """One record from tRNAscan-SE's secondary-structure output."""

    def __init__(
        self,
        seqid: str,
        start: int,
        end: int,
        trna_type: str,
        anticodon: str,
        anticodon_start: int,
        anticodon_end: int,
        score: float,
        seq: str,
        ss: str,
        num: Optional[int],
    ) -> None:
        self.seqid = seqid
        self.start = int(start)
        self.end = int(end)
        self.trna_type = trna_type
        self.anticodon = anticodon
        self.anticodon_start = anticodon_start
        self.anticodon_end = anticodon_end
        self.score = score
        self.seq = seq  # the tRNA sequence ("Seq:" line)
        self.ss = ss    # the secondary-structure string ("Str:" line)
        self.num = num
        return

    @classmethod
    def from_file(cls, handle: TextIO) -> Iterator["TRNAScanSS"]:
        """Yield records from a tRNAscan-SE structure file.

        The parser accumulates fields into *record* and flushes the previous
        record whenever a new "<seqid>.trna<num> (<start>-<end>)" header line
        is seen; the final record is flushed after the loop.
        """
        record = {}
        # "<seqid>.trna<num> (<start>-<end>)" header line
        id_regex = re.compile(r"(.+)\.trna(\d+)\s+\((\d+)-(\d+)\)")
        # "Type: X  Anticodon: Y at ... (a-b) ... Score: s" line
        type_regex = re.compile((
            r"Type: (\S+)\s+Anticodon: "
            r"(\S+).*\((\d+)-(\d+).*Score: (\d+\.?\d*)"
        ))
        for line in handle:
            line = line.strip()
            if line == "" or line.startswith("*"):
                continue
            elif line.startswith("Type"):
                match = type_regex.search(line)
                if match is None:
                    raise ValueError(f"Could not parse type line: {line}")
                (trna_type, anticodon, anticodon_start,
                 anticodon_end, score) = match.groups()
                record["trna_type"] = trna_type
                record["anticodon"] = anticodon
                record["anticodon_start"] = anticodon_start
                record["anticodon_end"] = anticodon_end
                record["score"] = score
            elif line.startswith("Possible"):
                continue
            elif line.startswith("Seq"):
                seq = line[5:].strip()  # drop the "Seq: " prefix
                record["seq"] = seq
            elif line.startswith("Str"):
                ss = line[5:].strip()  # drop the "Str: " prefix
                record["ss"] = ss
            else:
                match = id_regex.search(line)
                if match is None:
                    logger.warning("Encountered unexpected line: %s", line)
                    continue
                elif "seqid" in record:
                    # Its not the first record
                    # NOTE(review): this yield duplicates the flush below —
                    # keep the two in sync when changing fields.
                    yield cls(
                        record["seqid"],
                        int(record["start"]),
                        int(record["end"]),
                        record["trna_type"],
                        record["anticodon"],
                        int(record["anticodon_start"]),
                        int(record["anticodon_end"]),
                        float(record["score"]),
                        record["seq"],
                        record["ss"],
                        int(record["num"])
                    )
                seqid, num, start, end = match.groups()
                record = {
                    "seqid": seqid,
                    "num": num,
                    "start": start,
                    "end": end,
                }
        # flush the final record, if any data was seen
        if "seqid" in record:
            yield cls(
                record["seqid"],
                int(record["start"]),
                int(record["end"]),
                record["trna_type"],
                record["anticodon"],
                int(record["anticodon_start"]),
                int(record["anticodon_end"]),
                float(record["score"]),
                record["seq"],
                record["ss"],
                int(record["num"])
            )
return | gffpal/parsers/trnascan.py | import re
import logging
from typing import TextIO
from typing import Optional
from typing import Sequence, Iterator, List, Dict
from gffpal.parsers.parsers import ParseError, LineParseError
from gffpal.parsers.parsers import MULTISPACE_REGEX
from gffpal.parsers.parsers import (
parse_int,
parse_float,
parse_string_not_empty
)
logger = logging.getLogger(__name__)
class TRNAScanRecord(object):
    """A single row of the tabular output of tRNAScan-SE.

    Column semantics follow ``column_order``; the trailing ``note`` column is
    optional because tRNAScan-SE omits it for some hits.
    """

    # Order of the whitespace-separated columns in the tRNAScan-SE table.
    column_order = [
        "seqid",
        "num",
        "start",
        "end",
        "trna_type",
        "anticodon",
        "intron_starts",
        "intron_ends",
        "infernal_score",
        "note",
    ]

    def __init__(
        self,
        seqid: str,
        start: int,
        end: int,
        trna_type: str,
        anticodon: str,
        num: Optional[int],
        intron_starts: Sequence[int],
        intron_ends: Sequence[int],
        infernal_score: Optional[float],
        note: Optional[str],
    ) -> None:
        self.seqid = seqid
        self.start = start
        self.end = end
        self.trna_type = trna_type
        self.anticodon = anticodon
        self.num = num
        self.intron_starts = intron_starts
        self.intron_ends = intron_ends
        self.infernal_score = infernal_score
        self.note = note
        return

    @classmethod
    def from_line(cls, line: str) -> "TRNAScanRecord":
        """Parse one whitespace-delimited table line into a record.

        Raises:
            LineParseError: if the line has the wrong number of columns or a
                field fails to parse.
        """
        sline = MULTISPACE_REGEX.split(line.rstrip())
        # One fewer column than expected is OK: the "note" column is optional.
        if ((len(sline) != len(cls.column_order)) and
                (len(sline) != len(cls.column_order) - 1)):
            raise LineParseError(
                "Line had the wrong number of columns. "
                f"Expected {len(cls.column_order)} or "
                f"{len(cls.column_order) - 1} but got {len(sline)}."
            )
        record: Dict[str, str] = {
            k.strip(): v.strip()
            for k, v
            in zip(cls.column_order, sline)
        }
        start = parse_int(record["start"], "start")
        end = parse_int(record["end"], "end")
        num = parse_int(record["num"], "num")
        infernal_score = parse_float(
            record["infernal_score"],
            "infernal_score"
        )
        # tRNAScan-SE writes "0" in both intron columns when there is no intron.
        if record["intron_starts"] == "0" and record["intron_ends"] == "0":
            intron_starts: List[int] = []
            intron_ends: List[int] = []
        else:
            intron_starts = [
                parse_int(i.strip(), "intron_starts")
                for i in record["intron_starts"].split(",")
            ]
            intron_ends = [
                parse_int(i.strip(), "intron_ends")
                for i in record["intron_ends"].split(",")
            ]
        return cls(
            seqid=parse_string_not_empty(record["seqid"], "seqid"),
            start=start,
            end=end,
            trna_type=parse_string_not_empty(record["trna_type"], "trna_type"),
            anticodon=parse_string_not_empty(record["anticodon"], "anticodon"),
            num=num,
            intron_starts=intron_starts,
            intron_ends=intron_ends,
            infernal_score=infernal_score,
            note=record.get("note", None),
        )

    @classmethod
    def from_file(cls, handle: TextIO) -> Iterator["TRNAScanRecord"]:
        """Yield records from a tRNAScan-SE table.

        Everything up to (and including) the dashed header-separator line is
        skipped.

        Raises:
            ParseError: wrapping any LineParseError with filename/line info.
        """
        started = False
        for i, line in enumerate(handle, 1):
            if line.startswith("-"):
                started = True
                continue
            elif not started:
                continue
            try:
                yield cls.from_line(line)
            except LineParseError as e:
                # Chain the cause so the original parse failure stays in the
                # traceback.
                filename: Optional[str] = getattr(handle, "name", None)
                raise ParseError(filename, i, e.message) from e
        return
class TRNAScanSS(object):
    """One secondary-structure record from a tRNAScan-SE ``--ss`` output file."""
    def __init__(
        self,
        seqid: str,
        start: int,
        end: int,
        trna_type: str,
        anticodon: str,
        anticodon_start: int,
        anticodon_end: int,
        score: float,
        seq: str,
        ss: str,
        num: Optional[int],
    ) -> None:
        self.seqid = seqid
        # start/end are coerced defensively: callers sometimes pass strings.
        self.start = int(start)
        self.end = int(end)
        self.trna_type = trna_type
        self.anticodon = anticodon
        self.anticodon_start = anticodon_start
        self.anticodon_end = anticodon_end
        self.score = score
        self.seq = seq
        self.ss = ss
        self.num = num
        return
    @classmethod
    def from_file(cls, handle: TextIO) -> Iterator["TRNAScanSS"]:
        """Yield records by accumulating the multi-line blocks of a ``.ss`` file.

        A completed record is emitted when the header line of the *next*
        block is seen, and once more after the loop for the final block.

        Raises:
            ValueError: if a "Type:" line does not match the expected format.
        """
        record = {}
        # Block header, e.g. "<seqid>.trna<num> (<start>-<end>)".
        id_regex = re.compile(r"(.+)\.trna(\d+)\s+\((\d+)-(\d+)\)")
        # Annotation line carrying type, anticodon, its coordinates and score.
        type_regex = re.compile((
            r"Type: (\S+)\s+Anticodon: "
            r"(\S+).*\((\d+)-(\d+).*Score: (\d+\.?\d*)"
        ))
        for line in handle:
            line = line.strip()
            if line == "" or line.startswith("*"):
                continue
            elif line.startswith("Type"):
                match = type_regex.search(line)
                if match is None:
                    raise ValueError(f"Could not parse type line: {line}")
                (trna_type, anticodon, anticodon_start,
                 anticodon_end, score) = match.groups()
                record["trna_type"] = trna_type
                record["anticodon"] = anticodon
                record["anticodon_start"] = anticodon_start
                record["anticodon_end"] = anticodon_end
                record["score"] = score
            elif line.startswith("Possible"):
                # e.g. "Possible intron: ..." lines are deliberately ignored.
                continue
            elif line.startswith("Seq"):
                # Strip the "Seq: " prefix (5 characters).
                seq = line[5:].strip()
                record["seq"] = seq
            elif line.startswith("Str"):
                # Strip the "Str: " prefix (5 characters).
                ss = line[5:].strip()
                record["ss"] = ss
            else:
                match = id_regex.search(line)
                if match is None:
                    logger.warning("Encountered unexpected line: %s", line)
                    continue
                elif "seqid" in record:
                    # It's not the first record: flush the previous block
                    # before starting a new one.
                    yield cls(
                        record["seqid"],
                        int(record["start"]),
                        int(record["end"]),
                        record["trna_type"],
                        record["anticodon"],
                        int(record["anticodon_start"]),
                        int(record["anticodon_end"]),
                        float(record["score"]),
                        record["seq"],
                        record["ss"],
                        int(record["num"])
                    )
                seqid, num, start, end = match.groups()
                record = {
                    "seqid": seqid,
                    "num": num,
                    "start": start,
                    "end": end,
                }
        # Flush the final record accumulated before EOF.
        if "seqid" in record:
            yield cls(
                record["seqid"],
                int(record["start"]),
                int(record["end"]),
                record["trna_type"],
                record["anticodon"],
                int(record["anticodon_start"]),
                int(record["anticodon_end"]),
                float(record["score"]),
                record["seq"],
                record["ss"],
                int(record["num"])
            )
return | 0.778355 | 0.266486 |
from typing import Iterable, List, Optional
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import declarative_base, relationship, selectinload
from vkbottle.modules import logger
Base = declarative_base()
logger.debug("Connect to database")
_engine = create_async_engine("sqlite+aiosqlite:///db.sqlite3", future=True)
session = AsyncSession(bind=_engine, expire_on_commit=False)
class User(Base):
    """A bot user keyed by VK id, holding diary credentials and referrals."""
    __tablename__ = 'users'
    vk_id = Column(Integer, primary_key=True, nullable=False)
    diary_session = Column(String(length=32))
    login = Column(String(length=128))
    password = Column(String(length=128))  # maybe to do something with it?
    donut_level = Column(Integer, default=0)
    # Self-referential FK: the user who referred this one.
    refry_id = Column(ForeignKey("users.vk_id"))
    refry_user: Optional["User"] = relationship("User", remote_side=[vk_id])
    # referral_users: async func with session.execute(...).scalars().all()
    chats: List["Chat"] = relationship("Chat", back_populates="user", lazy="selectin", cascade="all, delete-orphan")
    children: List["Child"] = relationship("Child", back_populates="user", lazy="selectin", cascade="all, delete-orphan")
    async def referral_users(self) -> List["User"]:
        """Return every user referred by this one."""
        stmt = select(User).where(User.refry_id == self.vk_id)
        return (await session.execute(stmt)).scalars().all()
    async def referral_count(self) -> int:
        """Return how many users this one referred."""
        stmt = select(func.count(User.vk_id)).where(User.refry_id == self.vk_id)
        return (await session.execute(stmt)).scalar_one()
    @classmethod
    async def create(
        cls,
        vk_id: int,
        diary_session: Optional[str] = None,
        login: Optional[str] = None,
        password: Optional[str] = None
    ) -> "User":  # todo try to optimize
        """Insert a user, or update credentials if vk_id already exists."""
        user = cls(vk_id=vk_id, diary_session=diary_session, login=login, password=password)
        session.add(user)
        try:
            await session.flush()
            await session.commit()
            return await session.get(User, vk_id)
        except IntegrityError:
            # The row already exists. Roll back the failed INSERT first:
            # without it the session stays in a failed-transaction state and
            # the session.get below would raise.
            await session.rollback()
            user = await session.get(User, vk_id)
            user.diary_session = diary_session
            user.login = login
            user.password = password
            await session.commit()
            return user
    @classmethod
    async def get(cls, vk_id: int, chats: bool = False, children: bool = False) -> Optional["User"]:
        """Fetch a user by VK id, optionally eager-loading chats/children."""
        stmt = select(cls).where(User.vk_id == vk_id)
        if chats:
            stmt = stmt.options(selectinload(cls.chats))
        if children:
            stmt = stmt.options(selectinload(cls.children))
        return (await session.execute(stmt)).scalar_one_or_none()
    @classmethod
    async def get_all(cls, chats: bool = False, children: bool = False) -> Iterable["User"]:
        """Iterate over all users, optionally eager-loading chats/children."""
        stmt = select(cls)
        if chats:
            stmt = stmt.options(selectinload(cls.chats))
        if children:
            stmt = stmt.options(selectinload(cls.children))
        return (await session.execute(stmt)).scalars()
    async def delete(self):
        """Delete this user (cascades to chats/children) and commit."""
        await session.delete(self)
        await session.commit()
    @staticmethod
    async def save():
        """Commit any pending changes on the shared session."""
        await session.commit()
    @staticmethod
    async def count() -> int:
        """Total number of users."""
        return (await session.execute(select(func.count(User.vk_id)))).scalar_one()
    def __repr__(self):
        return f"<User(vk_id={self.vk_id}, ...)>"
class Child(Base):
    """A diary child profile attached to a bot user (composite PK)."""
    __tablename__ = "child"
    vk_id = Column(Integer, ForeignKey('users.vk_id'), primary_key=True, nullable=False)
    child_id = Column(Integer, primary_key=True, nullable=False)
    marks_notify = Column(Boolean, default=False, nullable=False)
    user: "User" = relationship("User", lazy="selectin", back_populates="children")
    @classmethod
    # warning! no checking user with vk_id!
    async def create(
        cls,
        vk_id: int,
        child_id: int
    ):
        """Insert a (vk_id, child_id) pair; return the existing row on conflict."""
        child = cls(vk_id=vk_id, child_id=child_id)
        session.add(child)
        try:
            await session.flush()
            await session.commit()
        except IntegrityError:  # todo?
            # Roll back the failed INSERT so the session is usable again
            # before loading the existing row.
            await session.rollback()
            child = await session.get(Child, (vk_id, child_id))
        return child
    @staticmethod
    async def save():
        """Commit any pending changes on the shared session."""
        await session.commit()
    @staticmethod
    async def marks_count() -> int:
        """Number of children with marks notifications enabled.

        Bug fix: the old query filtered on the non-existent column
        ``Child.marks``; the actual flag column is ``marks_notify``.
        """
        return (await session.execute(select(func.count(Child.vk_id)).where(Child.marks_notify.is_(True)))).scalar_one()
    def __repr__(self):
        return f"<Child(vk_id={self.vk_id}, child_id={self.child_id})>"
class Chat(Base):
    """A VK chat bound to the user who set the bot up in it."""
    __tablename__ = "chats"
    chat_id = Column(Integer, primary_key=True)
    vk_id = Column(Integer, ForeignKey('users.vk_id'))
    user: "User" = relationship("User", lazy="selectin", back_populates="chats")
    @classmethod
    # warning! no checking user with vk_id!
    async def create(
        cls,
        chat_id: int,
        vk_id: int,
    ) -> "Chat":
        """Insert a chat, or re-bind an existing chat_id to vk_id."""
        chat = cls(chat_id=chat_id, vk_id=vk_id)
        session.add(chat)
        try:
            await session.flush()
            await session.commit()
        except IntegrityError:
            # Roll back the failed INSERT so the session is usable again,
            # then re-bind the existing chat to the new owner.
            await session.rollback()
            chat = await session.get(Chat, chat_id)
            chat.vk_id = vk_id
            await session.commit()
        return chat
    @classmethod
    async def get(cls, chat_id: int) -> Optional["Chat"]:
        """Fetch a chat by primary key, or None."""
        return await session.get(Chat, chat_id)
    async def delete(self):
        """Delete this chat and commit."""
        await session.delete(self)
        await session.commit()
    @staticmethod
    async def count() -> int:
        """Total number of chats."""
        return (await session.execute(select(func.count(Chat.vk_id)))).scalar_one()
    def __repr__(self):
        return f"<Chat(chat_id={self.chat_id}, ...)>"
async def start_up():
    # Create every table declared on Base (no-op for tables that already exist).
    async with _engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
async def close():
logger.debug("Close connection")
await session.close() | vk_bot/db.py | from typing import Iterable, List, Optional
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import declarative_base, relationship, selectinload
from vkbottle.modules import logger
Base = declarative_base()
logger.debug("Connect to database")
_engine = create_async_engine("sqlite+aiosqlite:///db.sqlite3", future=True)
session = AsyncSession(bind=_engine, expire_on_commit=False)
class User(Base):
__tablename__ = 'users'
vk_id = Column(Integer, primary_key=True, nullable=False)
diary_session = Column(String(length=32))
login = Column(String(length=128))
password = Column(String(length=128)) # maybe to do something with it?
donut_level = Column(Integer, default=0)
refry_id = Column(ForeignKey("users.vk_id"))
refry_user: Optional["User"] = relationship("User", remote_side=[vk_id])
# referral_users: async func with session.execute(...).scalars().all()
chats: List["Chat"] = relationship("Chat", back_populates="user", lazy="selectin", cascade="all, delete-orphan")
children: List["Child"] = relationship("Child", back_populates="user", lazy="selectin", cascade="all, delete-orphan")
async def referral_users(self) -> List["User"]:
stmt = select(User).where(User.refry_id == self.vk_id)
return (await session.execute(stmt)).scalars().all()
async def referral_count(self) -> int:
stmt = select(func.count(User.vk_id)).where(User.refry_id == self.vk_id)
return (await session.execute(stmt)).scalar_one()
@classmethod
async def create(
cls,
vk_id: int,
diary_session: Optional[str] = None,
login: Optional[str] = None,
password: Optional[str] = None
) -> "User": # todo try to optimize
user = cls(vk_id=vk_id, diary_session=diary_session, login=login, password=password)
session.add(user)
try:
await session.flush()
await session.commit()
return await session.get(User, vk_id)
except IntegrityError: # todo?
user = await session.get(User, vk_id)
user.diary_session = diary_session
user.login = login
user.password = password
await session.commit()
return user
@classmethod
async def get(cls, vk_id: int, chats: bool = False, children: bool = False) -> Optional["User"]:
stmt = select(cls).where(User.vk_id == vk_id)
if chats:
stmt = stmt.options(selectinload(cls.chats))
if children:
stmt = stmt.options(selectinload(cls.children))
return (await session.execute(stmt)).scalar_one_or_none()
@classmethod
async def get_all(cls, chats: bool = False, children: bool = False) -> Iterable["User"]:
stmt = select(cls)
if chats:
stmt = stmt.options(selectinload(cls.chats))
if children:
stmt = stmt.options(selectinload(cls.children))
return (await session.execute(stmt)).scalars()
async def delete(self):
await session.delete(self)
await session.commit()
@staticmethod
async def save():
await session.commit()
@staticmethod
async def count() -> int:
return (await session.execute(select(func.count(User.vk_id)))).scalar_one()
def __repr__(self):
return f"<User(vk_id={self.vk_id}, ...)>"
class Child(Base):
    """A diary child profile attached to a bot user (composite PK)."""
    __tablename__ = "child"
    vk_id = Column(Integer, ForeignKey('users.vk_id'), primary_key=True, nullable=False)
    child_id = Column(Integer, primary_key=True, nullable=False)
    marks_notify = Column(Boolean, default=False, nullable=False)
    user: "User" = relationship("User", lazy="selectin", back_populates="children")
    @classmethod
    # warning! no checking user with vk_id!
    async def create(
        cls,
        vk_id: int,
        child_id: int
    ):
        """Insert a (vk_id, child_id) pair; return the existing row on conflict."""
        child = cls(vk_id=vk_id, child_id=child_id)
        session.add(child)
        try:
            await session.flush()
            await session.commit()
        except IntegrityError:  # todo?
            # Roll back the failed INSERT so the session is usable again
            # before loading the existing row.
            await session.rollback()
            child = await session.get(Child, (vk_id, child_id))
        return child
    @staticmethod
    async def save():
        """Commit any pending changes on the shared session."""
        await session.commit()
    @staticmethod
    async def marks_count() -> int:
        """Number of children with marks notifications enabled.

        Bug fix: the old query filtered on the non-existent column
        ``Child.marks``; the actual flag column is ``marks_notify``.
        """
        return (await session.execute(select(func.count(Child.vk_id)).where(Child.marks_notify.is_(True)))).scalar_one()
    def __repr__(self):
        return f"<Child(vk_id={self.vk_id}, child_id={self.child_id})>"
class Chat(Base):
__tablename__ = "chats"
chat_id = Column(Integer, primary_key=True)
vk_id = Column(Integer, ForeignKey('users.vk_id'))
user: "User" = relationship("User", lazy="selectin", back_populates="chats")
@classmethod
# warning! no checking user with vk_id!
async def create(
cls,
chat_id: int,
vk_id: int,
) -> "Chat":
chat = cls(chat_id=chat_id, vk_id=vk_id)
session.add(chat)
try:
await session.flush()
await session.commit()
except IntegrityError:
chat = await session.get(Chat, chat_id)
chat.vk_id = vk_id
await session.commit()
return chat
@classmethod
async def get(cls, chat_id: int) -> Optional["Chat"]:
return await session.get(Chat, chat_id)
async def delete(self):
await session.delete(self)
await session.commit()
@staticmethod
async def count() -> int:
return (await session.execute(select(func.count(Chat.vk_id)))).scalar_one()
def __repr__(self):
return f"<Chat(chat_id={self.chat_id}, ...)>"
async def start_up():
async with _engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
async def close():
logger.debug("Close connection")
await session.close() | 0.46393 | 0.12276 |
import dailyScript as daily
#create each of the days information that will be passed to the daily main script
def main():
    """Replay five scripted days of bank transactions through dailyScript.

    Each d<day>_trans<n> list is a canned input stream consumed by
    daily.main; an empty list means that transaction is read from manual
    input, which must end with exit code 0.
    """
    #day1
    d1_trans1 = ["login", "agent", "createAccount", "1234567", "Emma", "logout"]
    #trans2 will print "Account doesn't exist"
    d1_trans2 = ["login", "machine", "withdraw", "1234567", "logout"]
    d1_trans3 = ["login", "agent", "createAccount", "7654321", "Kathryn", "logout", "0"]
    daily.main(d1_trans1, d1_trans2, d1_trans3)
    print("---------------------End of Day 1--------------------------")
    #day2
    d2_trans1 = ["login", "machine", "deposit", "1234567", "Emma", "500", "logout"]
    #trans2 will print "Account overdrawn, transaction not completed"
    # Fix: a missing comma made "200" "logout" one fused string "200logout"
    # (implicit string concatenation), dropping the logout step.
    d2_trans2 = ["login", "machine", "withdraw", "1234567", "Emma", "200", "logout"]
    d2_trans3 = ["login", "agent", "deposit", "7654321", "Kathryn", "1000", "logout", "0"]
    daily.main(d2_trans1, d2_trans2, d2_trans3)
    print("---------------------End of Day 2--------------------------")
    #day3
    d3_trans1 = ["login", "agent", "withdraw", "1234567", "Emma", "10", "logout"]
    d3_trans2 = ["login", "machine", "transfer", "7654321", "1234567", "Kathryn", "150", "y", "logout"]
    d3_trans3 = ["login", "agent", "createAccount", "1105980", "Mary", "logout", "0"]
    daily.main(d3_trans1, d3_trans2, d3_trans3)
    print("---------------------End of Day 3--------------------------")
    #day4 - one transaction of manual input
    d4_trans1 = [] #manual input has to end with exit code 0
    d4_trans2 = ["login", "agent", "deleteAccount", "1105980", "Mary", "logout"]
    #trans3 will print "This operation is not available in ATM mode"
    d4_trans3 = ["login", "machine", "deleteAccount", "1234567", "logout", "0"]
    daily.main(d4_trans1, d4_trans2, d4_trans3)
    print("---------------------End of Day 4--------------------------")
    #day5
    #trans1 will print "Account doesn't exist"
    d5_trans1 = ["login", "agent", "deposit", "1105980", "logout"]
    #trans2 will print "Withdrawals above $1000 rejected in ATM mode"
    d5_trans2 = ["login", "machine", "withdraw", "1234567", "Emma", "15000000000", "logout"]
    d5_trans3 = ["login", "agent", "createAccount", "1000101", "QueensCS", "logout", "0"]
    daily.main(d5_trans1, d5_trans2, d5_trans3)
    print("---------------------End of Day 5--------------------------")
'''
After 5 days (If manual input doesn't change accounts or amounts)
valid accounts list -
1234567
7654321
1000101
0000000
master accounts file -
7654321 850 Kathryn
1234567 640 Emma
1000101 0 QueensCS
0000000 000 XXXX
''' | src/weeklyScript.py | import dailyScript as daily
#create each of the days information that will be passed to the daily main script
def main():
    """Replay five scripted days of bank transactions through dailyScript.

    Each d<day>_trans<n> list is a canned input stream consumed by
    daily.main; an empty list means that transaction is read from manual
    input, which must end with exit code 0.
    """
    #day1
    d1_trans1 = ["login", "agent", "createAccount", "1234567", "Emma", "logout"]
    #trans2 will print "Account doesn't exist"
    d1_trans2 = ["login", "machine", "withdraw", "1234567", "logout"]
    d1_trans3 = ["login", "agent", "createAccount", "7654321", "Kathryn", "logout", "0"]
    daily.main(d1_trans1, d1_trans2, d1_trans3)
    print("---------------------End of Day 1--------------------------")
    #day2
    d2_trans1 = ["login", "machine", "deposit", "1234567", "Emma", "500", "logout"]
    #trans2 will print "Account overdrawn, transaction not completed"
    # Fix: a missing comma made "200" "logout" one fused string "200logout"
    # (implicit string concatenation), dropping the logout step.
    d2_trans2 = ["login", "machine", "withdraw", "1234567", "Emma", "200", "logout"]
    d2_trans3 = ["login", "agent", "deposit", "7654321", "Kathryn", "1000", "logout", "0"]
    daily.main(d2_trans1, d2_trans2, d2_trans3)
    print("---------------------End of Day 2--------------------------")
    #day3
    d3_trans1 = ["login", "agent", "withdraw", "1234567", "Emma", "10", "logout"]
    d3_trans2 = ["login", "machine", "transfer", "7654321", "1234567", "Kathryn", "150", "y", "logout"]
    d3_trans3 = ["login", "agent", "createAccount", "1105980", "Mary", "logout", "0"]
    daily.main(d3_trans1, d3_trans2, d3_trans3)
    print("---------------------End of Day 3--------------------------")
    #day4 - one transaction of manual input
    d4_trans1 = [] #manual input has to end with exit code 0
    d4_trans2 = ["login", "agent", "deleteAccount", "1105980", "Mary", "logout"]
    #trans3 will print "This operation is not available in ATM mode"
    d4_trans3 = ["login", "machine", "deleteAccount", "1234567", "logout", "0"]
    daily.main(d4_trans1, d4_trans2, d4_trans3)
    print("---------------------End of Day 4--------------------------")
    #day5
    #trans1 will print "Account doesn't exist"
    d5_trans1 = ["login", "agent", "deposit", "1105980", "logout"]
    #trans2 will print "Withdrawals above $1000 rejected in ATM mode"
    d5_trans2 = ["login", "machine", "withdraw", "1234567", "Emma", "15000000000", "logout"]
    d5_trans3 = ["login", "agent", "createAccount", "1000101", "QueensCS", "logout", "0"]
    daily.main(d5_trans1, d5_trans2, d5_trans3)
    print("---------------------End of Day 5--------------------------")
'''
After 5 days (If manual input doesn't change accounts or amounts)
valid accounts list -
1234567
7654321
1000101
0000000
master accounts file -
7654321 850 Kathryn
1234567 640 Emma
1000101 0 QueensCS
0000000 000 XXXX
''' | 0.279828 | 0.379148 |
class BUIMask(object):
    """Mask client/servers based on user preferences or user ACL"""
    def init_app(self, app):
        """Initialize the mask
        :param app: Application context
        :type app: :class:`burpui.engines.server.BUIServer`
        """
        self.app = app
    @property
    def is_user_pref(self):
        """True when per-user hide lists are available (requires SQL support)."""
        return self.app.config["WITH_SQL"]
    def has_filters(self, current_user):
        """Return a truthy value when visibility filtering applies to the user.
        Admins are only filtered by their own saved "Hidden" preferences;
        every other user (including anonymous) is always subject to checks.
        """
        if not current_user.is_anonymous and current_user.acl.is_admin():
            if self.is_user_pref:
                # Truthy iff the admin has at least one Hidden row stored.
                return self.query_user(current_user.name)
            return False
        return True
    def query_user(self, username):
        """Return the first Hidden row for *username*, or None."""
        from .models import Hidden
        return Hidden.query.filter_by(user=username).first()
    def query_hidden(self, username):
        """Return all (client, server) pairs hidden by *username*."""
        from .models import Hidden
        return (
            Hidden.query.filter_by(user=username)
            .with_entities(Hidden.client, Hidden.server)
            .all()
        )
    def hidden_clients(self, username):
        """(client, server) pairs hidden by *username*; [] without SQL."""
        if not self.is_user_pref:
            return []
        return self.query_hidden(username)
    def hidden_servers(self, username):
        """Servers hidden as a whole, i.e. rows with no client set."""
        if not self.is_user_pref:
            return []
        hidden = self.query_hidden(username)
        return [server for client, server in hidden if not client]
    def is_client_allowed(self, current_user, client=None, server=None):
        """Visibility check: user hide-list first, then admin/ACL."""
        if current_user.is_anonymous:
            return False
        if self.has_filters(current_user) and self.is_user_pref:
            hidden = self.hidden_clients(current_user.name)
            if (client, server) in hidden:
                return False
        if current_user.acl.is_admin():
            return True
        return current_user.acl.is_client_allowed(client, server)
    def is_client_rw(self, current_user, client, server=None):
        """Read-write check for a client: user hide-list first, then ACL."""
        if current_user.is_anonymous:
            return False
        if self.has_filters(current_user) and self.is_user_pref:
            hidden = self.hidden_clients(current_user.name)
            if (client, server) in hidden:
                return False
        if current_user.acl.is_admin():
            return True
        return current_user.acl.is_client_rw(client, server)
    def is_server_allowed(self, current_user, server):
        """Visibility check for a whole server: hide-list first, then ACL."""
        if current_user.is_anonymous:
            return False
        if self.has_filters(current_user) and self.is_user_pref:
            hidden = self.hidden_servers(current_user.name)
            if server in hidden:
                return False
        if current_user.acl.is_admin():
            return True
        return current_user.acl.is_server_allowed(server)
    def is_server_rw(self, current_user, server):
        """Read-write check for a whole server: hide-list first, then ACL."""
        if current_user.is_anonymous:
            return False
        if self.has_filters(current_user) and self.is_user_pref:
            hidden = self.hidden_servers(current_user.name)
            if server in hidden:
                return False
        if current_user.acl.is_admin():
            return True
        return current_user.acl.is_server_rw(server)
mask = BUIMask() | burpui/filter.py | class BUIMask(object):
"""Mask client/servers based on user preferences or user ACL"""
def init_app(self, app):
"""Initialize the mask
:param app: Application context
:type app: :class:`burpui.engines.server.BUIServer`
"""
self.app = app
@property
def is_user_pref(self):
return self.app.config["WITH_SQL"]
def has_filters(self, current_user):
if not current_user.is_anonymous and current_user.acl.is_admin():
if self.is_user_pref:
return self.query_user(current_user.name)
return False
return True
def query_user(self, username):
from .models import Hidden
return Hidden.query.filter_by(user=username).first()
def query_hidden(self, username):
from .models import Hidden
return (
Hidden.query.filter_by(user=username)
.with_entities(Hidden.client, Hidden.server)
.all()
)
def hidden_clients(self, username):
if not self.is_user_pref:
return []
return self.query_hidden(username)
def hidden_servers(self, username):
if not self.is_user_pref:
return []
hidden = self.query_hidden(username)
return [server for client, server in hidden if not client]
def is_client_allowed(self, current_user, client=None, server=None):
if current_user.is_anonymous:
return False
if self.has_filters(current_user) and self.is_user_pref:
hidden = self.hidden_clients(current_user.name)
if (client, server) in hidden:
return False
if current_user.acl.is_admin():
return True
return current_user.acl.is_client_allowed(client, server)
def is_client_rw(self, current_user, client, server=None):
if current_user.is_anonymous:
return False
if self.has_filters(current_user) and self.is_user_pref:
hidden = self.hidden_clients(current_user.name)
if (client, server) in hidden:
return False
if current_user.acl.is_admin():
return True
return current_user.acl.is_client_rw(client, server)
def is_server_allowed(self, current_user, server):
if current_user.is_anonymous:
return False
if self.has_filters(current_user) and self.is_user_pref:
hidden = self.hidden_servers(current_user.name)
if server in hidden:
return False
if current_user.acl.is_admin():
return True
return current_user.acl.is_server_allowed(server)
def is_server_rw(self, current_user, server):
if current_user.is_anonymous:
return False
if self.has_filters(current_user) and self.is_user_pref:
hidden = self.hidden_servers(current_user.name)
if server in hidden:
return False
if current_user.acl.is_admin():
return True
return current_user.acl.is_server_rw(server)
mask = BUIMask() | 0.782829 | 0.177009 |
from data_structures.linked_lists.singly_linked_list import LinkedList
class DoublyListNode:
    """A single node of a doubly linked list, holding a value and two links."""
    def __init__(self, value=None, next=None, previous=None):
        # `next`/`previous` keep their public names even though `next`
        # shadows the builtin: callers pass them as keywords.
        self.value, self.next, self.previous = value, next, previous
class DoublyLinkedList(LinkedList):
    """Doubly linked list with O(1) append and tail-to-head iteration.

    Negative indices address nodes from the tail, mirroring Python sequences.
    """
    def __init__(self, head=None, tail=None):
        self.head = head
        self.tail = tail
    # O(n)
    def __reversed__(self):
        """Yield values from tail to head."""
        node = self.tail
        while node:
            yield node.value
            node = node.previous
    def _walk_start(self, index):
        """Pick the traversal direction from the sign of *index*.
        Returns (start_node, start_index, index_step, advance) where
        *advance* moves one node in the chosen direction. Shared by
        insert_before and pop, which previously duplicated this setup.
        """
        if index >= 0:
            return self.head, 0, 1, (lambda node: node.next)
        return self.tail, -1, -1, (lambda node: node.previous)
    # O(n)
    # O(1) if it inserts before the first or the last item
    def insert_before(self, index, value):
        """Insert *value* before the node currently at *index*.
        On an empty list the index is ignored and the new node becomes the
        only element. Raises IndexError when *index* is out of range.
        """
        new_node = DoublyListNode(value)
        if not self.head:
            self.head = new_node
            self.tail = new_node
            return
        node, current_index, step, advance = self._walk_start(index)
        while node:
            if current_index == index:
                # Splice the new node in front of the matched node.
                new_node.next = node
                new_node.previous = node.previous
                if node.previous:
                    node.previous.next = new_node
                else:
                    self.head = new_node
                node.previous = new_node
                return
            node = advance(node)
            current_index += step
        raise IndexError
    # O(n)
    # O(1) if it pops the first or the last item
    def pop(self, index):
        """Remove and return the value at *index*.
        Raises IndexError on an empty list or an out-of-range index.
        """
        if not self.head:
            raise IndexError('pop from empty linked list')
        node, current_index, step, advance = self._walk_start(index)
        while node:
            if current_index == index:
                deleted_value = node.value
                next_node = node.next
                previous_node = node.previous
                # Unlink the node, patching head/tail when it was an end.
                if next_node:
                    next_node.previous = previous_node
                if previous_node:
                    previous_node.next = next_node
                if node is self.head:
                    self.head = next_node
                if node is self.tail:
                    self.tail = previous_node
                return deleted_value
            node = advance(node)
            current_index += step
        raise IndexError
    # O(1)
    def append(self, value):
        """Add *value* at the tail."""
        new_node = DoublyListNode(value)
        if not self.head:
            self.head = new_node
            self.tail = new_node
            return
        new_node.previous = self.tail
        self.tail.next = new_node
        self.tail = new_node
    # O(n)
    def reverse(self):
        """Reverse the list in place by swapping each node's two links."""
        node = self.head
        self.head, self.tail = self.tail, self.head
        while node:
            successor = node.next
            node.next, node.previous = node.previous, node.next
            node = successor
class DoublyListNode:
def __init__(self, value=None, next=None, previous=None):
self.value = value
self.next = next
self.previous = previous
class DoublyLinkedList(LinkedList):
def __init__(self, head=None, tail=None):
self.head = head
self.tail = tail
# O(n)
def __reversed__(self):
current_node = self.tail
while current_node:
yield current_node.value
current_node = current_node.previous
# O(n)
# O(1) if it inserts before the first or the last item
def insert_before(self, index, value):
new_node = DoublyListNode(value)
if not self.head:
self.head = new_node
self.tail = new_node
return
if index >= 0:
current_node = self.head
current_index = 0
index_step = 1
get_next_node = lambda node: node.next
else:
current_node = self.tail
current_index = -1
index_step = -1
get_next_node = lambda node: node.previous
while current_node:
if current_index == index:
new_node.next = current_node
new_node.previous = current_node.previous
if current_node.previous:
current_node.previous.next = new_node
else:
self.head = new_node
current_node.previous = new_node
return
current_node = get_next_node(current_node)
current_index += index_step
raise IndexError
# O(n)
# O(1) if it pops the first or the last item
def pop(self, index):
if not self.head:
raise IndexError('pop from empty linked list')
if index >= 0:
current_node = self.head
current_index = 0
index_step = 1
get_next_node = lambda node: node.next
else:
current_node = self.tail
current_index = -1
index_step = -1
get_next_node = lambda node: node.previous
while current_node:
if current_index == index:
deleted_value = current_node.value
next_node = current_node.next
previous_node = current_node.previous
if next_node:
next_node.previous = previous_node
if previous_node:
previous_node.next = next_node
if current_node == self.head:
self.head = next_node
if current_node == self.tail:
self.tail = previous_node
return deleted_value
current_node = get_next_node(current_node)
current_index += index_step
raise IndexError
# O(1)
def append(self, value):
new_node = DoublyListNode(value)
if not self.head:
self.head = new_node
self.tail = new_node
return
new_node.previous = self.tail
self.tail.next = new_node
self.tail = new_node
# O(n)
def reverse(self):
current_node = self.head
self.head, self.tail = self.tail, self.head
while current_node:
next_node = current_node.next
current_node.next, current_node.previous = current_node.previous, current_node.next
current_node = next_node | 0.619817 | 0.198646 |
from fbs_runtime import _state, FbsError
import os
import sys
def is_windows():
    """
    Return True if the current OS is Windows, False otherwise.
    Delegates to name(), which caches the detected platform in _state.
    """
    return name() == 'Windows'
def is_mac():
    """
    Return True if the current OS is macOS, False otherwise.
    Delegates to name(), which caches the detected platform in _state.
    """
    return name() == 'Mac'
def is_linux():
    """
    Return True if the current OS is Linux, False otherwise.
    Delegates to name(), which caches the detected platform in _state.
    """
    return name() == 'Linux'
def name():
    """
    Returns 'Windows', 'Mac' or 'Linux', depending on the current OS. If the OS
    can't be determined, FbsError is raised. The result is computed once and
    cached in the shared _state module.
    """
    cached = _state.PLATFORM_NAME
    if cached is None:
        cached = _get_name()
        _state.PLATFORM_NAME = cached
    return cached
def _get_name():
    """Map sys.platform onto the three OS families fbs distinguishes."""
    platform = sys.platform
    if platform.startswith('linux'):
        return 'Linux'
    if platform == 'darwin':
        return 'Mac'
    if platform in ('win32', 'cygwin'):
        return 'Windows'
    raise FbsError('Unknown operating system.')
def is_ubuntu():
    """True on Ubuntu or Linux Mint; False elsewhere or on lookup failure."""
    try:
        distro = linux_distribution()
    except FileNotFoundError:
        return False
    return distro in ('Ubuntu', 'Linux Mint')
def is_arch_linux():
    """True on Arch Linux; False elsewhere or on lookup failure."""
    try:
        distro = linux_distribution()
    except FileNotFoundError:
        return False
    return distro == 'Arch Linux'
def is_fedora():
    """True on Fedora or CentOS; False elsewhere or on lookup failure."""
    try:
        distro = linux_distribution()
    except FileNotFoundError:
        return False
    return distro in ('Fedora', 'CentOS Linux')
def linux_distribution():
    """Detect the Linux distribution once and cache it in _state."""
    cached = _state.LINUX_DISTRIBUTION
    if cached is None:
        cached = _get_linux_distribution()
        _state.LINUX_DISTRIBUTION = cached
    return cached
def _get_linux_distribution():
    """Read the distro name from /etc/os-release; '<unknown>' on failure."""
    if not is_linux():
        # Non-Linux platforms have no distribution at all.
        return ''
    distro = None
    try:
        distro = _get_os_release_name()
    except OSError:
        pass
    return distro if distro else '<unknown>'
def is_gnome_based():
    """Unity, GNOME and Cinnamon all count as GNOME-based desktops."""
    desktop = os.environ.get('XDG_CURRENT_DESKTOP', '')
    return desktop.lower() in ('unity', 'gnome', 'x-cinnamon')
def is_kde_based():
    """KDE per XDG_CURRENT_DESKTOP, or a GDM session name starting with 'kde'."""
    desktop = os.environ.get('XDG_CURRENT_DESKTOP', '')
    if desktop.lower() == 'kde':
        return True
    session = os.environ.get('GDMSESSION', '')
    return session.lower().startswith('kde')
def _get_os_release_name():
    # Scan /etc/os-release for the NAME= field and return it without quotes.
    # May raise OSError when the file is missing; callers handle that.
    with open('/etc/os-release', 'r') as f:
        for line in f:
            line = line.rstrip()
            if line.startswith('NAME='):
                name = line[len('NAME='):]
return name.strip('"') | venv/Lib/site-packages/fbs_runtime/platform.py | from fbs_runtime import _state, FbsError
import os
import sys
def is_windows():
"""
Return True if the current OS is Windows, False otherwise.
"""
return name() == 'Windows'
def is_mac():
"""
Return True if the current OS is macOS, False otherwise.
"""
return name() == 'Mac'
def is_linux():
"""
Return True if the current OS is Linux, False otherwise.
"""
return name() == 'Linux'
def name():
"""
Returns 'Windows', 'Mac' or 'Linux', depending on the current OS. If the OS
can't be determined, FbsError is raised.
"""
if _state.PLATFORM_NAME is None:
_state.PLATFORM_NAME = _get_name()
return _state.PLATFORM_NAME
def _get_name():
if sys.platform in ('win32', 'cygwin'):
return 'Windows'
if sys.platform == 'darwin':
return 'Mac'
if sys.platform.startswith('linux'):
return 'Linux'
raise FbsError('Unknown operating system.')
def is_ubuntu():
try:
return linux_distribution() in ('Ubuntu', 'Linux Mint')
except FileNotFoundError:
return False
def is_arch_linux():
try:
return linux_distribution() == 'Arch Linux'
except FileNotFoundError:
return False
def is_fedora():
try:
return linux_distribution() in ('Fedora', 'CentOS Linux')
except FileNotFoundError:
return False
def linux_distribution():
if _state.LINUX_DISTRIBUTION is None:
_state.LINUX_DISTRIBUTION = _get_linux_distribution()
return _state.LINUX_DISTRIBUTION
def _get_linux_distribution():
if not is_linux():
return ''
try:
os_release = _get_os_release_name()
except OSError:
pass
else:
if os_release:
return os_release
return '<unknown>'
def is_gnome_based():
curr_desktop = os.environ.get('XDG_CURRENT_DESKTOP', '').lower()
return curr_desktop in ('unity', 'gnome', 'x-cinnamon')
def is_kde_based():
curr_desktop = os.environ.get('XDG_CURRENT_DESKTOP', '').lower()
if curr_desktop == 'kde':
return True
gdmsession = os.environ.get('GDMSESSION', '').lower()
return gdmsession.startswith('kde')
def _get_os_release_name():
with open('/etc/os-release', 'r') as f:
for line in f:
line = line.rstrip()
if line.startswith('NAME='):
name = line[len('NAME='):]
return name.strip('"') | 0.306216 | 0.047558 |
import os
import re
import errno
import socket
import json
from chroma_agent.lib.shell import AgentShell
from toolz.functoolz import pipe
from toolz.itertoolz import getter
from toolz.curried import map as cmap, filter as cfilter, mapcat as cmapcat
import chroma_agent.lib.normalize_device_path as ndp
# Python errno doesn't include this code
errno.NO_MEDIA_ERRNO = 123
DEV_PATH = re.compile('^/dev/[^/]+$')
DISK_BY_ID_PATH = re.compile('^/dev/disk/by-id/')
DISK_BY_PATH_PATH = re.compile('^/dev/disk/by-path/')
MAPPER_PATH = re.compile('^/dev/mapper/')
PRECEDENCE = [
MAPPER_PATH, DISK_BY_ID_PATH, DISK_BY_PATH_PATH,
re.compile('.+')
]
def get_idx(x):
return [index for index, v in enumerate(PRECEDENCE) if v.match(x)][0]
def compare(x, y):
idx1 = get_idx(x)
idx2 = get_idx(y)
if idx1 == idx2:
return 0
elif idx1 > idx2:
return 1
return -1
def sort_paths(xs):
return sorted(xs, cmp=compare)
def scanner_cmd(cmd):
client = socket.socket(socket.AF_UNIX)
client.settimeout(1)
client.connect_ex("/var/run/device-scanner.sock")
client.sendall(json.dumps({"ACTION": cmd}))
client.shutdown(socket.SHUT_WR)
out = ''
while True:
data = client.recv(1024)
size = len(data)
if size == 0:
break
out += data
return json.loads(out)
def get_default(prop, default_value, x):
y = x.get(prop, default_value)
return y if y is not None else default_value
def get_major_minor(x):
return "%s:%s" % (x.get('MAJOR'), x.get('MINOR'))
def as_device(x):
paths = sort_paths(get_default('PATHS', [], x))
path = next(iter(paths), None)
return {
'major_minor': get_major_minor(x),
'path': path,
'paths': paths,
'serial_80': x.get('IML_SCSI_80'),
'serial_83': x.get('IML_SCSI_83'),
'size': int(get_default('IML_SIZE', 0, x)) * 512,
'filesystem_type': x.get('ID_FS_TYPE'),
'device_type': x.get('DEVTYPE'),
'device_path': x.get('DEVPATH'),
'partition_number': x.get('ID_PART_ENTRY_NUMBER'),
'is_ro': x.get('IML_IS_RO'),
'parent': None
}
def get_parent_path(p):
return os.sep.join(p.split(os.sep)[0:-1])
def find_device_by_device_path(p, xs):
return next((d for d in xs if d['device_path'] == p), None)
def mutate_parent_prop(xs):
disks = [x for x in xs if x['device_type'] == 'disk']
partitions = [x for x in xs if x['device_type'] == 'partition']
for x in partitions:
parent_path = get_parent_path(x['device_path'])
device = find_device_by_device_path(parent_path, disks)
if device:
x['parent'] = device['major_minor']
def filter_device(x):
# Exclude zero-sized devices
if x['size'] == 0 or x['is_ro']:
return False
return True
def fetch_device_list():
AgentShell.run(["udevadm", "settle"])
info = scanner_cmd("info")
return pipe(info.itervalues(),
cmap(as_device), cfilter(filter_device), list)
def add_to_ndp(xs, ys):
for x in xs:
for y in ys:
ndp.add_normalized_device(x, y)
def build_ndp_from_device(x):
paths = x['paths']
dev_paths = filter(DEV_PATH.match, paths)
disk_by_id_paths = filter(DISK_BY_ID_PATH.match, paths)
disk_by_path_paths = filter(DISK_BY_PATH_PATH.match, paths)
mapper_paths = filter(MAPPER_PATH.match, paths)
add_to_ndp(dev_paths, disk_by_path_paths)
add_to_ndp(dev_paths, disk_by_id_paths)
add_to_ndp(disk_by_path_paths, mapper_paths)
add_to_ndp(disk_by_id_paths, mapper_paths)
class BlockDevices(object):
MAPPERPATH = os.path.join('/dev', 'mapper')
DISKBYIDPATH = os.path.join('/dev', 'disk', 'by-id')
def __init__(self):
(self.block_device_nodes,
self.node_block_devices) = self._parse_sys_block()
def _parse_sys_block(self):
xs = fetch_device_list()
mutate_parent_prop(xs)
node_block_devices = reduce(
lambda d, x: dict(d, **{x['path']: x['major_minor']}), xs, {})
block_device_nodes = reduce(
lambda d, x: dict(d, **{x['major_minor']: x}), xs, {})
map(build_ndp_from_device, xs)
return (block_device_nodes, node_block_devices)
def paths_to_major_minors(self, device_paths):
"""
Create a list of device major minors for a list of
device paths from _path_to_major_minor dict.
If any of the paths come back as None, continue to the next.
:param device_paths: The list of paths to get
the list of major minors for.
:return: list of dev_major_minors, or an empty
list if any device_path is not found.
"""
return pipe(device_paths,
cmap(self.path_to_major_minor), cfilter(None), list)
def path_to_major_minor(self, device_path):
""" Return device major minor for a given device path """
return self.node_block_devices.get(
ndp.normalized_device_path(device_path))
def composite_device_list(self, source_devices):
"""
This function takes a bunch of devices like MdRaid, EMCPower
which are effectively composite devices made up
from a collection of other devices and returns that
list with the drives and everything nicely assembled.
"""
devices = {}
for device in source_devices:
drive_mms = self.paths_to_major_minors(device['device_paths'])
if drive_mms:
devices[device['uuid']] = {
'path': device['path'],
'block_device': device['mm'],
'drives': drive_mms
}
# Finally add these devices to the canonical path list.
for device_path in device['device_paths']:
ndp.add_normalized_device(device_path, device['path'])
return devices
def find_block_devs(self, folder):
# Map of major_minor to path
# Should be able to look at the paths prop for all devs, and put
# matching MM to path back in a list.
def build_paths(x):
return [(x['major_minor'], path) for path in x['paths']
if path.startswith(folder)]
return pipe(self.block_device_nodes.itervalues(),
cmapcat(build_paths), dict)
@classmethod
def quick_scan(cls):
"""
Return a very quick list of block devices from
a number of sources so we can quickly see changes.
"""
return pipe(fetch_device_list(), cmapcat(getter("paths")), sorted) | chroma-agent/chroma_agent/device_plugins/linux_components/block_devices.py |
import os
import re
import errno
import socket
import json
from chroma_agent.lib.shell import AgentShell
from toolz.functoolz import pipe
from toolz.itertoolz import getter
from toolz.curried import map as cmap, filter as cfilter, mapcat as cmapcat
import chroma_agent.lib.normalize_device_path as ndp
# Python errno doesn't include this code
errno.NO_MEDIA_ERRNO = 123
DEV_PATH = re.compile('^/dev/[^/]+$')
DISK_BY_ID_PATH = re.compile('^/dev/disk/by-id/')
DISK_BY_PATH_PATH = re.compile('^/dev/disk/by-path/')
MAPPER_PATH = re.compile('^/dev/mapper/')
PRECEDENCE = [
MAPPER_PATH, DISK_BY_ID_PATH, DISK_BY_PATH_PATH,
re.compile('.+')
]
def get_idx(x):
return [index for index, v in enumerate(PRECEDENCE) if v.match(x)][0]
def compare(x, y):
idx1 = get_idx(x)
idx2 = get_idx(y)
if idx1 == idx2:
return 0
elif idx1 > idx2:
return 1
return -1
def sort_paths(xs):
return sorted(xs, cmp=compare)
def scanner_cmd(cmd):
client = socket.socket(socket.AF_UNIX)
client.settimeout(1)
client.connect_ex("/var/run/device-scanner.sock")
client.sendall(json.dumps({"ACTION": cmd}))
client.shutdown(socket.SHUT_WR)
out = ''
while True:
data = client.recv(1024)
size = len(data)
if size == 0:
break
out += data
return json.loads(out)
def get_default(prop, default_value, x):
y = x.get(prop, default_value)
return y if y is not None else default_value
def get_major_minor(x):
return "%s:%s" % (x.get('MAJOR'), x.get('MINOR'))
def as_device(x):
paths = sort_paths(get_default('PATHS', [], x))
path = next(iter(paths), None)
return {
'major_minor': get_major_minor(x),
'path': path,
'paths': paths,
'serial_80': x.get('IML_SCSI_80'),
'serial_83': x.get('IML_SCSI_83'),
'size': int(get_default('IML_SIZE', 0, x)) * 512,
'filesystem_type': x.get('ID_FS_TYPE'),
'device_type': x.get('DEVTYPE'),
'device_path': x.get('DEVPATH'),
'partition_number': x.get('ID_PART_ENTRY_NUMBER'),
'is_ro': x.get('IML_IS_RO'),
'parent': None
}
def get_parent_path(p):
return os.sep.join(p.split(os.sep)[0:-1])
def find_device_by_device_path(p, xs):
return next((d for d in xs if d['device_path'] == p), None)
def mutate_parent_prop(xs):
disks = [x for x in xs if x['device_type'] == 'disk']
partitions = [x for x in xs if x['device_type'] == 'partition']
for x in partitions:
parent_path = get_parent_path(x['device_path'])
device = find_device_by_device_path(parent_path, disks)
if device:
x['parent'] = device['major_minor']
def filter_device(x):
# Exclude zero-sized devices
if x['size'] == 0 or x['is_ro']:
return False
return True
def fetch_device_list():
AgentShell.run(["udevadm", "settle"])
info = scanner_cmd("info")
return pipe(info.itervalues(),
cmap(as_device), cfilter(filter_device), list)
def add_to_ndp(xs, ys):
for x in xs:
for y in ys:
ndp.add_normalized_device(x, y)
def build_ndp_from_device(x):
paths = x['paths']
dev_paths = filter(DEV_PATH.match, paths)
disk_by_id_paths = filter(DISK_BY_ID_PATH.match, paths)
disk_by_path_paths = filter(DISK_BY_PATH_PATH.match, paths)
mapper_paths = filter(MAPPER_PATH.match, paths)
add_to_ndp(dev_paths, disk_by_path_paths)
add_to_ndp(dev_paths, disk_by_id_paths)
add_to_ndp(disk_by_path_paths, mapper_paths)
add_to_ndp(disk_by_id_paths, mapper_paths)
class BlockDevices(object):
MAPPERPATH = os.path.join('/dev', 'mapper')
DISKBYIDPATH = os.path.join('/dev', 'disk', 'by-id')
def __init__(self):
(self.block_device_nodes,
self.node_block_devices) = self._parse_sys_block()
def _parse_sys_block(self):
xs = fetch_device_list()
mutate_parent_prop(xs)
node_block_devices = reduce(
lambda d, x: dict(d, **{x['path']: x['major_minor']}), xs, {})
block_device_nodes = reduce(
lambda d, x: dict(d, **{x['major_minor']: x}), xs, {})
map(build_ndp_from_device, xs)
return (block_device_nodes, node_block_devices)
def paths_to_major_minors(self, device_paths):
"""
Create a list of device major minors for a list of
device paths from _path_to_major_minor dict.
If any of the paths come back as None, continue to the next.
:param device_paths: The list of paths to get
the list of major minors for.
:return: list of dev_major_minors, or an empty
list if any device_path is not found.
"""
return pipe(device_paths,
cmap(self.path_to_major_minor), cfilter(None), list)
def path_to_major_minor(self, device_path):
""" Return device major minor for a given device path """
return self.node_block_devices.get(
ndp.normalized_device_path(device_path))
def composite_device_list(self, source_devices):
"""
This function takes a bunch of devices like MdRaid, EMCPower
which are effectively composite devices made up
from a collection of other devices and returns that
list with the drives and everything nicely assembled.
"""
devices = {}
for device in source_devices:
drive_mms = self.paths_to_major_minors(device['device_paths'])
if drive_mms:
devices[device['uuid']] = {
'path': device['path'],
'block_device': device['mm'],
'drives': drive_mms
}
# Finally add these devices to the canonical path list.
for device_path in device['device_paths']:
ndp.add_normalized_device(device_path, device['path'])
return devices
def find_block_devs(self, folder):
# Map of major_minor to path
# Should be able to look at the paths prop for all devs, and put
# matching MM to path back in a list.
def build_paths(x):
return [(x['major_minor'], path) for path in x['paths']
if path.startswith(folder)]
return pipe(self.block_device_nodes.itervalues(),
cmapcat(build_paths), dict)
@classmethod
def quick_scan(cls):
"""
Return a very quick list of block devices from
a number of sources so we can quickly see changes.
"""
return pipe(fetch_device_list(), cmapcat(getter("paths")), sorted) | 0.471223 | 0.146973 |
import time
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
# Resistance in ohms of the resistor
r = 1
# Capacitance of the capacitor
C = 1
Vin = 10
b = 0
old_v2 = 0
delta_t = 5e-3
t = 5e-3
# Initial guess for the first step (literally doesn't matter)
input_vec = [10, 5, -2]
results = [[],[]]
s = time.time()
while True:
# Load the variables from the input vector
v1 = input_vec[0]
v2 = input_vec[1]
iv = input_vec[2]
# Calculate the values in result vector
result_vector = [0, 0, 0]
result_vector[0] = (v1-v2)/r + iv
result_vector[1] = (v2-v1)/r + C * (v2-old_v2)/delta_t
result_vector[2] = v1 - Vin
# Create the Jacobian for this input vector
jac = np.array([[1/r,-1/r,1],[-1/r,1/r+C/delta_t,0],[1,0,0]])
inv_jac = np.linalg.inv(jac)
res = inv_jac @ np.array(result_vector)
old_input_vec = deepcopy(input_vec)
input_vec -= res
if np.allclose(old_input_vec, input_vec, rtol=0, atol=1e-10):
results[0].append(t)
results[1].append(v2)
if t>=1+b:
b += 1
Vin = 0 if Vin == 10 else 10
print(f"t = {t}")
print(input_vec)
print(result_vector)
print(time.time()-s)
#x = input()
if t >= 20:
fig, ax = plt.subplots(1, 1, figsize=(30,10))
ax.scatter(results[0], results[1])
plt.show()
peaks = []
b4 = -1
up = True
for i in results[1]:
if up:
if i > b4:
b4 = i
else:
peaks.append(b4)
up = False
else:
if i > b4:
up = True
print(peaks)
break
t += delta_t
old_v2 = v2
'''
inv_jac = jac.inv()
res = jac * sp.Matrix(result_vector)
input(res)
input_vec = input_vec - res
print(input_vec)
print(result_vector)
input()
''' | Initial Testing/Jacobian Capacitor Try.py |
import time
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
# Resistance in ohms of the resistor
r = 1
# Capacitance of the capacitor
C = 1
Vin = 10
b = 0
old_v2 = 0
delta_t = 5e-3
t = 5e-3
# Initial guess for the first step (literally doesn't matter)
input_vec = [10, 5, -2]
results = [[],[]]
s = time.time()
while True:
# Load the variables from the input vector
v1 = input_vec[0]
v2 = input_vec[1]
iv = input_vec[2]
# Calculate the values in result vector
result_vector = [0, 0, 0]
result_vector[0] = (v1-v2)/r + iv
result_vector[1] = (v2-v1)/r + C * (v2-old_v2)/delta_t
result_vector[2] = v1 - Vin
# Create the Jacobian for this input vector
jac = np.array([[1/r,-1/r,1],[-1/r,1/r+C/delta_t,0],[1,0,0]])
inv_jac = np.linalg.inv(jac)
res = inv_jac @ np.array(result_vector)
old_input_vec = deepcopy(input_vec)
input_vec -= res
if np.allclose(old_input_vec, input_vec, rtol=0, atol=1e-10):
results[0].append(t)
results[1].append(v2)
if t>=1+b:
b += 1
Vin = 0 if Vin == 10 else 10
print(f"t = {t}")
print(input_vec)
print(result_vector)
print(time.time()-s)
#x = input()
if t >= 20:
fig, ax = plt.subplots(1, 1, figsize=(30,10))
ax.scatter(results[0], results[1])
plt.show()
peaks = []
b4 = -1
up = True
for i in results[1]:
if up:
if i > b4:
b4 = i
else:
peaks.append(b4)
up = False
else:
if i > b4:
up = True
print(peaks)
break
t += delta_t
old_v2 = v2
'''
inv_jac = jac.inv()
res = jac * sp.Matrix(result_vector)
input(res)
input_vec = input_vec - res
print(input_vec)
print(result_vector)
input()
''' | 0.451568 | 0.561215 |
from bs4 import BeautifulSoup
import requests
import re
import wget
from subprocess import call
import argparse
# https://sdl-stickershop.line.naver.jp/stickershop/v1/sticker/17941023/IOS/sticker_animation@2x.png
# https://sdl-stickershop.line.naver.jp/stickershop/v1/sticker/17941023/android/sticker.png
def striphtml(data):
p = re.search(r'\(http.+\);',data)
return p.group(0)[1:-16].replace("android/sticker.png","IOS/sticker_popup.png")
def striphtml2(data):
p = re.search(r'\(http.+\);',data)
return p.group(0)[1:-16].replace("android/sticker.png","IOS/sticker_animation@2x.png")
def getNumber(data):
p = re.findall(r'\d+', data)
return p[1]
def crawl(product,isFull):
r = requests.get('https://store.line.me/stickershop/product/' + product + '/zh-Hant')
soup = BeautifulSoup(r.text, 'html.parser')
imageArray = soup.findAll("span", { "class" : "mdCMN09Image" })
i = 1
call(["mkdir",product])
for eachSpan in imageArray:
imageUrl = ""
tmpName = ""
if isFull:
imageUrl = striphtml(str(eachSpan))
tmpName = "sticker_popup"
else:
imageUrl = striphtml2(str(eachSpan))
tmpName = "sticker_animation"
# imageUrl = str(eachSpan)
print imageUrl
fileNumber = getNumber(str(imageUrl))
filename = wget.download(imageUrl)
saveFilename = filename.replace(".png","").replace(tmpName,fileNumber) + ".gif"
try:
temp = call(["./apng2gif", filename, "./" + product + "/" + saveFilename])
except Exception, e:
print "error"
print e;
print "temp"+str(temp)
call(["rm","-rf" ,filename])
i = i + 1
call(['zip',"-r",product + ".zip", "./" + product])
call(["rm","-rf" ,"./" + product])
def main():
product = parse_args().product
full = parse_args().full
crawl(product,full)
def parse_args():
parser = argparse.ArgumentParser(description='product')
parser.add_argument("-p", "--product", help="product number", type=str, required=True)
parser.add_argument("-f", "--full", help="Is full popup image?", type=bool, required=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
main() | line.py | from bs4 import BeautifulSoup
import requests
import re
import wget
from subprocess import call
import argparse
# https://sdl-stickershop.line.naver.jp/stickershop/v1/sticker/17941023/IOS/sticker_animation@2x.png
# https://sdl-stickershop.line.naver.jp/stickershop/v1/sticker/17941023/android/sticker.png
def striphtml(data):
p = re.search(r'\(http.+\);',data)
return p.group(0)[1:-16].replace("android/sticker.png","IOS/sticker_popup.png")
def striphtml2(data):
p = re.search(r'\(http.+\);',data)
return p.group(0)[1:-16].replace("android/sticker.png","IOS/sticker_animation@2x.png")
def getNumber(data):
p = re.findall(r'\d+', data)
return p[1]
def crawl(product,isFull):
r = requests.get('https://store.line.me/stickershop/product/' + product + '/zh-Hant')
soup = BeautifulSoup(r.text, 'html.parser')
imageArray = soup.findAll("span", { "class" : "mdCMN09Image" })
i = 1
call(["mkdir",product])
for eachSpan in imageArray:
imageUrl = ""
tmpName = ""
if isFull:
imageUrl = striphtml(str(eachSpan))
tmpName = "sticker_popup"
else:
imageUrl = striphtml2(str(eachSpan))
tmpName = "sticker_animation"
# imageUrl = str(eachSpan)
print imageUrl
fileNumber = getNumber(str(imageUrl))
filename = wget.download(imageUrl)
saveFilename = filename.replace(".png","").replace(tmpName,fileNumber) + ".gif"
try:
temp = call(["./apng2gif", filename, "./" + product + "/" + saveFilename])
except Exception, e:
print "error"
print e;
print "temp"+str(temp)
call(["rm","-rf" ,filename])
i = i + 1
call(['zip',"-r",product + ".zip", "./" + product])
call(["rm","-rf" ,"./" + product])
def main():
product = parse_args().product
full = parse_args().full
crawl(product,full)
def parse_args():
parser = argparse.ArgumentParser(description='product')
parser.add_argument("-p", "--product", help="product number", type=str, required=True)
parser.add_argument("-f", "--full", help="Is full popup image?", type=bool, required=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
main() | 0.188287 | 0.071009 |
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Boolean, create_engine
Base = declarative_base()
engine = create_engine('sqlite:///db.db?check_same_thread=False')
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
state = Column(Integer, default=0)
language_code = Column(String, nullable=False)
telegram_id = Column(String, nullable=False)
name = Column(String, default='', nullable=False)
link = Column(String, default='', nullable=False)
work = Column(String, default='', nullable=False)
about = Column(String, default='', nullable=False)
companies = Column(String, default='', nullable=False)
is_active = Column(Boolean, default=True, nullable=False)
is_verified = Column(Boolean, default=False, nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
def __repr__(self):
return (f'<User {self.id} state:{self.state} telegram_id:{self.telegram_id} telegram_id:{self.created_at}>')
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
name = Column(String, default='', nullable=False, unique=True)
password = Column(String, nullable=False, unique=True)
company_admin = Column(String, default='', nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
def __repr__(self):
return (f'<Company {self.id} name:{self.name} company_admin:{self.company_admin} created_at:{self.created_at}>')
class Pair(Base):
__tablename__ = 'pair'
id = Column(Integer, primary_key=True)
user_a = Column(String, nullable=False)
user_b = Column(String, nullable=False)
company = Column(String, nullable=False)
paired_at = Column(DateTime, nullable=False)
created_at = Column(
DateTime, default=datetime.datetime.now, nullable=False)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now, nullable=False)
def __repr__(self):
return (f'<Pair {self.id} user_a:{self.user_a} user_b:{self.user_b} paired_at:{self.paired_at}>')
Base.metadata.create_all(engine) | src/backend/models.py | import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Boolean, create_engine
Base = declarative_base()
engine = create_engine('sqlite:///db.db?check_same_thread=False')
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
state = Column(Integer, default=0)
language_code = Column(String, nullable=False)
telegram_id = Column(String, nullable=False)
name = Column(String, default='', nullable=False)
link = Column(String, default='', nullable=False)
work = Column(String, default='', nullable=False)
about = Column(String, default='', nullable=False)
companies = Column(String, default='', nullable=False)
is_active = Column(Boolean, default=True, nullable=False)
is_verified = Column(Boolean, default=False, nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
def __repr__(self):
return (f'<User {self.id} state:{self.state} telegram_id:{self.telegram_id} telegram_id:{self.created_at}>')
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
name = Column(String, default='', nullable=False, unique=True)
password = Column(String, nullable=False, unique=True)
company_admin = Column(String, default='', nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
def __repr__(self):
return (f'<Company {self.id} name:{self.name} company_admin:{self.company_admin} created_at:{self.created_at}>')
class Pair(Base):
__tablename__ = 'pair'
id = Column(Integer, primary_key=True)
user_a = Column(String, nullable=False)
user_b = Column(String, nullable=False)
company = Column(String, nullable=False)
paired_at = Column(DateTime, nullable=False)
created_at = Column(
DateTime, default=datetime.datetime.now, nullable=False)
updated_at = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now, nullable=False)
def __repr__(self):
return (f'<Pair {self.id} user_a:{self.user_a} user_b:{self.user_b} paired_at:{self.paired_at}>')
Base.metadata.create_all(engine) | 0.55447 | 0.113064 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_number', models.CharField(max_length=10)),
('pincode', models.CharField(max_length=7)),
('address', models.CharField(max_length=256)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hotel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('gst', models.DecimalField(decimal_places=2, max_digits=4)),
('hoteladmin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.CustomUser')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.SmallIntegerField(default=1)),
('order_time', models.DateTimeField()),
('delivery_time', models.DateTimeField(blank=True, null=True)),
('amount', models.DecimalField(decimal_places=2, max_digits=15)),
('order_type', models.SmallIntegerField(default=1)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='foodcartapp.CustomUser')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('half_price', models.DecimalField(decimal_places=2, max_digits=8)),
('full_price', models.DecimalField(decimal_places=2, max_digits=8)),
('availabilty', models.BooleanField(default=True)),
('image', models.ImageField(upload_to='')),
('special_status', models.BooleanField(default=False)),
('category', models.CharField(max_length=50)),
('hotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Hotel')),
],
),
migrations.CreateModel(
name='OrderDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.DecimalField(decimal_places=2, max_digits=8)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Product')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('pincode', models.CharField(max_length=7)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.City')),
],
),
migrations.AddField(
model_name='hotel',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Location'),
),
] | foodcartapp/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_number', models.CharField(max_length=10)),
('pincode', models.CharField(max_length=7)),
('address', models.CharField(max_length=256)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hotel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('gst', models.DecimalField(decimal_places=2, max_digits=4)),
('hoteladmin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.CustomUser')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.SmallIntegerField(default=1)),
('order_time', models.DateTimeField()),
('delivery_time', models.DateTimeField(blank=True, null=True)),
('amount', models.DecimalField(decimal_places=2, max_digits=15)),
('order_type', models.SmallIntegerField(default=1)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='foodcartapp.CustomUser')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('half_price', models.DecimalField(decimal_places=2, max_digits=8)),
('full_price', models.DecimalField(decimal_places=2, max_digits=8)),
('availabilty', models.BooleanField(default=True)),
('image', models.ImageField(upload_to='')),
('special_status', models.BooleanField(default=False)),
('category', models.CharField(max_length=50)),
('hotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Hotel')),
],
),
migrations.CreateModel(
name='OrderDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.DecimalField(decimal_places=2, max_digits=8)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Product')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('pincode', models.CharField(max_length=7)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.City')),
],
),
migrations.AddField(
model_name='hotel',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodcartapp.Location'),
),
] | 0.570451 | 0.132038 |
import core
# +---+ +---+
# | X | | Y |
# +---+ +---+
def bn_independent():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y'])
return g
# +---+ +---+
# | X |---->| Y |
# +---+ +---+
def bn_dependent():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y'])
g.add_edge('X', 'Y')
return g
# +---+ +---+ +---+
# | X |---->| Y |---->| Z |
# +---+ +---+ +---+
def bn_chain():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Y'), ('Y', 'Z')])
return g
# +---+ +---+ +---+
# | Y |<----| X |---->| Z |
# +---+ +---+ +---+
def bn_naive_bayes():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Y'), ('X', 'Z')])
return g
# +---+ +---+ +---+
# | X |---->| Z |<----| Y |
# +---+ +---+ +---+
def bn_v_structure():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Z'), ('Y', 'Z')])
return g
# +---+
# | X |
# +---+
# |
# v
# +---+ +---+
# | Y |<----| W |
# +---+ +---+
# | |
# v |
# +---+ |
# | Z |<------+
# +---+
def bn_koller():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'W', 'Z'])
g.add_edges_from([('X', 'Y'), ('W', 'Y'), ('W', 'Z'), ('Y', 'Z')])
return g
# +------------+ +---------+
# | Earthquake | | Burglar |
# +------------+ +---------+
# | | |
# | | +-------+ |
# | +---->| Alarm |<----+
# v +-------+
# +-------+ |
# | Radio | |
# +-------+ v
# +-------+
# | Phone |
# +-------+
def bn_earthquake():
g = core.BayesNet()
g.add_nodes_from(['Earthquake', 'Burglar', 'Alarm', 'Radio', 'Phone'])
g.add_edges_from([('Earthquake', 'Radio'),
('Earthquake', 'Alarm'),
('Burglar', 'Alarm'),
('Alarm', 'Phone')])
return g | Bayesian Networks/gibbs sampling/examples_dsep.py | import core
# +---+ +---+
# | X | | Y |
# +---+ +---+
def bn_independent():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y'])
return g
# +---+ +---+
# | X |---->| Y |
# +---+ +---+
def bn_dependent():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y'])
g.add_edge('X', 'Y')
return g
# +---+ +---+ +---+
# | X |---->| Y |---->| Z |
# +---+ +---+ +---+
def bn_chain():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Y'), ('Y', 'Z')])
return g
# +---+ +---+ +---+
# | Y |<----| X |---->| Z |
# +---+ +---+ +---+
def bn_naive_bayes():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Y'), ('X', 'Z')])
return g
# +---+ +---+ +---+
# | X |---->| Z |<----| Y |
# +---+ +---+ +---+
def bn_v_structure():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'Z'])
g.add_edges_from([('X', 'Z'), ('Y', 'Z')])
return g
# +---+
# | X |
# +---+
# |
# v
# +---+ +---+
# | Y |<----| W |
# +---+ +---+
# | |
# v |
# +---+ |
# | Z |<------+
# +---+
def bn_koller():
g = core.BayesNet()
g.add_nodes_from(['X', 'Y', 'W', 'Z'])
g.add_edges_from([('X', 'Y'), ('W', 'Y'), ('W', 'Z'), ('Y', 'Z')])
return g
# +------------+ +---------+
# | Earthquake | | Burglar |
# +------------+ +---------+
# | | |
# | | +-------+ |
# | +---->| Alarm |<----+
# v +-------+
# +-------+ |
# | Radio | |
# +-------+ v
# +-------+
# | Phone |
# +-------+
def bn_earthquake():
g = core.BayesNet()
g.add_nodes_from(['Earthquake', 'Burglar', 'Alarm', 'Radio', 'Phone'])
g.add_edges_from([('Earthquake', 'Radio'),
('Earthquake', 'Alarm'),
('Burglar', 'Alarm'),
('Alarm', 'Phone')])
return g | 0.53777 | 0.170335 |
import datetime
from django.core.management import call_command
from django.urls import reverse
from dateutil.relativedelta import relativedelta
from django.utils.timezone import now, get_current_timezone
from factory.fuzzy import FuzzyDate
from django_countries.fields import Country as DjangoCountry
from freezegun import freeze_time
from jmespath import search as s
from fixturedb.factories.win import create_win_factory
from mi.models import Country
from mi.tests.base_test_case import (
MiApiViewsWithWinsBaseTestCase,
MiApiViewsBaseTestCase
)
from mi.utils import sort_campaigns_by
from mi.tests.utils import GenericTopNonHvcWinsTestMixin, GenericWinTableTestMixin, GenericMonthlyViewTestCase
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountryBaseViewTestCase(MiApiViewsWithWinsBaseTestCase):
    """Shared fixture data and URL helpers for the country MI view tests."""

    # Canonical export value attached to wins created by the subclasses.
    export_value = 100000
    # Reference win dates: one inside FY 2017/18, one inside FY 2016/17,
    # and the last day of FY 2016/17.
    win_date_2017 = datetime.datetime(2017, 4, 25, tzinfo=get_current_timezone())
    win_date_2016 = datetime.datetime(2016, 5, 25, tzinfo=get_current_timezone())
    fy_2016_last_date = datetime.datetime(2017, 3, 31, tzinfo=get_current_timezone())

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Make sure the full HVC set exists before any test in the class runs.
        call_command('create_missing_hvcs', verbose=False)

    def get_url_for_year(self, year, base_url=None):
        """Return *base_url* (default ``self.view_base_url``) with a ``year`` query string."""
        target = base_url or self.view_base_url
        return '{base}?year={year}'.format(base=target, year=year)
class CountryDetailTestCase(CountryBaseViewTestCase):
    """Tests for the `mi:country_detail` endpoint, exercised against France (FR).

    The repeated 12-assertion export-wins block that used to be copy-pasted
    into every test lives in `_assert_export_wins`; each test now only states
    the win it creates and the expected confirmed/unconfirmed counts.
    """

    TEST_COUNTRY_CODE = "FR"
    country_detail_url = reverse(
        'mi:country_detail', kwargs={"country_code": "FR"})
    # "ABC" is not a known country code, so this URL should always 404.
    country_detail_url_invalid = reverse(
        'mi:country_detail', kwargs={"country_code": "ABC"})

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(self.user, sector_choices=self.TEAM_1_SECTORS)
        self.view_base_url = self.country_detail_url

    def _assert_export_wins(self, hvc_confirmed=0, hvc_unconfirmed=0,
                            non_hvc_confirmed=0, non_hvc_unconfirmed=0,
                            check_name=True):
        """Fetch ``self.url`` and assert the export-win breakdown.

        Every win created by these tests is worth ``self.export_value``, so
        each bucket's expected value is ``count * self.export_value`` and the
        totals are the sums of the HVC and non-HVC buckets.
        """
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        if check_name:
            self.assertEqual(api_response["name"], "France")
        export = api_response["wins"]["export"]
        for section, confirmed, unconfirmed in (
                ("hvc", hvc_confirmed, hvc_unconfirmed),
                ("non_hvc", non_hvc_confirmed, non_hvc_unconfirmed)):
            self.assertEqual(export[section]["number"]["confirmed"], confirmed)
            self.assertEqual(export[section]["number"]["unconfirmed"], unconfirmed)
            self.assertEqual(export[section]["value"]["confirmed"],
                             confirmed * self.export_value)
            self.assertEqual(export[section]["value"]["unconfirmed"],
                             unconfirmed * self.export_value)
        confirmed_total = hvc_confirmed + non_hvc_confirmed
        unconfirmed_total = hvc_unconfirmed + non_hvc_unconfirmed
        self.assertEqual(export["totals"]["number"]["confirmed"], confirmed_total)
        self.assertEqual(export["totals"]["number"]["unconfirmed"], unconfirmed_total)
        self.assertEqual(export["totals"]["value"]["confirmed"],
                         confirmed_total * self.export_value)
        self.assertEqual(export["totals"]["value"]["unconfirmed"],
                         unconfirmed_total * self.export_value)

    def test_2017_detail_in_2016_404(self):
        """An unknown country code 404s on the 2016 view."""
        self.view_base_url = self.country_detail_url_invalid
        self.url = self.get_url_for_year(2016)
        self._get_api_response(self.url, status_code=404)

    def test_2016_detail_in_2017_404(self):
        """An unknown country code 404s on the 2017 view."""
        self.view_base_url = self.country_detail_url_invalid
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)

    def test_detail_json_2016_no_wins(self):
        """Full JSON contract for 2016 when there are no wins at all."""
        self.url = self.get_url_for_year(2016)
        self.expected_response = {
            "name": "France",
            "id": "FR",
            "wins": {
                "export": {
                    "totals": {
                        "number": {
                            "grand_total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "grand_total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    },
                    "non_hvc": {
                        "number": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    },
                    "hvc": {
                        "number": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    }
                },
                "non_export": {
                    "number": {
                        "total": 0,
                        "unconfirmed": 0,
                        "confirmed": 0
                    },
                    "value": {
                        "total": 0,
                        "unconfirmed": 0,
                        "confirmed": 0
                    }
                }
            },
            "hvcs": {
                "campaigns": [
                    'HVC: E045',
                    'HVC: E046',
                    'HVC: E047',
                    'HVC: E048',
                    'HVC: E214'
                ],
                "target": 50000000
            },
            "avg_time_to_confirm": 0.0
        }
        self.assertResponse()

    def test_detail_json_2017_no_wins(self):
        """Full JSON contract for 2017 when there are no wins at all."""
        self.url = self.get_url_for_year(2017)
        self.expected_response = {
            "name": "France",
            "id": "FR",
            "wins": {
                "export": {
                    "totals": {
                        "number": {
                            "grand_total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "grand_total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    },
                    "non_hvc": {
                        "number": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    },
                    "hvc": {
                        "number": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        },
                        "value": {
                            "total": 0,
                            "unconfirmed": 0,
                            "confirmed": 0
                        }
                    }
                },
                "non_export": {
                    "number": {
                        "total": 0,
                        "unconfirmed": 0,
                        "confirmed": 0
                    },
                    "value": {
                        "total": 0,
                        "unconfirmed": 0,
                        "confirmed": 0
                    }
                }
            },
            "hvcs": {
                "campaigns": ['E04517', 'E04617', 'E04717'],
                "target": 110000000
            },
            "avg_time_to_confirm": 0.0
        }
        self.assertResponse()

    def test_detail_one_confirmed_2016_hvc_win_doesnt_appear_in_2016(self):
        # NOTE(review): despite the name, the assertions expect this 2016 win
        # TO appear in the 2016 view — the method name looks stale; confirm.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2016)
        self._assert_export_wins(hvc_confirmed=1)

    def test_detail_2017_one_confirmed_hvc_win(self):
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(hvc_confirmed=1)

    def test_detail_one_confirmed_2016_hvc_win_doesnt_appear_in_2017(self):
        # A win confirmed within FY 2016 must not leak into the 2017 view.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins()

    def test_detail_one_unconfirmed_2016_hvc_win_appear_in_2017(self):
        # Unconfirmed wins roll forward into the following financial year.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=False,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(hvc_unconfirmed=1)

    def test_detail_one_2016_common_hvc_win_confirmed_in_2017_appear_in_2017(self):
        # A 2016 win whose confirmation response lands in 2017 counts in 2017.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            response_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(hvc_confirmed=1)

    def test_detail_2016_only_hvc_win_confirmed_in_2017_appear_in_2017(self):
        # Same as above but for E046, a code present only in the 2016 set.
        self._create_hvc_win(
            hvc_code='E046',
            win_date=self.win_date_2016,
            response_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(hvc_confirmed=1)

    def test_detail_2017_one_confirmed_diff_hvc_win_for_FR_appears(self):
        # NOTE(review): E001 is not one of FR's HVC codes and the assertions
        # expect it NOT to be counted anywhere — the "_appears" suffix in the
        # method name looks stale; confirm intent.
        self._create_hvc_win(
            hvc_code='E001',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins()

    def test_detail_FR_hvc_win_but_not_FR_doesnt_appear(self):
        # NOTE(review): the win is recorded against CA but carries an FR HVC
        # code, and the assertions expect it to be counted in FR's numbers —
        # confirm that attribution-by-campaign-code is the intended behaviour.
        # This test deliberately skips the country-name assertion.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='CA'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(hvc_confirmed=1, check_name=False)

    def test_detail_one_confirmed_2016_non_hvc_win_doesnt_appear_in_2016(self):
        # NOTE(review): as above, the assertions expect the win TO appear in
        # the 2016 view despite the "doesnt_appear" name; confirm.
        self._create_non_hvc_win(
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2016)
        self._assert_export_wins(non_hvc_confirmed=1)

    def test_detail_2017_one_confirmed_non_hvc_win(self):
        self._create_non_hvc_win(
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR'
        )
        self.url = self.get_url_for_year(2017)
        self._assert_export_wins(non_hvc_confirmed=1)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountriesMonthsTestCase(CountryBaseViewTestCase, GenericMonthlyViewTestCase):
    """Monthly country-view tests; the assertions come from GenericMonthlyViewTestCase."""

    export_value = 123456
    TEST_CAMPAIGN_ID = 'E045'

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Ensure the full HVC set exists for the frozen financial year.
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self.test_country = Country.objects.get(country='FR')
        route_kwargs = {'country_code': self.test_country.country}
        self.view_base_url = reverse('mi:country_monthly', kwargs=route_kwargs)
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountryCampaignsTestCase(CountryBaseViewTestCase):
    """Tests for the `mi:country_campaigns` view, exercised against FR."""
    # List endpoint for all countries — used by the duplicate-campaign tests
    # to iterate every country's campaign view.
    list_countries_base_url = reverse('mi:countries')
    view_base_url = reverse('mi:country_campaigns', kwargs={
        'country_code': "FR"})
    # Known HVC code sets: CEN region for 2016 and 2017, and France for 2017.
    CEN_2016_HVCS = ["E045", "E046", "E047", "E048", "E214"]
    CEN_2017_HVCS = ["E045", "E046", "E047", "E054", "E119", "E225"]
    FR_2017_HVCS = ["E045", "E046", "E047"]
    # Campaign whose progress/status is probed by the colour/percent tests.
    TEST_CAMPAIGN_ID = "E045"
    TARGET_E017 = 10000000
    PRORATED_TARGET = 833333  # target based on the frozen date

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Ensure the full HVC set exists before any test in the class runs.
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        # Default request URL: FR campaigns for FY 2017/18; individual tests
        # override self.url where they need a different year or country.
        self.url = self.get_url_for_year(2017)
def test_campaigns_list_2016(self):
self.url = self.get_url_for_year(2016)
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), len(
api_response["hvcs"]["campaigns"]))
self.assertEqual(
len(api_response["campaigns"]), len(self.CEN_2016_HVCS))
def test_campaigns_2016_no_duplicates(self):
list_countries_url = self.get_url_for_year(
year=2016, base_url=self.list_countries_base_url)
all_countries = self._get_api_response(
list_countries_url).data["results"]
for country in all_countries:
country_url = reverse('mi:country_campaigns',
kwargs={"country_code": country["id"]})
self.url = self.get_url_for_year(2016, base_url=country_url)
api_response = self._api_response_data
for campaign in api_response["campaigns"]:
dups = s("campaigns[?campaign_id=='{}'].campaign".format(
campaign["campaign_id"]), api_response)
self.assertTrue(len(dups) == 1)
def test_campaigns_list_2017(self):
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaigns_list_2017_no_duplicates(self):
list_countries_url = self.get_url_for_year(
year=2017, base_url=self.list_countries_base_url)
all_countries = self._get_api_response(
list_countries_url).data["results"]
for country in all_countries:
country_url = reverse('mi:country_campaigns',
kwargs={"country_code": country["id"]})
self.url = self.get_url_for_year(2017, base_url=country_url)
api_response = self._api_response_data
for campaign in api_response["campaigns"]:
dups = s("campaigns[?campaign_id=='{}'].campaign".format(
campaign["campaign_id"]), api_response)
self.assertTrue(len(dups) == 1)
def test_campaigns_json_2016_no_wins(self):
self.url = self.get_url_for_year(2016)
self.expected_response = {
"campaigns": [],
"name": "France",
"id": "FR",
"hvcs": {
"campaigns": [
"HVC: E045",
"HVC: E046",
"HVC: E047",
"HVC: E048",
"HVC: E214",
],
"target": self.CAMPAIGN_TARGET * len(self.CEN_2016_HVCS)
},
"avg_time_to_confirm": 0
}
campaigns = []
for hvc_code in self.CEN_2016_HVCS:
campaigns.append({
"campaign": "HVC",
"campaign_id": hvc_code,
"totals": {
"hvc": {
"value": {
"unconfirmed": 0,
"confirmed": 0,
"total": 0
},
"number": {
"unconfirmed": 0,
"confirmed": 0,
"total": 0
}
},
"change": "up",
"progress": {
"unconfirmed_percent": 0,
"confirmed_percent": 0,
"status": "red"
},
"target": self.CAMPAIGN_TARGET
}
})
self.expected_response["campaigns"] = sorted(
campaigns, key=sort_campaigns_by, reverse=True)
self.assertResponse()
    def test_avg_time_to_confirm_unconfirmed_wins(self):
        """ Average time to confirm will be zero, if there are no confirmed wins """
        # One unconfirmed win per 2016 HVC code; the factory supplies the
        # remaining win fields by default.
        for hvc_code in self.CEN_2016_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code, confirm=False, country="FR")
        api_response = self._api_response_data
        expected_avg_time = 0.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)
    def test_avg_time_to_confirm_wins_confirmed_nextday(self):
        """ Test average time to confirm when all wins confirmed in one day """
        # Every win is notified on win_date_2017 and confirmed exactly one
        # day later, so the average must come out to 1.0.
        for hvc_code in self.CEN_2016_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                notify_date=self.win_date_2017,
                response_date=self.win_date_2017 + datetime.timedelta(days=1),
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        expected_avg_time = 1.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)
    def test_avg_time_to_confirm_wins_confirmed_randomly(self):
        """
        Average time to confirm should be more than one,
        when wins took more than one day to be confirmed
        """
        for hvc_code in self.CEN_2016_HVCS:
            # FuzzyDate picks a response date in the 27th-31st May 2017 range,
            # i.e. always more than one day after the notify date.
            # NOTE(review): .evaluate(2, None, False) is factory_boy's
            # internal declaration API — presumably stable, but verify when
            # upgrading factory_boy.
            response_date = FuzzyDate(datetime.datetime(2017, 5, 27),
                                      datetime.datetime(2017, 5, 31)).evaluate(2, None, False)
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                notify_date=self.win_date_2017,
                response_date=response_date,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertTrue(response_avg_time > 1.0)
def test_campaigns_count_no_wins(self):
""" Make sure number of campaigns returned have no effect when there are no wins """
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaigns_count_unconfirmed_wins(self):
""" unconfirmed wins shouldn't have any effect on number of campaigns """
for hvc_code in self.CEN_2017_HVCS:
self._create_hvc_win(
hvc_code=hvc_code,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaigns_count_confirmed_wins(self):
""" confirmed HVC wins shouldn't have any effect on number of campaigns """
for hvc_code in self.CEN_2017_HVCS:
self._create_hvc_win(
hvc_code=hvc_code,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaigns_count_unconfirmed_nonhvc_wins(self):
""" unconfirmed non-hvc wins shouldn't have any effect on number of campaigns """
for _ in self.CEN_2017_HVCS:
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaigns_count_confirmed_nonhvc_wins(self):
""" confirmed non-hvc wins shouldn't have any effect on number of campaigns """
for _ in self.CEN_2017_HVCS:
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country="FR"
)
api_response = self._api_response_data
self.assertEqual(len(api_response["campaigns"]), 5)
def test_campaign_progress_colour_no_wins(self):
""" Given the 'Frozen datetime', progress colour will be Red if there are no wins """
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_colour_unconfirmed_wins_red(self):
""" Given the 'Frozen datetime', progress colour will be Red if there are no confirmed wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_colour_confirmed_wins_red(self):
""" Given the 'Frozen datetime', progress colour will be Red if there are not enough confirmed wins """
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_colour_nonhvc_wins_red(self):
""" Given the 'Frozen datetime', progress colour will be Red if there are only non-hvc wins """
for _ in range(1, 11):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_colour_nonhvc_confirmed_wins_red(self):
""" Given the 'Frozen datetime', progress colour will be Red if there are only confirmed non-hvc wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_colour_confirmed_wins_amber(self):
"""
Given the 'Frozen datetime', progress colour will be Amber
if there only few confirmed wins to take runrate past 25% but still less than 45%
"""
export_val = self.PRORATED_TARGET * 30 / 100
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=export_val,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "amber")
def test_campaign_progress_confirmed_wins_50_green(self):
""" Progress colour should be green if there are enough win to take runrate past 45% """
for _ in range(1, 5):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=3000000,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "green")
def test_campaign_progress_confirmed_wins_45_green(self):
""" Boundary Testing for Green:
Progress colour should be green if there are enough win to take runrate past 45% """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=791700,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "green")
def test_campaign_progress_confirmed_wins_44_amber(self):
"""
Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
if there only few confirmed wins to take runrate past 25% but still less than 45%
"""
export_val = self.PRORATED_TARGET * 44 / 100
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=export_val / 10,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "amber")
def test_campaign_progress_confirmed_wins_25_amber(self):
"""
Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
if there only few confirmed wins to take runrate past 25% but still less than 45%
"""
export_val = self.PRORATED_TARGET * 25 / 100
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=export_val / 10,
country='FR'
)
self._create_hvc_win(hvc_code=self.TEST_CAMPAIGN_ID,
export_value=146700, confirm=True)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "amber")
def test_campaign_progress_confirmed_wins_24_red(self):
""" Boundary testing for red: Anything less than 25% runrate of progress should be Red """
export_val = self.PRORATED_TARGET * 24 / 100
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=export_val / 10,
country='FR'
)
api_response = self._api_response_data
e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
api_response)[0]
self.assertEqual(e017_status, "red")
def test_campaign_progress_percent_no_wins(self):
""" Progress percentage will be 0, if there are no confirmed HVC wins """
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
def test_campaign_progress_percent_unconfirmed_wins(self):
""" Progress percentage will be 0, if there are no confirmed HVC wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10.0)
def test_campaign_progress_percent_confirmed_wins_1(self):
""" Test simple progress percent """
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 1.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
def test_campaign_progress_percent_nonhvc_wins(self):
""" Non hvc wins shouldn't effect progress percent """
for _ in range(1, 11):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=100000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
def test_campaign_progress_percent_nonhvc_confirmed_wins(self):
""" Non hvc confirmed wins shouldn't effect progress percent """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
def test_campaign_progress_percent_confirmed_wins_20(self):
""" Check 20% progress percent """
for _ in range(1, 3):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=1000000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 20.0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
def test_campaign_hvc_number_no_wins(self):
""" HVC number shouldn't be affected when there are no wins """
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_number_only_nonhvc_wins(self):
""" HVC number shouldn't be affected when there are only non-hvc wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_number_only_nonhvc_confirmed_wins(self):
""" HVC number shouldn't be affected when there are only confirmed non-hvc wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_number_unconfirmed_wins(self):
""" Check HVC number with unconfirmed HVC wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
def test_campaign_hvc_number_confirmed_wins(self):
""" Check HVC number with confirmed HVC wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
def test_campaign_hvc_number_mixed_wins(self):
""" Check HVC numbers with both confirmed and unconfirmed HVC wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 20)
def test_campaign_hvc_value_no_wins(self):
""" HVC value will be 0 with there are no wins """
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_value_only_nonhvc_wins(self):
""" HVC value will be 0 there are only unconfirmed non-HVC wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_value_only_nonhvc_confirmed_wins(self):
""" HVC value will be 0 when there are only confirmed non-HVC wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
def test_campaign_hvc_value_unconfirmed_wins(self):
""" Check HVC value when there are unconfirmed wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
def test_campaign_hvc_value_confirmed_wins(self):
""" Check HVC value when there are confirmed wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
def test_campaign_hvc_value_mixed_wins(self):
""" Check HVC value when there are both confirmed and unconfirmed wins """
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=300000,
country='FR'
)
for _ in range(1, 11):
self._create_hvc_win(
hvc_code=self.TEST_CAMPAIGN_ID,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=300000,
country='FR'
)
api_response = self._api_response_data
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
.format(self.TEST_CAMPAIGN_ID), api_response)[0], 6000000)
class CountryTopNonHVCViewTestCase(CountryBaseViewTestCase, GenericTopNonHvcWinsTestMixin):
    """Top non-HVC wins view for a country, plus 404 handling for bad country codes."""
    export_value = 9992
    TEST_COUNTRY_CODE = "FR"
    fin_years = [2016, 2017]

    country_top_nonhvc_url = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": "FR"})
    country_topnonhvc_url_invalid = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": "ABC"})
    country_topnonhvc_url_missing_country_kwarg = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": None})

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.view_base_url = self.country_top_nonhvc_url

    def test_fake_country_404(self):
        """An unrecognised country code returns HTTP 404."""
        self.view_base_url = self.country_topnonhvc_url_invalid
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)

    def test_missing_country_404(self):
        """A missing country kwarg returns HTTP 404."""
        self.view_base_url = self.country_topnonhvc_url_missing_country_kwarg
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)
class CountryWinTableTestCase(CountryBaseViewTestCase, GenericWinTableTestMixin):
    """Win-table view for a country (FR fixture); behaviour comes from the mixin."""
    TEST_COUNTRY_CODE = 'FR'
    TEST_COUNTRY = DjangoCountry(TEST_COUNTRY_CODE)
    fin_years = [2016, 2017]
    # Baseline payload the mixin compares against when there are no wins.
    expected_response = {
        "country": {
            "id": TEST_COUNTRY_CODE,
            "name": TEST_COUNTRY.name,
        },
        "wins": {
            "hvc": []
        }
    }

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.country_win_table_url = reverse('mi:country_win_table', kwargs={
            'country_code': self.TEST_COUNTRY_CODE
        })
        self.country_win_table_url_invalid = reverse('mi:country_win_table', kwargs={
            'country_code': 'XX'
        })
        self.view_base_url = self.country_win_table_url


# NOTE(review): the original final line of this class was corrupted by
# file-concatenation residue (" | mi/tests/test_country_views.py | ").
# The stray separator has been removed; the trailing "import datetime" belongs
# to the module section that follows (datetime is used below) and is kept.
import datetime
from django.core.management import call_command
from django.urls import reverse
from dateutil.relativedelta import relativedelta
from django.utils.timezone import now, get_current_timezone
from factory.fuzzy import FuzzyDate
from django_countries.fields import Country as DjangoCountry
from freezegun import freeze_time
from jmespath import search as s
from fixturedb.factories.win import create_win_factory
from mi.models import Country
from mi.tests.base_test_case import (
MiApiViewsWithWinsBaseTestCase,
MiApiViewsBaseTestCase
)
from mi.utils import sort_campaigns_by
from mi.tests.utils import GenericTopNonHvcWinsTestMixin, GenericWinTableTestMixin, GenericMonthlyViewTestCase
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountryBaseViewTestCase(MiApiViewsWithWinsBaseTestCase):
    """Shared fixture dates and URL helper for the country MI view tests."""
    export_value = 100000
    # Fixed, timezone-aware dates used by the subclasses when creating wins.
    win_date_2017 = datetime.datetime(2017, 4, 25, tzinfo=get_current_timezone())
    win_date_2016 = datetime.datetime(2016, 5, 25, tzinfo=get_current_timezone())
    fy_2016_last_date = datetime.datetime(2017, 3, 31, tzinfo=get_current_timezone())

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Make sure every HVC referenced by the fixtures exists before tests run.
        call_command('create_missing_hvcs', verbose=False)

    def get_url_for_year(self, year, base_url=None):
        """Return *base_url* (default: ``self.view_base_url``) with a ``?year=`` query."""
        base = base_url or self.view_base_url
        return '{base}?year={year}'.format(base=base, year=year)
class CountryDetailTestCase(CountryBaseViewTestCase):
    """Country detail view for France: checks the wins.export HVC/non-HVC breakdown.

    Most tests create a single win, fetch the detail payload for a given
    financial year and assert the (confirmed, unconfirmed) number/value
    counters via ``_assert_export_breakdown``.
    """
    TEST_COUNTRY_CODE = "FR"
    country_detail_url = reverse(
        'mi:country_detail', kwargs={"country_code": "FR"})
    country_detail_url_invalid = reverse(
        'mi:country_detail', kwargs={"country_code": "ABC"})

    # (number confirmed, number unconfirmed, value confirmed, value unconfirmed)
    ZERO = (0, 0, 0, 0)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.view_base_url = self.country_detail_url

    # -- helpers -----------------------------------------------------------

    @staticmethod
    def _zero_counts(total_key="total"):
        """Return a number/value breakdown with every counter zeroed."""
        return {
            "number": {total_key: 0, "unconfirmed": 0, "confirmed": 0},
            "value": {total_key: 0, "unconfirmed": 0, "confirmed": 0},
        }

    def _assert_export_breakdown(self, api_response, hvc, non_hvc, totals):
        """Assert the wins.export counters; each argument is a ZERO-style 4-tuple."""
        export = api_response["wins"]["export"]
        for section, expected in (("hvc", hvc), ("non_hvc", non_hvc), ("totals", totals)):
            number_confirmed, number_unconfirmed, value_confirmed, value_unconfirmed = expected
            self.assertEqual(export[section]["number"]["confirmed"], number_confirmed)
            self.assertEqual(export[section]["number"]["unconfirmed"], number_unconfirmed)
            self.assertEqual(export[section]["value"]["confirmed"], value_confirmed)
            self.assertEqual(export[section]["value"]["unconfirmed"], value_unconfirmed)

    # -- 404 behaviour -----------------------------------------------------

    def test_2017_detail_in_2016_404(self):
        # NOTE(review): the name suggests cross-year visibility, but the body
        # checks that an invalid country code 404s for year=2016 — confirm intent.
        self.view_base_url = self.country_detail_url_invalid
        self.url = self.get_url_for_year(2016)
        self._get_api_response(self.url, status_code=404)

    def test_2016_detail_in_2017_404(self):
        # NOTE(review): as above, for year=2017.
        self.view_base_url = self.country_detail_url_invalid
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)

    # -- empty payloads ----------------------------------------------------

    def test_detail_json_2016_no_wins(self):
        """With no wins, the 2016 payload is all zeros plus the FY2016 campaign list."""
        self.url = self.get_url_for_year(2016)
        self.expected_response = {
            "name": "France",
            "id": "FR",
            "wins": {
                "export": {
                    "totals": self._zero_counts("grand_total"),
                    "non_hvc": self._zero_counts(),
                    "hvc": self._zero_counts(),
                },
                "non_export": self._zero_counts(),
            },
            "hvcs": {
                "campaigns": [
                    'HVC: E045',
                    'HVC: E046',
                    'HVC: E047',
                    'HVC: E048',
                    'HVC: E214',
                ],
                "target": 50000000,
            },
            "avg_time_to_confirm": 0.0,
        }
        self.assertResponse()

    def test_detail_json_2017_no_wins(self):
        """With no wins, the 2017 payload is all zeros plus the FY2017 campaign list."""
        self.url = self.get_url_for_year(2017)
        self.expected_response = {
            "name": "France",
            "id": "FR",
            "wins": {
                "export": {
                    "totals": self._zero_counts("grand_total"),
                    "non_hvc": self._zero_counts(),
                    "hvc": self._zero_counts(),
                },
                "non_export": self._zero_counts(),
            },
            "hvcs": {
                "campaigns": ['E04517', 'E04617', 'E04717'],
                "target": 110000000,
            },
            "avg_time_to_confirm": 0.0,
        }
        self.assertResponse()

    # -- HVC wins ----------------------------------------------------------

    def test_detail_one_confirmed_2016_hvc_win_doesnt_appear_in_2016(self):
        # NOTE(review): despite the name, the assertions expect the 2016 win
        # TO appear in the 2016 payload — confirm the intended name.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2016)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=won, non_hvc=self.ZERO, totals=won)

    def test_detail_2017_one_confirmed_hvc_win(self):
        """A confirmed FY2017 HVC win appears under hvc and totals for 2017."""
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=won, non_hvc=self.ZERO, totals=won)

    def test_detail_one_confirmed_2016_hvc_win_doesnt_appear_in_2017(self):
        """A win confirmed within FY2016 is absent from the 2017 payload."""
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        self._assert_export_breakdown(api_response, hvc=self.ZERO, non_hvc=self.ZERO, totals=self.ZERO)

    def test_detail_one_unconfirmed_2016_hvc_win_appear_in_2017(self):
        """An unconfirmed FY2016 win carries over as unconfirmed in the 2017 payload."""
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            confirm=False,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        pending = (0, 1, 0, self.export_value)
        self._assert_export_breakdown(api_response, hvc=pending, non_hvc=self.ZERO, totals=pending)

    def test_detail_one_2016_common_hvc_win_confirmed_in_2017_appear_in_2017(self):
        """A 2016 win with a FY2017 response date counts as confirmed in 2017."""
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2016,
            response_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=won, non_hvc=self.ZERO, totals=won)

    def test_detail_2016_only_hvc_win_confirmed_in_2017_appear_in_2017(self):
        """A campaign E046 win from 2016, confirmed in FY2017, appears in 2017."""
        self._create_hvc_win(
            hvc_code='E046',
            win_date=self.win_date_2016,
            response_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=won, non_hvc=self.ZERO, totals=won)

    def test_detail_2017_one_confirmed_diff_hvc_win_for_FR_appears(self):
        # NOTE(review): the name says "appears", yet every counter is asserted
        # to be zero — E001 does not appear to be a France campaign. Confirm
        # the intended name.
        self._create_hvc_win(
            hvc_code='E001',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        self._assert_export_breakdown(api_response, hvc=self.ZERO, non_hvc=self.ZERO, totals=self.ZERO)

    def test_detail_FR_hvc_win_but_not_FR_doesnt_appear(self):
        # NOTE(review): the name says "doesnt_appear", yet the assertions expect
        # the CA-country win to be counted (attribution looks campaign-based
        # rather than win-country based) — confirm intent.
        self._create_hvc_win(
            hvc_code='E045',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='CA',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=won, non_hvc=self.ZERO, totals=won)

    # -- non-HVC wins ------------------------------------------------------

    def test_detail_one_confirmed_2016_non_hvc_win_doesnt_appear_in_2016(self):
        # NOTE(review): despite the name, the assertions expect the non-HVC win
        # TO appear in the 2016 payload — confirm the intended name.
        self._create_non_hvc_win(
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2016)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=self.ZERO, non_hvc=won, totals=won)

    def test_detail_2017_one_confirmed_non_hvc_win(self):
        """A confirmed FY2017 non-HVC win appears under non_hvc and totals."""
        self._create_non_hvc_win(
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='FR',
        )
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertTrue(len(api_response["hvcs"]["campaigns"]) > 0)
        self.assertEqual(api_response["name"], "France")
        won = (1, 0, self.export_value, 0)
        self._assert_export_breakdown(api_response, hvc=self.ZERO, non_hvc=won, totals=won)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountriesMonthsTestCase(CountryBaseViewTestCase, GenericMonthlyViewTestCase):
    """Monthly country-level MI view for France, run against the FY2017
    frozen clock; the generic month assertions come from the mixin."""

    TEST_CAMPAIGN_ID = 'E045'
    export_value = 123456

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Ensure every HVC referenced by fixtures exists before tests run.
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user,
            sector_choices=self.TEAM_1_SECTORS,
        )
        france = Country.objects.get(country='FR')
        self.test_country = france
        self.view_base_url = reverse(
            'mi:country_monthly',
            kwargs={'country_code': france.country},
        )
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class CountryCampaignsTestCase(CountryBaseViewTestCase):
    """Per-country HVC campaigns endpoint (``mi:country_campaigns``) for FR,
    with the clock frozen inside FY2017.

    Covers: campaign list sizes and de-duplication, the full zero-win JSON
    shape, average time-to-confirm, progress colour (red/amber/green RAG
    boundaries against the prorated target), progress percentages, and the
    HVC number/value aggregates.

    NOTE(review): ``s(...)`` used throughout looks like a JMESPath-style
    search helper defined elsewhere in this file -- TODO confirm.
    """

    list_countries_base_url = reverse('mi:countries')
    view_base_url = reverse('mi:country_campaigns', kwargs={
        'country_code': "FR"})
    # HVC campaign codes per financial year used by the fixtures below.
    CEN_2016_HVCS = ["E045", "E046", "E047", "E048", "E214"]
    CEN_2017_HVCS = ["E045", "E046", "E047", "E054", "E119", "E225"]
    FR_2017_HVCS = ["E045", "E046", "E047"]
    TEST_CAMPAIGN_ID = "E045"
    TARGET_E017 = 10000000
    PRORATED_TARGET = 833333  # target based on the frozen date

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Ensure every HVC referenced by fixtures exists before tests run.
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        # Default URL for FY2017; individual tests override where needed.
        self.url = self.get_url_for_year(2017)

    def test_campaigns_list_2016(self):
        """2016 campaign list matches both the hvcs block and CEN_2016_HVCS."""
        self.url = self.get_url_for_year(2016)
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(
            api_response["hvcs"]["campaigns"]))
        self.assertEqual(
            len(api_response["campaigns"]), len(self.CEN_2016_HVCS))

    def test_campaigns_2016_no_duplicates(self):
        """No campaign id appears more than once for any country in 2016."""
        list_countries_url = self.get_url_for_year(
            year=2016, base_url=self.list_countries_base_url)
        all_countries = self._get_api_response(
            list_countries_url).data["results"]
        for country in all_countries:
            country_url = reverse('mi:country_campaigns',
                                  kwargs={"country_code": country["id"]})
            self.url = self.get_url_for_year(2016, base_url=country_url)
            api_response = self._api_response_data
            for campaign in api_response["campaigns"]:
                dups = s("campaigns[?campaign_id=='{}'].campaign".format(
                    campaign["campaign_id"]), api_response)
                self.assertTrue(len(dups) == 1)

    def test_campaigns_list_2017(self):
        """2017 campaign list for FR has the expected size."""
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaigns_list_2017_no_duplicates(self):
        """No campaign id appears more than once for any country in 2017."""
        list_countries_url = self.get_url_for_year(
            year=2017, base_url=self.list_countries_base_url)
        all_countries = self._get_api_response(
            list_countries_url).data["results"]
        for country in all_countries:
            country_url = reverse('mi:country_campaigns',
                                  kwargs={"country_code": country["id"]})
            self.url = self.get_url_for_year(2017, base_url=country_url)
            api_response = self._api_response_data
            for campaign in api_response["campaigns"]:
                dups = s("campaigns[?campaign_id=='{}'].campaign".format(
                    campaign["campaign_id"]), api_response)
                self.assertTrue(len(dups) == 1)

    def test_campaigns_json_2016_no_wins(self):
        """Full 2016 JSON shape with no wins: every campaign zeroed, status
        red, and each at the default campaign target."""
        self.url = self.get_url_for_year(2016)
        self.expected_response = {
            "campaigns": [],
            "name": "France",
            "id": "FR",
            "hvcs": {
                "campaigns": [
                    "HVC: E045",
                    "HVC: E046",
                    "HVC: E047",
                    "HVC: E048",
                    "HVC: E214",
                ],
                "target": self.CAMPAIGN_TARGET * len(self.CEN_2016_HVCS)
            },
            "avg_time_to_confirm": 0
        }
        campaigns = []
        for hvc_code in self.CEN_2016_HVCS:
            campaigns.append({
                "campaign": "HVC",
                "campaign_id": hvc_code,
                "totals": {
                    "hvc": {
                        "value": {
                            "unconfirmed": 0,
                            "confirmed": 0,
                            "total": 0
                        },
                        "number": {
                            "unconfirmed": 0,
                            "confirmed": 0,
                            "total": 0
                        }
                    },
                    "change": "up",
                    "progress": {
                        "unconfirmed_percent": 0,
                        "confirmed_percent": 0,
                        "status": "red"
                    },
                    "target": self.CAMPAIGN_TARGET
                }
            })
        self.expected_response["campaigns"] = sorted(
            campaigns, key=sort_campaigns_by, reverse=True)
        self.assertResponse()

    def test_avg_time_to_confirm_unconfirmed_wins(self):
        """ Average time to confirm will be zero, if there are no confirmed wins """
        for hvc_code in self.CEN_2016_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code, confirm=False, country="FR")
        api_response = self._api_response_data
        expected_avg_time = 0.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)

    def test_avg_time_to_confirm_wins_confirmed_nextday(self):
        """ Test average time to confirm when all wins confirmed in one day """
        for hvc_code in self.CEN_2016_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                notify_date=self.win_date_2017,
                # Confirmed exactly one day after notification.
                response_date=self.win_date_2017 + datetime.timedelta(days=1),
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        expected_avg_time = 1.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)

    def test_avg_time_to_confirm_wins_confirmed_randomly(self):
        """
        Average time to confirm should be more than one,
        when wins took more than one day to be confirmed
        """
        for hvc_code in self.CEN_2016_HVCS:
            # Random confirmation date well after the win/notify date.
            response_date = FuzzyDate(datetime.datetime(2017, 5, 27),
                                      datetime.datetime(2017, 5, 31)).evaluate(2, None, False)
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                notify_date=self.win_date_2017,
                response_date=response_date,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertTrue(response_avg_time > 1.0)

    def test_campaigns_count_no_wins(self):
        """ Make sure number of campaigns returned have no effect when there are no wins """
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaigns_count_unconfirmed_wins(self):
        """ unconfirmed wins shouldn't have any effect on number of campaigns """
        for hvc_code in self.CEN_2017_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaigns_count_confirmed_wins(self):
        """ confirmed HVC wins shouldn't have any effect on number of campaigns """
        for hvc_code in self.CEN_2017_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaigns_count_unconfirmed_nonhvc_wins(self):
        """ unconfirmed non-hvc wins shouldn't have any effect on number of campaigns """
        for _ in self.CEN_2017_HVCS:
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=self.export_value,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaigns_count_confirmed_nonhvc_wins(self):
        """ confirmed non-hvc wins shouldn't have any effect on number of campaigns """
        for _ in self.CEN_2017_HVCS:
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country="FR"
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), 5)

    def test_campaign_progress_colour_no_wins(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are no wins """
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_colour_unconfirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are no confirmed wins """
        # range(1, 11) creates 10 unconfirmed wins.
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=100000,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_colour_confirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are not enough confirmed wins """
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=100000,
            country='FR'
        )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_colour_nonhvc_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are only non-hvc wins """
        for _ in range(1, 11):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=100000,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_colour_nonhvc_confirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are only confirmed non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=100000,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_colour_confirmed_wins_amber(self):
        """
        Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        # 30% of the prorated target -- inside the amber band (25%..45%).
        export_val = self.PRORATED_TARGET * 30 / 100
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=export_val,
            country='FR'
        )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")

    def test_campaign_progress_confirmed_wins_50_green(self):
        """ Progress colour should be green if there are enough win to take runrate past 45% """
        # range(1, 5) creates 4 confirmed wins.
        for _ in range(1, 5):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=3000000,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "green")

    def test_campaign_progress_confirmed_wins_45_green(self):
        """ Boundary Testing for Green:
        Progress colour should be green if there are enough win to take runrate past 45% """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=791700,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "green")

    def test_campaign_progress_confirmed_wins_44_amber(self):
        """
        Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        # 44% of the prorated target, split over 10 wins -- just below green.
        export_val = self.PRORATED_TARGET * 44 / 100
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val / 10,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")

    def test_campaign_progress_confirmed_wins_25_amber(self):
        """
        Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        export_val = self.PRORATED_TARGET * 25 / 100
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val / 10,
                country='FR'
            )
        # One extra win to nudge the runrate over the 25% boundary.
        self._create_hvc_win(hvc_code=self.TEST_CAMPAIGN_ID,
                             export_value=146700, confirm=True)
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")

    def test_campaign_progress_confirmed_wins_24_red(self):
        """ Boundary testing for red: Anything less than 25% runrate of progress should be Red """
        export_val = self.PRORATED_TARGET * 24 / 100
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val / 10,
                country='FR'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")

    def test_campaign_progress_percent_no_wins(self):
        """ Progress percentage will be 0, if there are no confirmed HVC wins """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)

    def test_campaign_progress_percent_unconfirmed_wins(self):
        """ Progress percentage will be 0, if there are no confirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=100000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10.0)

    def test_campaign_progress_percent_confirmed_wins_1(self):
        """ Test simple progress percent """
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=100000,
            country='FR'
        )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 1.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)

    def test_campaign_progress_percent_nonhvc_wins(self):
        """ Non hvc wins shouldn't effect progress percent """
        for _ in range(1, 11):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=100000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)

    def test_campaign_progress_percent_nonhvc_confirmed_wins(self):
        """ Non hvc confirmed wins shouldn't effect progress percent """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)

    def test_campaign_progress_percent_confirmed_wins_20(self):
        """ Check 20% progress percent """
        # range(1, 3) creates 2 wins of 1,000,000 each.
        for _ in range(1, 3):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=1000000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 20.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)

    def test_campaign_hvc_number_no_wins(self):
        """ HVC number shouldn't be affected when there are no wins """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_number_only_nonhvc_wins(self):
        """ HVC number shouldn't be affected when there are only non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_number_only_nonhvc_confirmed_wins(self):
        """ HVC number shouldn't be affected when there are only confirmed non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_number_unconfirmed_wins(self):
        """ Check HVC number with unconfirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)

    def test_campaign_hvc_number_confirmed_wins(self):
        """ Check HVC number with confirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)

    def test_campaign_hvc_number_mixed_wins(self):
        """ Check HVC numbers with both confirmed and unconfirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 20)

    def test_campaign_hvc_value_no_wins(self):
        """ HVC value will be 0 with there are no wins """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_value_only_nonhvc_wins(self):
        """ HVC value will be 0 there are only unconfirmed non-HVC wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_value_only_nonhvc_confirmed_wins(self):
        """ HVC value will be 0 when there are only confirmed non-HVC wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)

    def test_campaign_hvc_value_unconfirmed_wins(self):
        """ Check HVC value when there are unconfirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)

    def test_campaign_hvc_value_confirmed_wins(self):
        """ Check HVC value when there are confirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)

    def test_campaign_hvc_value_mixed_wins(self):
        """ Check HVC value when there are both confirmed and unconfirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='FR'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 6000000)
class CountryTopNonHVCViewTestCase(CountryBaseViewTestCase, GenericTopNonHvcWinsTestMixin):
    """Top non-HVC wins view for a single country (FR); the generic win
    assertions come from the mixin, plus two 404 routing checks here."""

    TEST_COUNTRY_CODE = "FR"
    export_value = 9992
    fin_years = [2016, 2017]

    country_top_nonhvc_url = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": "FR"})
    country_topnonhvc_url_invalid = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": "ABC"})
    country_topnonhvc_url_missing_country_kwarg = reverse(
        'mi:country_top_nonhvc', kwargs={"country_code": None})

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.view_base_url = self.country_top_nonhvc_url

    def _expect_404(self, base_url):
        # Point the view at *base_url* for FY2017 and require HTTP 404.
        self.view_base_url = base_url
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)

    def test_fake_country_404(self):
        """An unknown country code must return 404."""
        self._expect_404(self.country_topnonhvc_url_invalid)

    def test_missing_country_404(self):
        """A missing country kwarg must return 404."""
        self._expect_404(self.country_topnonhvc_url_missing_country_kwarg)
class CountryWinTableTestCase(CountryBaseViewTestCase, GenericWinTableTestMixin):
TEST_COUNTRY_CODE = 'FR'
TEST_COUNTRY = DjangoCountry(TEST_COUNTRY_CODE)
fin_years = [2016, 2017]
expected_response = {
"country": {
"id": TEST_COUNTRY_CODE,
"name": TEST_COUNTRY.name,
},
"wins": {
"hvc": []
}
}
def setUp(self):
super().setUp()
self._win_factory_function = create_win_factory(
self.user, sector_choices=self.TEAM_1_SECTORS)
self.country_win_table_url = reverse('mi:country_win_table', kwargs={
'country_code': self.TEST_COUNTRY_CODE
})
self.country_win_table_url_invalid = reverse('mi:country_win_table', kwargs={
'country_code': 'XX'
})
self.view_base_url = self.country_win_table_url | 0.382718 | 0.22306 |
import os
from pathlib import Path
import pytest
import numpy as np
import torch as th
from ml.vision.transforms import functional as TF
from ml.vision import transforms
from ml.vision.ops import clip_boxes_to_image
from ml import nn
import ml
from .fixtures import *
@pytest.fixture
def dev():
return th.device('cuda') if th.cuda.is_available() else th.device('cpu')
@pytest.fixture
def normalize():
mean = [0.442, 0.406, 0.38]
std = [0.224, 0.217, 0.211]
return transforms.Normalize(mean=mean, std=std)
@pytest.fixture
def cwd():
return Path(__file__).parent.parent
@pytest.fixture
def chkpt_img(cwd):
return cwd.parent / 'checkpoints/backbone/kinetics400-x101_32x8d_wsl-62.58.pth'
@pytest.fixture
def padding():
return (0.70, 0.35)
@pytest.fixture
def dets():
return th.Tensor([[150., 246., 348., 654.],
[151., 227., 197., 338.],
[ 70., 43., 128., 198.],
[221., 302., 439., 712.],
[168., 274., 269., 490.],
[ 59., 51., 122., 215.]])
@pytest.fixture
def model(dev):
from ml.vision.models.backbone import resnext101
model = resnext101(pretrained=True, groups=32, width_per_group=8)
model.eval()
model.to(dev)
return model
def call_backbone_spatial(model, batch):
with th.no_grad():
with th.cuda.amp.autocast(enabled=True):
r = model(batch)[-2]
th.cuda.synchronize()
#print(f"r.grad_fn={r.grad_fn}")
#print(f"i.grad_fn={i.grad_fn}")
return r
def call_backbone(model, batch):
with th.no_grad():
with th.cuda.amp.autocast(enabled=True):
feats = model(batch)[-1]
th.cuda.synchronize()
# print(f"feats.grad_fn={feats.grad_fn}")
return feats
@pytest.mark.essential
@pytest.mark.parametrize("batch_size", [10])
def test_resnext101_spatial_feats(benchmark, model, normalize, dev, batch_size):
spatial_transform = transforms.Compose([normalize])
batch = []
for n in range(batch_size):
frame = th.rand((3, 720, 1280), dtype=th.float32)
frame = spatial_transform(frame).to(dev)
batch.append(frame)
batch = th.stack(batch)
th.cuda.synchronize()
spatial_feats = benchmark(call_backbone_spatial, model, batch)
assert spatial_feats.shape[0] == batch_size
@pytest.mark.essential
@pytest.mark.parametrize("streams", [2, 4])
def test_resnext101_feats(benchmark, model, dev, normalize, dets, padding, streams):
im_transform = transforms.Compose([transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize, ])
frames = th.rand((streams, 3, 720, 1280), dtype=th.float32)
H, W = frames.shape[-2:]
width = dets[:, 2] - dets[:, 0] + 1
height = dets[:, 3] - dets[:, 1] + 1
paddingW = width * padding[0]
paddingH = height * padding[1]
dets[:, 0] -= paddingW
dets[:, 1] -= paddingH
dets[:, 2] += paddingW
dets[:, 3] += paddingH
dets = clip_boxes_to_image(dets, (H, W))
batch = []
for s in range(streams):
for box in dets.round().long():
x1, y1, x2, y2 = box.tolist()
batch.append(im_transform(frames[s, :, y1:y2, x1:x2]))
batch = th.stack(batch).to(dev)
th.cuda.synchronize()
feats = benchmark(call_backbone, model, batch)
assert feats.shape[0] == batch.shape[0] | tests/test_backbone.py | import os
from pathlib import Path
import pytest
import numpy as np
import torch as th
from ml.vision.transforms import functional as TF
from ml.vision import transforms
from ml.vision.ops import clip_boxes_to_image
from ml import nn
import ml
from .fixtures import *
@pytest.fixture
def dev():
return th.device('cuda') if th.cuda.is_available() else th.device('cpu')
@pytest.fixture
def normalize():
mean = [0.442, 0.406, 0.38]
std = [0.224, 0.217, 0.211]
return transforms.Normalize(mean=mean, std=std)
@pytest.fixture
def cwd():
return Path(__file__).parent.parent
@pytest.fixture
def chkpt_img(cwd):
return cwd.parent / 'checkpoints/backbone/kinetics400-x101_32x8d_wsl-62.58.pth'
@pytest.fixture
def padding():
return (0.70, 0.35)
@pytest.fixture
def dets():
return th.Tensor([[150., 246., 348., 654.],
[151., 227., 197., 338.],
[ 70., 43., 128., 198.],
[221., 302., 439., 712.],
[168., 274., 269., 490.],
[ 59., 51., 122., 215.]])
@pytest.fixture
def model(dev):
from ml.vision.models.backbone import resnext101
model = resnext101(pretrained=True, groups=32, width_per_group=8)
model.eval()
model.to(dev)
return model
def call_backbone_spatial(model, batch):
with th.no_grad():
with th.cuda.amp.autocast(enabled=True):
r = model(batch)[-2]
th.cuda.synchronize()
#print(f"r.grad_fn={r.grad_fn}")
#print(f"i.grad_fn={i.grad_fn}")
return r
def call_backbone(model, batch):
with th.no_grad():
with th.cuda.amp.autocast(enabled=True):
feats = model(batch)[-1]
th.cuda.synchronize()
# print(f"feats.grad_fn={feats.grad_fn}")
return feats
@pytest.mark.essential
@pytest.mark.parametrize("batch_size", [10])
def test_resnext101_spatial_feats(benchmark, model, normalize, dev, batch_size):
spatial_transform = transforms.Compose([normalize])
batch = []
for n in range(batch_size):
frame = th.rand((3, 720, 1280), dtype=th.float32)
frame = spatial_transform(frame).to(dev)
batch.append(frame)
batch = th.stack(batch)
th.cuda.synchronize()
spatial_feats = benchmark(call_backbone_spatial, model, batch)
assert spatial_feats.shape[0] == batch_size
@pytest.mark.essential
@pytest.mark.parametrize("streams", [2, 4])
def test_resnext101_feats(benchmark, model, dev, normalize, dets, padding, streams):
    """Benchmark per-detection feature extraction across multiple streams.

    Each detection box is padded by the fractional *padding*, clipped to the
    frame, cropped from every stream's random frame, resized/center-cropped
    to 224, and the whole batch is fed through the backbone at once.
    """
    im_transform = transforms.Compose([transforms.ToPILImage(),
                                       transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       normalize, ])
    frames = th.rand((streams, 3, 720, 1280), dtype=th.float32)
    H, W = frames.shape[-2:]
    # Pad each box by a fraction of its own size, then clip back to the frame.
    width = dets[:, 2] - dets[:, 0] + 1
    height = dets[:, 3] - dets[:, 1] + 1
    paddingW = width * padding[0]
    paddingH = height * padding[1]
    dets[:, 0] -= paddingW
    dets[:, 1] -= paddingH
    dets[:, 2] += paddingW
    dets[:, 3] += paddingH
    dets = clip_boxes_to_image(dets, (H, W))
    batch = []
    for s in range(streams):
        for box in dets.round().long():
            x1, y1, x2, y2 = box.tolist()
            batch.append(im_transform(frames[s, :, y1:y2, x1:x2]))
    batch = th.stack(batch).to(dev)
    th.cuda.synchronize()
    feats = benchmark(call_backbone, model, batch)
    # One feature vector per (stream, detection) crop.
    # (Original line was corrupted by fused dataset residue.)
    assert feats.shape[0] == batch.shape[0]
import requests
import json
from ckan.plugins import toolkit as tk
from ckan import model as ckan_model
from ckan.logic import NotFound
from ckan.lib.mailer import mail_recipient
from .. import model
import logging
_ = tk._
check_access = tk.check_access
side_effect_free = tk.side_effect_free
log = logging.getLogger(__name__)
def service_permission_application_create(context, data_dict):
    """Create a service permission application and notify the service owner.

    All fields in ``required_fields`` must be present and non-empty. The
    application is stored via ``model.ApplyPermission`` and the owner of the
    target subsystem is notified through the delivery method configured in
    the package's ``service_permission_settings`` ('api' or 'email'; email
    is the default).

    :raises tk.ValidationError: if any required field is missing or empty.
    """
    tk.check_access('service_permission_application_create', context, data_dict)
    # Collect every missing-field error before failing so the caller sees
    # all problems at once (same behavior as the previous hand-rolled checks).
    required_fields = ('organization_id', 'target_organization_id',
                       'business_code', 'contact_name', 'contact_email',
                       'ip_address_list', 'subsystem_id', 'subsystem_code',
                       'service_code_list')
    errors = {field: _('Missing value')
              for field in required_fields
              if data_dict.get(field) in (None, "")}
    if errors:
        raise tk.ValidationError(errors)
    (organization_id, target_organization_id, business_code, contact_name,
     contact_email, ip_address_list, subsystem_id, subsystem_code,
     service_code_list) = (data_dict[field] for field in required_fields)
    usage_description = data_dict.get('usage_description')
    request_date = data_dict.get('request_date') or None
    # Need sysadmin privileges to see permission_application_settings
    sysadmin_context = {'ignore_auth': True, 'use_cache': False}
    package = tk.get_action('package_show')(sysadmin_context, {'id': subsystem_id})
    owner_org = tk.get_action('organization_show')(context, {'id': package['owner_org']})
    application_id = model.ApplyPermission.create(organization_id=organization_id,
                                                  target_organization_id=target_organization_id,
                                                  business_code=business_code,
                                                  contact_name=contact_name,
                                                  contact_email=contact_email,
                                                  ip_address_list=ip_address_list,
                                                  subsystem_id=subsystem_id,
                                                  subsystem_code=subsystem_code,
                                                  service_code_list=service_code_list,
                                                  usage_description=usage_description,
                                                  request_date=request_date)
    log.info(package.get('service_permission_settings', '{}'))
    # NOTE(review): service_permission_settings_update stores the settings as
    # a JSON *string*; this assumes package_show has already deserialized
    # them to a dict -- confirm.
    service_permission_settings = package.get('service_permission_settings', {})
    delivery_method = service_permission_settings.get('delivery_method', 'email')
    if delivery_method == 'api':
        application = model.ApplyPermission.get(application_id).as_dict()
        try:
            api_url = service_permission_settings.get('api')
            data = data_dict.copy()
            # Map package/resource ids to X-Road codes for the external API.
            data['subsystem_code'] = package.get('xroad_subsystemcode') or package['title']
            service_code_list = [r['xroad_servicecode'] or r['name'] for r in package.get('resources')
                                 if r['id'] in data_dict['service_code_list']]
            data['service_code_list'] = service_code_list
            requests.post(api_url, data=json.dumps(data), timeout=5).raise_for_status()
        except Exception as e:
            log.error('Error calling request application API: %s', e)
    elif delivery_method == 'email':
        email_address = service_permission_settings.get('email', owner_org.get('email_address'))
        if email_address:
            log.info('Sending permission application notification email to {}'.format(email_address))
            application = model.ApplyPermission.get(application_id).as_dict()
            email_subject = u'{} pyytää lupaa käyttää Suomi.fi-palveluväylässä tarjoamaasi palvelua'.format(
                application['organization']['title'])
            email_content = tk.render('apply_permissions_for_service/notification_email.html',
                                      extra_vars={'application': application})
            try:
                mail_recipient(owner_org['title'], email_address, email_subject, email_content,
                               headers={'content-type': 'text/html'})
            except Exception as e:
                # Email exceptions are not user relevant nor action critical, but should be logged
                log.warning(e)
        else:
            log.info('Organization %s has no email address defined, not sending permission application notification.',
                     owner_org['name'])
@side_effect_free
def service_permission_application_list(context, data_dict):
    """Return all permission applications, optionally filtered by subsystem."""
    check_access('service_permission_application_list', context, data_dict)
    query = ckan_model.Session.query(model.ApplyPermission)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id:
        query = query.filter(model.ApplyPermission.subsystem_id == subsystem_id)
    return [application.as_dict() for application in query.all()]
@side_effect_free
def service_permission_application_show(context, data_dict):
    """Return a single permission application as a dict.

    :raises NotFound: if no ``id`` is given or no application matches it.
    """
    check_access('service_permission_application_show', context, data_dict)
    application_id = data_dict.get('id')
    if application_id is None:
        raise NotFound
    application = model.ApplyPermission.get(application_id)
    if application is None:
        # Guard against unknown ids (assuming .get returns None for them);
        # previously this crashed with AttributeError on .as_dict().
        raise NotFound
    return application.as_dict()
@side_effect_free
def service_permission_settings_show(context, data_dict):
    """Return the service permission settings stored on a subsystem package."""
    check_access('service_permission_settings', context, data_dict)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id is None:
        raise NotFound
    package = tk.get_action('package_show')(context, {'id': subsystem_id})
    return package.get('service_permission_settings', {})
def service_permission_settings_update(context, data_dict):
    """Patch the service permission settings on a subsystem package.

    Only known settings fields are picked from *data_dict*; the result is
    stored as a JSON string in ``service_permission_settings``.

    :returns: the settings dict that was stored.
    :raises NotFound: if ``subsystem_id`` is missing.
    """
    tk.check_access('service_permission_settings', context, data_dict)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id is None:
        raise NotFound
    settings = {field: data_dict[field]
                for field in ('delivery_method', 'api', 'web', 'email', 'file_url')
                if field in data_dict}
    tk.get_action('package_patch')(context, {
        'id': subsystem_id,
        'service_permission_settings': json.dumps(settings)
    })
    # (Original return line was corrupted by fused dataset residue.)
    return settings
import json
from ckan.plugins import toolkit as tk
from ckan import model as ckan_model
from ckan.logic import NotFound
from ckan.lib.mailer import mail_recipient
from .. import model
import logging
_ = tk._
check_access = tk.check_access
side_effect_free = tk.side_effect_free
log = logging.getLogger(__name__)
def service_permission_application_create(context, data_dict):
    """Create a service permission application and notify the service owner.

    All fields in ``required_fields`` must be present and non-empty. The
    application is stored via ``model.ApplyPermission`` and the owner of the
    target subsystem is notified through the delivery method configured in
    the package's ``service_permission_settings`` ('api' or 'email'; email
    is the default).

    :raises tk.ValidationError: if any required field is missing or empty.
    """
    tk.check_access('service_permission_application_create', context, data_dict)
    # Collect every missing-field error before failing so the caller sees
    # all problems at once (same behavior as the previous hand-rolled checks).
    required_fields = ('organization_id', 'target_organization_id',
                       'business_code', 'contact_name', 'contact_email',
                       'ip_address_list', 'subsystem_id', 'subsystem_code',
                       'service_code_list')
    errors = {field: _('Missing value')
              for field in required_fields
              if data_dict.get(field) in (None, "")}
    if errors:
        raise tk.ValidationError(errors)
    (organization_id, target_organization_id, business_code, contact_name,
     contact_email, ip_address_list, subsystem_id, subsystem_code,
     service_code_list) = (data_dict[field] for field in required_fields)
    usage_description = data_dict.get('usage_description')
    request_date = data_dict.get('request_date') or None
    # Need sysadmin privileges to see permission_application_settings
    sysadmin_context = {'ignore_auth': True, 'use_cache': False}
    package = tk.get_action('package_show')(sysadmin_context, {'id': subsystem_id})
    owner_org = tk.get_action('organization_show')(context, {'id': package['owner_org']})
    application_id = model.ApplyPermission.create(organization_id=organization_id,
                                                  target_organization_id=target_organization_id,
                                                  business_code=business_code,
                                                  contact_name=contact_name,
                                                  contact_email=contact_email,
                                                  ip_address_list=ip_address_list,
                                                  subsystem_id=subsystem_id,
                                                  subsystem_code=subsystem_code,
                                                  service_code_list=service_code_list,
                                                  usage_description=usage_description,
                                                  request_date=request_date)
    log.info(package.get('service_permission_settings', '{}'))
    # NOTE(review): service_permission_settings_update stores the settings as
    # a JSON *string*; this assumes package_show has already deserialized
    # them to a dict -- confirm.
    service_permission_settings = package.get('service_permission_settings', {})
    delivery_method = service_permission_settings.get('delivery_method', 'email')
    if delivery_method == 'api':
        application = model.ApplyPermission.get(application_id).as_dict()
        try:
            api_url = service_permission_settings.get('api')
            data = data_dict.copy()
            # Map package/resource ids to X-Road codes for the external API.
            data['subsystem_code'] = package.get('xroad_subsystemcode') or package['title']
            service_code_list = [r['xroad_servicecode'] or r['name'] for r in package.get('resources')
                                 if r['id'] in data_dict['service_code_list']]
            data['service_code_list'] = service_code_list
            requests.post(api_url, data=json.dumps(data), timeout=5).raise_for_status()
        except Exception as e:
            log.error('Error calling request application API: %s', e)
    elif delivery_method == 'email':
        email_address = service_permission_settings.get('email', owner_org.get('email_address'))
        if email_address:
            log.info('Sending permission application notification email to {}'.format(email_address))
            application = model.ApplyPermission.get(application_id).as_dict()
            email_subject = u'{} pyytää lupaa käyttää Suomi.fi-palveluväylässä tarjoamaasi palvelua'.format(
                application['organization']['title'])
            email_content = tk.render('apply_permissions_for_service/notification_email.html',
                                      extra_vars={'application': application})
            try:
                mail_recipient(owner_org['title'], email_address, email_subject, email_content,
                               headers={'content-type': 'text/html'})
            except Exception as e:
                # Email exceptions are not user relevant nor action critical, but should be logged
                log.warning(e)
        else:
            log.info('Organization %s has no email address defined, not sending permission application notification.',
                     owner_org['name'])
@side_effect_free
def service_permission_application_list(context, data_dict):
    """Return all permission applications, optionally filtered by subsystem."""
    check_access('service_permission_application_list', context, data_dict)
    query = ckan_model.Session.query(model.ApplyPermission)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id:
        query = query.filter(model.ApplyPermission.subsystem_id == subsystem_id)
    return [application.as_dict() for application in query.all()]
@side_effect_free
def service_permission_application_show(context, data_dict):
    """Return a single permission application as a dict.

    :raises NotFound: if no ``id`` is given or no application matches it.
    """
    check_access('service_permission_application_show', context, data_dict)
    application_id = data_dict.get('id')
    if application_id is None:
        raise NotFound
    application = model.ApplyPermission.get(application_id)
    if application is None:
        # Guard against unknown ids (assuming .get returns None for them);
        # previously this crashed with AttributeError on .as_dict().
        raise NotFound
    return application.as_dict()
@side_effect_free
def service_permission_settings_show(context, data_dict):
    """Return the service permission settings stored on a subsystem package."""
    check_access('service_permission_settings', context, data_dict)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id is None:
        raise NotFound
    package = tk.get_action('package_show')(context, {'id': subsystem_id})
    return package.get('service_permission_settings', {})
def service_permission_settings_update(context, data_dict):
    """Patch the service permission settings on a subsystem package.

    Only known settings fields are picked from *data_dict*; the result is
    stored as a JSON string in ``service_permission_settings``.

    :returns: the settings dict that was stored.
    :raises NotFound: if ``subsystem_id`` is missing.
    """
    tk.check_access('service_permission_settings', context, data_dict)
    subsystem_id = data_dict.get('subsystem_id')
    if subsystem_id is None:
        raise NotFound
    settings = {field: data_dict[field]
                for field in ('delivery_method', 'api', 'web', 'email', 'file_url')
                if field in data_dict}
    tk.get_action('package_patch')(context, {
        'id': subsystem_id,
        'service_permission_settings': json.dumps(settings)
    })
    # (Original return line was corrupted by fused dataset residue.)
    return settings
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import argparse
import fileinput
from cocoprep.archive_exceptions import PreprocessingException, PreprocessingWarning
from cocoprep.archive_load_data import parse_archive_file_name, parse_range, get_key_value, get_file_name_list
def parse_info_file(file_name):
    """Return a list of quadruples [function, instance, dimension, evaluations]
    read from the .info file with the given name.

    Each 'function' line carries the function number and dimension; the
    comma-separated ``instance:evaluations`` pairs on the same line (with
    '|' treated as ':') yield one quadruple per instance.
    """
    info_data_list = []
    with open(file_name, 'r') as f:
        for line in f:
            if 'function' not in line:
                continue
            function = int(get_key_value(line, 'function'))
            dimension = int(get_key_value(line, 'dim'))
            instances = []
            evaluations = []
            for element in line.split(','):
                if ':' in element:
                    parts = element.replace('|', ':').split(':')
                    instances.append(int(parts[0]))
                    evaluations.append(int(parts[1]))
            for instance, evaluation in zip(instances, evaluations):
                info_data_list.append([function, instance, dimension, evaluation])
    # (Removed a redundant f.close() -- the with-statement already closed f.)
    return info_data_list
def check_file_complete(input_paths, functions, instances, dimensions, max_diff=1000):
    """Check that the .adat files created by the bbob-biobj logger were fully
    written.

    For each selected (function, instance, dimension), the last data line of
    the .adat file is compared with the evaluation count recorded in the
    corresponding .info file; a message is printed when they differ by more
    than max_diff. Only the given functions, instances and dimensions are
    taken into account.
    """
    def inspect_line(input_file, line_string, evaluations, max_diff=1e5):
        """Check that line_string contains at least three well-formed numbers
        and print a message when `evaluations` exceeds its first number by
        more than max_diff.
        """
        items = line_string.split()
        if len(items) < 3:
            print("File {}, line {} too short".format(input_file, line_string))
        for item in items:
            try:
                float(item)
            except ValueError:
                print('File {}, line {}, number {} incorrect'.format(input_file, line_string, item))
        if evaluations - int(items[0]) > max_diff:
            print('Mismatch in evaluations in file {}\n'
                  '.info = {}\n'
                  '.adat = {}\n'
                  ' diff = {}\n'.format(input_file, evaluations, items[0],
                                        evaluations - int(items[0])))
    # Check whether .info and .adat files exist in the input paths
    info_files = get_file_name_list(input_paths, ".info")
    if len(info_files) == 0:
        raise PreprocessingException('Folder {} does not contain .info files'.format(input_paths))
    adat_files = get_file_name_list(input_paths, ".adat")
    if len(adat_files) == 0:
        raise PreprocessingException('Folder {} does not contain .adat files'.format(input_paths))
    # Map (function, instance, dimension) -> evaluations from the .info files
    info_dict = {}
    print('Reading .info files...')
    for input_file in info_files:
        try:
            info_data_list = parse_info_file(input_file)
        except ValueError as error:
            raise PreprocessingException('Cannot read file {}\n{}'.format(input_file, error))
        for (function, instance, dimension, evaluations) in info_data_list:
            if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
                continue
            info_dict[(function, instance, dimension)] = evaluations
    print('Reading .adat files...')
    for input_file in adat_files:
        try:
            (suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
            if (function not in functions) or (instance and instance not in instances) or \
                    (dimension not in dimensions):
                continue
        except PreprocessingWarning as warning:
            print('Skipping file {}\n{}'.format(input_file, warning))
            continue
        with open(input_file, 'r') as f:
            instance_found = False
            last_line = None
            for line in f:
                if not line.strip() or (line[0] == '%' and 'instance' not in line):
                    # Ignore empty lines and plain comment lines
                    continue
                elif line[0] == '%' and 'instance' in line:
                    # A new instance starts: first check the previous
                    # instance's last data line.
                    # Fix: forward the caller's max_diff (it was previously
                    # ignored and the inner default of 1e5 always applied).
                    if last_line:
                        inspect_line(input_file, last_line,
                                     info_dict[(function, instance, dimension)], max_diff)
                    instance = int(get_key_value(line[1:], 'instance'))
                    instance_found = (instance in instances)
                elif instance_found and line[0] != '%':
                    last_line = line
            if instance_found:
                # Check the last data line of the final instance in the file.
                inspect_line(input_file, last_line,
                             info_dict[(function, instance, dimension)], max_diff)
def evaluations_append(input_paths, functions, instances, dimensions, fast=False):
    """Append the comment `% evaluations = NUMBER` after every instance in the
    .adat files created by the bbob-biobj logger.

    NUMBER is retrieved from the corresponding .info file. If fast is True,
    each file is assumed to contain a single instance (read from the file
    contents, not the file name) and the comment is appended only once, at
    the end of the file; no check is performed that this is safe - the user
    should know when to choose this option. Only the given functions,
    instances and dimensions are taken into account.
    """
    # Check whether .info and .adat files exist in the input paths
    info_files = get_file_name_list(input_paths, ".info")
    if len(info_files) == 0:
        raise PreprocessingException('Folder {} does not contain .info files'.format(input_paths))
    adat_files = get_file_name_list(input_paths, ".adat")
    if len(adat_files) == 0:
        raise PreprocessingException('Folder {} does not contain .adat files'.format(input_paths))
    # Map (function, instance, dimension) -> evaluations from the .info files
    info_dict = {}
    for input_file in info_files:
        try:
            info_data_list = parse_info_file(input_file)
        except ValueError as error:
            raise PreprocessingException('Cannot read file {}\n{}'.format(input_file, error))
        for (function, instance, dimension, evaluations) in info_data_list:
            if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
                continue
            info_dict[(function, instance, dimension)] = evaluations
    for input_file in adat_files:
        try:
            (suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
            if (function not in functions) or (instance and instance not in instances) or \
                    (dimension not in dimensions):
                continue
        except PreprocessingWarning as warning:
            print('Skipping file {}\n{}'.format(input_file, warning))
            continue
        try:
            if instance or fast:
                # Assumes only one instance is contained in the file
                with open(input_file, 'r') as f:
                    for line in f:
                        if (line[0] == '%') and ('instance' in line):
                            instance = int(get_key_value(line[1:], 'instance'))
                            break
                # NOTE(review): no leading newline is written; assumes the
                # .adat file ends with a newline -- confirm.
                with open(input_file, 'a') as f:
                    f.write('% evaluations = {}'.format(info_dict[(function, instance, dimension)]))
            else:
                first_instance = True
                # Insert the comment before each instance header except the
                # first one, i.e. at the end of the preceding instance.
                for line in fileinput.input(input_file, inplace=True):
                    if (line[0] == '%') and ('instance' in line):
                        # Fix: the comment closes the *previous* instance, so
                        # use its number; the code previously looked up the
                        # evaluations of the instance that was just starting.
                        previous_instance = instance
                        instance = int(get_key_value(line[1:], 'instance'))
                        if first_instance:
                            first_instance = False
                        else:
                            sys.stdout.write('% evaluations = {}\n'.format(
                                info_dict[(function, previous_instance, dimension)]))
                    sys.stdout.write(line)
                fileinput.close()
                # Take care of the last instance in the file
                with open(input_file, 'a') as f:
                    f.write('% evaluations = {}'.format(info_dict[(function, instance, dimension)]))
        except KeyError as error:
            print('Encountered problem in file {}\n{}'.format(input_file, error))
            fileinput.close()
            continue
if __name__ == '__main__':
    """Appends the comment `% evaluations = NUMBER` to the end of every instance in the algorithm archives.
    The input folders should include .info files for all corresponding .adat files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
                        help='function numbers to be included in the processing of archives')
    parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
                        help='instance numbers to be included in the processing of archives')
    parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],
                        help='dimensions to be included in the processing of archives')
    parser.add_argument('--fast', action='store_true',
                        help='fast option that assumes all archive files contain only one instance')
    parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
    args = parser.parse_args()
    print('Program called with arguments: \ninput folders = {}\nfast = {}'.format(args.input, args.fast))
    print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(args.functions, args.instances, args.dimensions))
    evaluations_append(args.input, args.functions, args.instances, args.dimensions, args.fast)
    # (Trailing comment was corrupted by fused dataset residue.)
    # check_file_complete(args.input, args.functions, args.instances, args.dimensions)
import sys
import argparse
import fileinput
from cocoprep.archive_exceptions import PreprocessingException, PreprocessingWarning
from cocoprep.archive_load_data import parse_archive_file_name, parse_range, get_key_value, get_file_name_list
def parse_info_file(file_name):
    """Return a list of quadruples [function, instance, dimension, evaluations]
    read from the .info file with the given name.

    Each 'function' line carries the function number and dimension; the
    comma-separated ``instance:evaluations`` pairs on the same line (with
    '|' treated as ':') yield one quadruple per instance.
    """
    info_data_list = []
    with open(file_name, 'r') as f:
        for line in f:
            if 'function' not in line:
                continue
            function = int(get_key_value(line, 'function'))
            dimension = int(get_key_value(line, 'dim'))
            instances = []
            evaluations = []
            for element in line.split(','):
                if ':' in element:
                    parts = element.replace('|', ':').split(':')
                    instances.append(int(parts[0]))
                    evaluations.append(int(parts[1]))
            for instance, evaluation in zip(instances, evaluations):
                info_data_list.append([function, instance, dimension, evaluation])
    # (Removed a redundant f.close() -- the with-statement already closed f.)
    return info_data_list
def check_file_complete(input_paths, functions, instances, dimensions, max_diff=1000):
    """Check that the .adat files created by the bbob-biobj logger were fully
    written.

    For each selected (function, instance, dimension), the last data line of
    the .adat file is compared with the evaluation count recorded in the
    corresponding .info file; a message is printed when they differ by more
    than max_diff. Only the given functions, instances and dimensions are
    taken into account.
    """
    def inspect_line(input_file, line_string, evaluations, max_diff=1e5):
        """Check that line_string contains at least three well-formed numbers
        and print a message when `evaluations` exceeds its first number by
        more than max_diff.
        """
        items = line_string.split()
        if len(items) < 3:
            print("File {}, line {} too short".format(input_file, line_string))
        for item in items:
            try:
                float(item)
            except ValueError:
                print('File {}, line {}, number {} incorrect'.format(input_file, line_string, item))
        if evaluations - int(items[0]) > max_diff:
            print('Mismatch in evaluations in file {}\n'
                  '.info = {}\n'
                  '.adat = {}\n'
                  ' diff = {}\n'.format(input_file, evaluations, items[0],
                                        evaluations - int(items[0])))
    # Check whether .info and .adat files exist in the input paths
    info_files = get_file_name_list(input_paths, ".info")
    if len(info_files) == 0:
        raise PreprocessingException('Folder {} does not contain .info files'.format(input_paths))
    adat_files = get_file_name_list(input_paths, ".adat")
    if len(adat_files) == 0:
        raise PreprocessingException('Folder {} does not contain .adat files'.format(input_paths))
    # Map (function, instance, dimension) -> evaluations from the .info files
    info_dict = {}
    print('Reading .info files...')
    for input_file in info_files:
        try:
            info_data_list = parse_info_file(input_file)
        except ValueError as error:
            raise PreprocessingException('Cannot read file {}\n{}'.format(input_file, error))
        for (function, instance, dimension, evaluations) in info_data_list:
            if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
                continue
            info_dict[(function, instance, dimension)] = evaluations
    print('Reading .adat files...')
    for input_file in adat_files:
        try:
            (suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
            if (function not in functions) or (instance and instance not in instances) or \
                    (dimension not in dimensions):
                continue
        except PreprocessingWarning as warning:
            print('Skipping file {}\n{}'.format(input_file, warning))
            continue
        with open(input_file, 'r') as f:
            instance_found = False
            last_line = None
            for line in f:
                if not line.strip() or (line[0] == '%' and 'instance' not in line):
                    # Ignore empty lines and plain comment lines
                    continue
                elif line[0] == '%' and 'instance' in line:
                    # A new instance starts: first check the previous
                    # instance's last data line.
                    # Fix: forward the caller's max_diff (it was previously
                    # ignored and the inner default of 1e5 always applied).
                    if last_line:
                        inspect_line(input_file, last_line,
                                     info_dict[(function, instance, dimension)], max_diff)
                    instance = int(get_key_value(line[1:], 'instance'))
                    instance_found = (instance in instances)
                elif instance_found and line[0] != '%':
                    last_line = line
            if instance_found:
                # Check the last data line of the final instance in the file.
                inspect_line(input_file, last_line,
                             info_dict[(function, instance, dimension)], max_diff)
def evaluations_append(input_paths, functions, instances, dimensions, fast=False):
    """Append the comment `% evaluations = NUMBER` after every instance in the
    .adat files created by the bbob-biobj logger.

    NUMBER is retrieved from the corresponding .info file. If fast is True,
    each file is assumed to contain a single instance (read from the file
    contents, not the file name) and the comment is appended only once, at
    the end of the file; no check is performed that this is safe - the user
    should know when to choose this option. Only the given functions,
    instances and dimensions are taken into account.
    """
    # Check whether .info and .adat files exist in the input paths
    info_files = get_file_name_list(input_paths, ".info")
    if len(info_files) == 0:
        raise PreprocessingException('Folder {} does not contain .info files'.format(input_paths))
    adat_files = get_file_name_list(input_paths, ".adat")
    if len(adat_files) == 0:
        raise PreprocessingException('Folder {} does not contain .adat files'.format(input_paths))
    # Map (function, instance, dimension) -> evaluations from the .info files
    info_dict = {}
    for input_file in info_files:
        try:
            info_data_list = parse_info_file(input_file)
        except ValueError as error:
            raise PreprocessingException('Cannot read file {}\n{}'.format(input_file, error))
        for (function, instance, dimension, evaluations) in info_data_list:
            if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
                continue
            info_dict[(function, instance, dimension)] = evaluations
    for input_file in adat_files:
        try:
            (suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
            if (function not in functions) or (instance and instance not in instances) or \
                    (dimension not in dimensions):
                continue
        except PreprocessingWarning as warning:
            print('Skipping file {}\n{}'.format(input_file, warning))
            continue
        try:
            if instance or fast:
                # Assumes only one instance is contained in the file
                with open(input_file, 'r') as f:
                    for line in f:
                        if (line[0] == '%') and ('instance' in line):
                            instance = int(get_key_value(line[1:], 'instance'))
                            break
                # NOTE(review): no leading newline is written; assumes the
                # .adat file ends with a newline -- confirm.
                with open(input_file, 'a') as f:
                    f.write('% evaluations = {}'.format(info_dict[(function, instance, dimension)]))
            else:
                first_instance = True
                # Insert the comment before each instance header except the
                # first one, i.e. at the end of the preceding instance.
                for line in fileinput.input(input_file, inplace=True):
                    if (line[0] == '%') and ('instance' in line):
                        # Fix: the comment closes the *previous* instance, so
                        # use its number; the code previously looked up the
                        # evaluations of the instance that was just starting.
                        previous_instance = instance
                        instance = int(get_key_value(line[1:], 'instance'))
                        if first_instance:
                            first_instance = False
                        else:
                            sys.stdout.write('% evaluations = {}\n'.format(
                                info_dict[(function, previous_instance, dimension)]))
                    sys.stdout.write(line)
                fileinput.close()
                # Take care of the last instance in the file
                with open(input_file, 'a') as f:
                    f.write('% evaluations = {}'.format(info_dict[(function, instance, dimension)]))
        except KeyError as error:
            print('Encountered problem in file {}\n{}'.format(input_file, error))
            fileinput.close()
            continue
if __name__ == '__main__':
    """Appends the comment `% evaluations = NUMBER` to the end of every instance in the algorithm archives.
    The input folders should include .info files for all corresponding .adat files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
                        help='function numbers to be included in the processing of archives')
    parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
                        help='instance numbers to be included in the processing of archives')
    parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],
                        help='dimensions to be included in the processing of archives')
    parser.add_argument('--fast', action='store_true',
                        help='fast option that assumes all archive files contain only one instance')
    parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
    args = parser.parse_args()
    print('Program called with arguments: \ninput folders = {}\nfast = {}'.format(args.input, args.fast))
    print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(args.functions, args.instances, args.dimensions))
    evaluations_append(args.input, args.functions, args.instances, args.dimensions, args.fast)
    # (Trailing comment was corrupted by fused dataset residue.)
    # check_file_complete(args.input, args.functions, args.instances, args.dimensions)
from homeassistant.components.device_tracker import DOMAIN, config_entry as ce
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import MockConfigEntry
def test_tracker_entity():
    """force_update is True exactly when polling is disabled."""
    class MockTracker(ce.TrackerEntity):
        """Minimal tracker subclass for the test."""
        should_poll = False

    tracker = MockTracker()
    assert tracker.force_update
    tracker.should_poll = True
    assert not tracker.force_update
async def test_cleanup_legacy(hass, enable_custom_integrations):
    """Test we clean up devices created by old device tracker.

    Builds devices and entities covering each cleanup case, forwards the
    config entry to the device_tracker platform, and checks which registry
    entries survive the cleanup.
    """
    dev_reg = dr.async_get(hass)
    ent_reg = er.async_get(hass)
    config_entry = MockConfigEntry(domain="test")
    config_entry.add_to_hass(hass)
    # Three devices identified via the device_tracker domain.
    device1 = dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device1")}
    )
    device2 = dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device2")}
    )
    device3 = dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device3")}
    )
    # Device with light + device tracker entity
    entity1a = ent_reg.async_get_or_create(
        DOMAIN,
        "test",
        "entity1a-unique",
        config_entry=config_entry,
        device_id=device1.id,
    )
    entity1b = ent_reg.async_get_or_create(
        "light",
        "test",
        "entity1b-unique",
        config_entry=config_entry,
        device_id=device1.id,
    )
    # Just device tracker entity
    entity2a = ent_reg.async_get_or_create(
        DOMAIN,
        "test",
        "entity2a-unique",
        config_entry=config_entry,
        device_id=device2.id,
    )
    # Device with no device tracker entities
    entity3a = ent_reg.async_get_or_create(
        "light",
        "test",
        "entity3a-unique",
        config_entry=config_entry,
        device_id=device3.id,
    )
    # Device tracker but no device
    entity4a = ent_reg.async_get_or_create(
        DOMAIN,
        "test",
        "entity4a-unique",
        config_entry=config_entry,
    )
    # Completely different entity
    entity5a = ent_reg.async_get_or_create(
        "light",
        "test",
        "entity4a-unique",
        config_entry=config_entry,
    )
    # Forwarding the entry to the device_tracker platform triggers cleanup.
    await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
    await hass.async_block_till_done()
    # Entities on mixed devices, without a device, or from other domains survive.
    for entity in (entity1a, entity1b, entity3a, entity4a, entity5a):
        assert ent_reg.async_get(entity.entity_id) is not None
    # We've removed device so device ID cleared
    assert ent_reg.async_get(entity2a.entity_id).device_id is None
    # Removed because only had device tracker entity
    assert dev_reg.async_get(device2.id) is None
async def test_register_mac(hass):
"""Test registering a mac."""
dev_reg = dr.async_get(hass)
ent_reg = er.async_get(hass)
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
mac1 = "12:34:56:AB:CD:EF"
entity_entry_1 = ent_reg.async_get_or_create(
"device_tracker",
"test",
mac1 + "yo1",
original_name="name 1",
config_entry=config_entry,
disabled_by=er.RegistryEntryDisabler.INTEGRATION,
)
ce._async_register_mac(hass, "test", mac1, mac1 + "yo1")
dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac1)},
)
await hass.async_block_till_done()
entity_entry_1 = ent_reg.async_get(entity_entry_1.entity_id)
assert entity_entry_1.disabled_by is None | tests/components/device_tracker/test_config_entry.py | from homeassistant.components.device_tracker import DOMAIN, config_entry as ce
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import MockConfigEntry
def test_tracker_entity():
"""Test tracker entity."""
class TestEntry(ce.TrackerEntity):
"""Mock tracker class."""
should_poll = False
instance = TestEntry()
assert instance.force_update
instance.should_poll = True
assert not instance.force_update
async def test_cleanup_legacy(hass, enable_custom_integrations):
"""Test we clean up devices created by old device tracker."""
dev_reg = dr.async_get(hass)
ent_reg = er.async_get(hass)
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
device1 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device1")}
)
device2 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device2")}
)
device3 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device3")}
)
# Device with light + device tracker entity
entity1a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity1a-unique",
config_entry=config_entry,
device_id=device1.id,
)
entity1b = ent_reg.async_get_or_create(
"light",
"test",
"entity1b-unique",
config_entry=config_entry,
device_id=device1.id,
)
# Just device tracker entity
entity2a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity2a-unique",
config_entry=config_entry,
device_id=device2.id,
)
# Device with no device tracker entities
entity3a = ent_reg.async_get_or_create(
"light",
"test",
"entity3a-unique",
config_entry=config_entry,
device_id=device3.id,
)
# Device tracker but no device
entity4a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity4a-unique",
config_entry=config_entry,
)
# Completely different entity
entity5a = ent_reg.async_get_or_create(
"light",
"test",
"entity4a-unique",
config_entry=config_entry,
)
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
for entity in (entity1a, entity1b, entity3a, entity4a, entity5a):
assert ent_reg.async_get(entity.entity_id) is not None
# We've removed device so device ID cleared
assert ent_reg.async_get(entity2a.entity_id).device_id is None
# Removed because only had device tracker entity
assert dev_reg.async_get(device2.id) is None
async def test_register_mac(hass):
"""Test registering a mac."""
dev_reg = dr.async_get(hass)
ent_reg = er.async_get(hass)
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
mac1 = "12:34:56:AB:CD:EF"
entity_entry_1 = ent_reg.async_get_or_create(
"device_tracker",
"test",
mac1 + "yo1",
original_name="name 1",
config_entry=config_entry,
disabled_by=er.RegistryEntryDisabler.INTEGRATION,
)
ce._async_register_mac(hass, "test", mac1, mac1 + "yo1")
dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac1)},
)
await hass.async_block_till_done()
entity_entry_1 = ent_reg.async_get(entity_entry_1.entity_id)
assert entity_entry_1.disabled_by is None | 0.739046 | 0.303151 |
import pygame
import sys
from states.intro import Intro
from states.menu import Menu
from states.game import Game
from states.transition import Transition
from states.editor import Editor
from utils import music_manager as Music_Manager
import random
class Main(object):
def __init__(self):
pygame.init()
self.SCREEN_W = 960
self.SCREEN_H = 640
self.SCREEN_C = (self.SCREEN_W / 2, self.SCREEN_H / 2)
self.SCREEN = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H))
self.CLOCK = pygame.time.Clock()
self.DISPLAY = pygame.Surface((self.SCREEN_W, self.SCREEN_H), flags=pygame.SRCALPHA)
pygame.display.set_icon(pygame.image.load('assets/icons/icon.ico'))
pygame.display.set_caption('Followed')
self.music_manager = Music_Manager()
self.fonts = {
"general": pygame.font.Font("assets/fonts/oswald.ttf", 25)
}
self.sfx = {
"transtion": pygame.mixer.Sound("assets/sfx/transtion.wav"),
"select": pygame.mixer.Sound("assets/sfx/select.wav"),
}
self.screen_shake = {
"intensity": 0,
"duration": 0,
"active": False
}
self._events = {}
self._states = {
"intro": Intro(self),
"menu": Menu(self),
"game": Game(self),
"transition": Transition(self),
"level_editor": Editor(self)
}
self._state = "intro"
def events(self):
self._events.clear()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.shutdown()
if event.type == pygame.MOUSEBUTTONDOWN:
self._events["mousebuttondown"] = event
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
self._events["keydown-left"] = True
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
self._events["keydown-right"] = True
elif event.key == pygame.K_UP or event.key == pygame.K_w:
self._events["keydown-up"] = True
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
self._events["keydown-down"] = True
elif event.key == pygame.K_r:
self._events["restart"] = True
elif event.key == pygame.K_l:
self._events["level"] = True
elif event.key == pygame.K_y:
self._events["save"] = True
elif event.key == pygame.K_1:
self._events["1"] = True
elif event.key == pygame.K_2:
self._events["2"] = True
elif event.key == pygame.K_3:
self._events["3"] = True
elif event.key == pygame.K_4:
self._events["4"] = True
elif event.key == pygame.K_5:
self._events["5"] = True
elif event.key == pygame.K_6:
self._events["6"] = True
elif event.key == pygame.K_7:
self._events["7"] = True
elif event.key == pygame.K_8:
self._events["8"] = True
elif event.key == pygame.K_9:
self._events["9"] = True
elif event.key == pygame.K_0:
self._events["0"] = True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
self._events["keydown-left"] = False
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
self._events["keydown-right"] = False
elif event.key == pygame.K_UP or event.key == pygame.K_w:
self._events["keydown-up"] = False
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
self._events["keydown-down"] = False
elif event.key == pygame.K_r:
self._events["restart"] = False
elif event.key == pygame.K_l:
self._events["level"] = False
elif event.key == pygame.K_y:
self._events["save"] = False
elif event.key == pygame.K_1:
self._events["1"] = False
elif event.key == pygame.K_2:
self._events["2"] = False
elif event.key == pygame.K_3:
self._events["3"] = False
elif event.key == pygame.K_4:
self._events["4"] = False
elif event.key == pygame.K_5:
self._events["5"] = False
elif event.key == pygame.K_6:
self._events["6"] = False
elif event.key == pygame.K_7:
self._events["7"] = False
elif event.key == pygame.K_8:
self._events["8"] = False
elif event.key == pygame.K_9:
self._events["9"] = False
elif event.key == pygame.K_0:
self._events["0"] = False
def update(self):
self._states[self._state].update(self._events)
self._states["transition"].update()
if self.screen_shake["active"]:
if self.screen_shake["duration"] > 0:
self.screen_shake["duration"] -= 1
else:
self.screen_shake["active"] = False
self.music_manager.update( self._state )
def render(self):
self.DISPLAY.fill((0, 0, 0))
self._states[self._state].render(self.DISPLAY)
self._states["transition"].render(self.DISPLAY)
display_offset = [0, 0]
if self.screen_shake["active"]:
display_offset[0] += random.randint(-self.screen_shake["intensity"][0], self.screen_shake["intensity"][0])
display_offset[1] += random.randint(-self.screen_shake["intensity"][1], self.screen_shake["intensity"][1])
self.SCREEN.blit(self.DISPLAY, display_offset)
pygame.display.update()
def screenshake(self, intensity=[2, 2], duration=20):
self.screen_shake["intensity"] = intensity
self.screen_shake["duration"] = duration
self.screen_shake["active"] = True
def render_text(self, surface, text, x, y, font="general", render_centerx=True, render_centery=True):
for i, line in enumerate(text.split("\n")):
text_surface = self.fonts[font].render(line, True, (255, 255, 255))
text_surface_3D = self.fonts[font].render(line, True, (120, 120, 120))
match render_centerx, render_centery:
case True, True:
text_rect = text_surface.get_rect(center=(x, y + text_surface.get_height() * i))
case True, False:
text_rect = text_surface.get_rect(centerx=x, top=y + text_surface.get_height() * i)
case False, True:
text_rect = text_surface.get_rect(left=x, centery=y + text_surface.get_height() * i)
case False, False:
text_rect = text_surface.get_rect(topleft=(x, y + text_surface.get_height() * i))
text_rect_3D = text_rect.copy()
text_rect_3D.x += 3
text_rect_3D.y -= 3
surface.blit(text_surface_3D, text_rect_3D)
surface.blit(text_surface, text_rect)
def loop(self):
while True:
self.events()
self.update()
self.render()
self.CLOCK.tick(60)
def transition_to(self, state, setup=True, speed=2):
if not self._states["transition"].active:
if setup:
self._states[state].setup()
self._states["transition"].setup()
self._states["transition"].speed = speed
self._states["transition"].active = True
self._states["transition"].endstate = state
def shutdown(self):
pygame.quit()
sys.exit()
if __name__ == "__main__":
Main().loop() | main.py | import pygame
import sys
from states.intro import Intro
from states.menu import Menu
from states.game import Game
from states.transition import Transition
from states.editor import Editor
from utils import music_manager as Music_Manager
import random
class Main(object):
def __init__(self):
pygame.init()
self.SCREEN_W = 960
self.SCREEN_H = 640
self.SCREEN_C = (self.SCREEN_W / 2, self.SCREEN_H / 2)
self.SCREEN = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H))
self.CLOCK = pygame.time.Clock()
self.DISPLAY = pygame.Surface((self.SCREEN_W, self.SCREEN_H), flags=pygame.SRCALPHA)
pygame.display.set_icon(pygame.image.load('assets/icons/icon.ico'))
pygame.display.set_caption('Followed')
self.music_manager = Music_Manager()
self.fonts = {
"general": pygame.font.Font("assets/fonts/oswald.ttf", 25)
}
self.sfx = {
"transtion": pygame.mixer.Sound("assets/sfx/transtion.wav"),
"select": pygame.mixer.Sound("assets/sfx/select.wav"),
}
self.screen_shake = {
"intensity": 0,
"duration": 0,
"active": False
}
self._events = {}
self._states = {
"intro": Intro(self),
"menu": Menu(self),
"game": Game(self),
"transition": Transition(self),
"level_editor": Editor(self)
}
self._state = "intro"
def events(self):
self._events.clear()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.shutdown()
if event.type == pygame.MOUSEBUTTONDOWN:
self._events["mousebuttondown"] = event
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
self._events["keydown-left"] = True
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
self._events["keydown-right"] = True
elif event.key == pygame.K_UP or event.key == pygame.K_w:
self._events["keydown-up"] = True
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
self._events["keydown-down"] = True
elif event.key == pygame.K_r:
self._events["restart"] = True
elif event.key == pygame.K_l:
self._events["level"] = True
elif event.key == pygame.K_y:
self._events["save"] = True
elif event.key == pygame.K_1:
self._events["1"] = True
elif event.key == pygame.K_2:
self._events["2"] = True
elif event.key == pygame.K_3:
self._events["3"] = True
elif event.key == pygame.K_4:
self._events["4"] = True
elif event.key == pygame.K_5:
self._events["5"] = True
elif event.key == pygame.K_6:
self._events["6"] = True
elif event.key == pygame.K_7:
self._events["7"] = True
elif event.key == pygame.K_8:
self._events["8"] = True
elif event.key == pygame.K_9:
self._events["9"] = True
elif event.key == pygame.K_0:
self._events["0"] = True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
self._events["keydown-left"] = False
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
self._events["keydown-right"] = False
elif event.key == pygame.K_UP or event.key == pygame.K_w:
self._events["keydown-up"] = False
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
self._events["keydown-down"] = False
elif event.key == pygame.K_r:
self._events["restart"] = False
elif event.key == pygame.K_l:
self._events["level"] = False
elif event.key == pygame.K_y:
self._events["save"] = False
elif event.key == pygame.K_1:
self._events["1"] = False
elif event.key == pygame.K_2:
self._events["2"] = False
elif event.key == pygame.K_3:
self._events["3"] = False
elif event.key == pygame.K_4:
self._events["4"] = False
elif event.key == pygame.K_5:
self._events["5"] = False
elif event.key == pygame.K_6:
self._events["6"] = False
elif event.key == pygame.K_7:
self._events["7"] = False
elif event.key == pygame.K_8:
self._events["8"] = False
elif event.key == pygame.K_9:
self._events["9"] = False
elif event.key == pygame.K_0:
self._events["0"] = False
def update(self):
self._states[self._state].update(self._events)
self._states["transition"].update()
if self.screen_shake["active"]:
if self.screen_shake["duration"] > 0:
self.screen_shake["duration"] -= 1
else:
self.screen_shake["active"] = False
self.music_manager.update( self._state )
def render(self):
self.DISPLAY.fill((0, 0, 0))
self._states[self._state].render(self.DISPLAY)
self._states["transition"].render(self.DISPLAY)
display_offset = [0, 0]
if self.screen_shake["active"]:
display_offset[0] += random.randint(-self.screen_shake["intensity"][0], self.screen_shake["intensity"][0])
display_offset[1] += random.randint(-self.screen_shake["intensity"][1], self.screen_shake["intensity"][1])
self.SCREEN.blit(self.DISPLAY, display_offset)
pygame.display.update()
def screenshake(self, intensity=[2, 2], duration=20):
self.screen_shake["intensity"] = intensity
self.screen_shake["duration"] = duration
self.screen_shake["active"] = True
def render_text(self, surface, text, x, y, font="general", render_centerx=True, render_centery=True):
for i, line in enumerate(text.split("\n")):
text_surface = self.fonts[font].render(line, True, (255, 255, 255))
text_surface_3D = self.fonts[font].render(line, True, (120, 120, 120))
match render_centerx, render_centery:
case True, True:
text_rect = text_surface.get_rect(center=(x, y + text_surface.get_height() * i))
case True, False:
text_rect = text_surface.get_rect(centerx=x, top=y + text_surface.get_height() * i)
case False, True:
text_rect = text_surface.get_rect(left=x, centery=y + text_surface.get_height() * i)
case False, False:
text_rect = text_surface.get_rect(topleft=(x, y + text_surface.get_height() * i))
text_rect_3D = text_rect.copy()
text_rect_3D.x += 3
text_rect_3D.y -= 3
surface.blit(text_surface_3D, text_rect_3D)
surface.blit(text_surface, text_rect)
def loop(self):
while True:
self.events()
self.update()
self.render()
self.CLOCK.tick(60)
def transition_to(self, state, setup=True, speed=2):
if not self._states["transition"].active:
if setup:
self._states[state].setup()
self._states["transition"].setup()
self._states["transition"].speed = speed
self._states["transition"].active = True
self._states["transition"].endstate = state
def shutdown(self):
pygame.quit()
sys.exit()
if __name__ == "__main__":
Main().loop() | 0.234933 | 0.130923 |
import sys
import PySimpleGUI as sg
import etl_asset_data as etl
import data_base
from datetime import datetime as dt
sg.theme("DarkTeal10")
def create_main_window():
layout = [
[
sg.Text("Selecione o seu arquivo TXT"),
sg.In(key="-FILE-"),
sg.FileBrowse("Selecionar", file_types=(("ALL files", "*.txt"),))
],
[
sg.Text("Aguarde a importação dos dados", text_color='orange')
],
[
sg.Button("Importar para o MySQL", key="-START-", size=(20,1), button_color=('white', 'SeaGreen4')),
sg.Button("Finalizar Programa", key="-FINISH-", size=(20,1), button_color=('white', 'firebrick4'))
],
[
sg.Output(size=(80,25))
],
]
window = sg.Window("Importação dados B3", layout, location=(400,100))
return window
def create_second_window(count_total_inserts):
today = dt.now().strftime("%d/%m/%Y")
# Instantiating Data Base
data_obj = data_base.DataBase()
data_inserted = data_obj.get_data_inserted()
# Instantiating ETL Object with
etl_obj = etl.EtlAssetData()
data_inserted_list = etl_obj.convert_to_list(data_inserted)
layout = [
[
sg.Text(f"Dados Importados em {today}")
],
[
sg.Table(values=data_inserted_list,
headings=[ 'IdData', 'Tipo de Registro', 'Data do Pregão',
'Cód. BDI', 'Cód. Negociação', 'Tipo de Mercado',
'Nome Empresa', 'Especificação do Papel', 'Prazo Dias Mercado a Termo',
'Moeda Refrência', 'Preço de Abertura', 'Preço Máximo', 'Preço Mínimo',
'Preço Médio', 'Preço Último Negócio', 'Preço Melhor Oferta de Compra',
'Preço Melhor Oferta de Venda', 'Número de Negócios', 'Quantidade de Papéis Negociados',
'Volume Total Negociado', 'Preço Exercício', 'Indicador de Correção de Preços',
'Data Vencimento', 'Fator Cotação', 'Preço Exercício Pontos',
'Cód. ISIN', 'Número Distribuição Papel', 'Data da Importação'],
num_rows=min(30,
len(data_inserted_list)),
vertical_scroll_only=False,
auto_size_columns=True)
],
[
sg.Text(f'\nForam inseridos (ou atualizados) um total de {count_total_inserts} registros')
],
[
sg.Text("")
],
[
sg.Button("Voltar", key="-BACK-", button_color=('white', 'orange')),
sg.Button("Finalizar Programa", key="-FINISH-", button_color=('white', 'firebrick4'))
]
]
window = sg.Window(f"Dados Importados em {today}", layout, location=(55,0), size=(1300,790), resizable = True)
return window
main_window = create_main_window()
active_window = main_window
# Managing windows
while True:
event, values = active_window.read()
if event == "-START-":
if values['-FILE-'] != '':
# Instantiating ETL Object with
etl_obj = etl.EtlAssetData()
valid_file_format = etl_obj.validate_file_content(values['-FILE-'])
if not valid_file_format:
sg.Popup('Conteúdo do arquivo com formato fora do padrão. Favor verificar o cabeçalho.', keep_on_top=True)
else:
# Getting transformed data
asset_data = etl_obj.etl(values['-FILE-'])
# Instantiating Data Base
data_obj = data_base.DataBase()
# Seting data to MySql Data Base
insert_finish = data_obj.insert_data_mysql(asset_data)
if not insert_finish == False:
main_window.hide()
active_window = create_second_window(insert_finish)
else:
sg.Popup('Favor selecionar o arquivo a ser carregado no campo "Selecione o seu arquivo TXT"', keep_on_top=True)
elif event == "-BACK-":
active_window.hide()
active_window = create_main_window()
elif event == "-FINISH-":
break
# Closes if the user requires the window to close
if event == sg.WIN_CLOSED:
break | gui.py | import sys
import PySimpleGUI as sg
import etl_asset_data as etl
import data_base
from datetime import datetime as dt
sg.theme("DarkTeal10")
def create_main_window():
layout = [
[
sg.Text("Selecione o seu arquivo TXT"),
sg.In(key="-FILE-"),
sg.FileBrowse("Selecionar", file_types=(("ALL files", "*.txt"),))
],
[
sg.Text("Aguarde a importação dos dados", text_color='orange')
],
[
sg.Button("Importar para o MySQL", key="-START-", size=(20,1), button_color=('white', 'SeaGreen4')),
sg.Button("Finalizar Programa", key="-FINISH-", size=(20,1), button_color=('white', 'firebrick4'))
],
[
sg.Output(size=(80,25))
],
]
window = sg.Window("Importação dados B3", layout, location=(400,100))
return window
def create_second_window(count_total_inserts):
today = dt.now().strftime("%d/%m/%Y")
# Instantiating Data Base
data_obj = data_base.DataBase()
data_inserted = data_obj.get_data_inserted()
# Instantiating ETL Object with
etl_obj = etl.EtlAssetData()
data_inserted_list = etl_obj.convert_to_list(data_inserted)
layout = [
[
sg.Text(f"Dados Importados em {today}")
],
[
sg.Table(values=data_inserted_list,
headings=[ 'IdData', 'Tipo de Registro', 'Data do Pregão',
'Cód. BDI', 'Cód. Negociação', 'Tipo de Mercado',
'Nome Empresa', 'Especificação do Papel', 'Prazo Dias Mercado a Termo',
'Moeda Refrência', 'Preço de Abertura', 'Preço Máximo', 'Preço Mínimo',
'Preço Médio', 'Preço Último Negócio', 'Preço Melhor Oferta de Compra',
'Preço Melhor Oferta de Venda', 'Número de Negócios', 'Quantidade de Papéis Negociados',
'Volume Total Negociado', 'Preço Exercício', 'Indicador de Correção de Preços',
'Data Vencimento', 'Fator Cotação', 'Preço Exercício Pontos',
'Cód. ISIN', 'Número Distribuição Papel', 'Data da Importação'],
num_rows=min(30,
len(data_inserted_list)),
vertical_scroll_only=False,
auto_size_columns=True)
],
[
sg.Text(f'\nForam inseridos (ou atualizados) um total de {count_total_inserts} registros')
],
[
sg.Text("")
],
[
sg.Button("Voltar", key="-BACK-", button_color=('white', 'orange')),
sg.Button("Finalizar Programa", key="-FINISH-", button_color=('white', 'firebrick4'))
]
]
window = sg.Window(f"Dados Importados em {today}", layout, location=(55,0), size=(1300,790), resizable = True)
return window
main_window = create_main_window()
active_window = main_window
# Managing windows
while True:
event, values = active_window.read()
if event == "-START-":
if values['-FILE-'] != '':
# Instantiating ETL Object with
etl_obj = etl.EtlAssetData()
valid_file_format = etl_obj.validate_file_content(values['-FILE-'])
if not valid_file_format:
sg.Popup('Conteúdo do arquivo com formato fora do padrão. Favor verificar o cabeçalho.', keep_on_top=True)
else:
# Getting transformed data
asset_data = etl_obj.etl(values['-FILE-'])
# Instantiating Data Base
data_obj = data_base.DataBase()
# Seting data to MySql Data Base
insert_finish = data_obj.insert_data_mysql(asset_data)
if not insert_finish == False:
main_window.hide()
active_window = create_second_window(insert_finish)
else:
sg.Popup('Favor selecionar o arquivo a ser carregado no campo "Selecione o seu arquivo TXT"', keep_on_top=True)
elif event == "-BACK-":
active_window.hide()
active_window = create_main_window()
elif event == "-FINISH-":
break
# Closes if the user requires the window to close
if event == sg.WIN_CLOSED:
break | 0.354992 | 0.263053 |
import itertools
import numpy as np
import gym
from gym_minigrid.minigrid import COLORS, WorldObj
from .verifier import *
from .levelgen import *
class ColorSplitsBase(RoomGridLevel):
    """Single-room level whose box and ball colors are split into disjoint
    train/test sets, while keys may take any color.

    In training mode boxes/balls draw from the given color lists; in test
    mode they draw from the complementary color sets, so color/shape
    pairings seen at test time are novel.  In baseline mode (training only)
    just keys are spawned.
    """
    def __init__(self, room_size=8, num_dists=8, seed=None, box_colors = ['red', 'green', 'blue'], ball_colors = ['purple', 'yellow', 'grey'], training = True, baseline = False, **kwargs):
        # NOTE(review): the list defaults are mutable but are never mutated
        # in this class, so the shared-default pitfall does not bite here.
        self.num_dists = num_dists
        # Non-intersecting color sets for boxes and balls, all colors for keys
        self.training = training
        self.baseline = baseline
        self.all_colors = set(COLORS.keys())
        if self.training:
            self.box_colors = box_colors
            self.ball_colors = ball_colors
            if not self.baseline:
                self.shapes = ['key', 'ball', 'box']
            else:
                # Baseline: only keys (which can take any color) appear.
                self.shapes = ['key']
        else:
            # Test split: complement of the training color sets; no keys.
            self.box_colors = list(self.all_colors - set(box_colors))
            self.ball_colors = list(self.all_colors - set(ball_colors))
            self.shapes = ['ball', 'box']
        self.all_colors = list(self.all_colors)  # list form so _rand_elem can sample it
        super().__init__(
            num_rows=1,
            num_cols=1,
            room_size=room_size,
            seed=seed,
            **kwargs
        )
    def color_selector(self, obj_type):
        """Draw a random color from the set allowed for obj_type.

        Returns None (implicitly) for unrecognized shape names.
        """
        if obj_type == 'key':
            return self._rand_elem(self.all_colors)
        elif obj_type == 'box':
            return self._rand_elem(self.box_colors)
        elif obj_type == 'ball':
            return self._rand_elem(self.ball_colors)
        else:
            return
    def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True):
        """
        Add random objects that can potentially distract/confuse the agent.

        Shapes come from self.shapes and colors from the per-shape split
        via color_selector.  Returns the list of objects placed.
        """
        # Collect a list of existing objects
        objs = []
        for row in self.room_grid:
            for room in row:
                for obj in room.objs:
                    objs.append((obj.type, obj.color))
        # List of distractors added
        dists = []
        while len(dists) < num_distractors:
            obj_type = self._rand_elem(self.shapes)
            color = self.color_selector(obj_type)
            obj = (obj_type, color)
            if all_unique and obj in objs:
                continue
            # Add the object to a random room if no room specified
            room_i = i
            room_j = j
            if room_i == None:
                room_i = self._rand_int(0, self.num_cols)
            if room_j == None:
                room_j = self._rand_int(0, self.num_rows)
            dist, pos = self.add_object(room_i, room_j, *obj)
            objs.append(obj)
            dists.append(dist)
        return dists
class ColorSplitsTestBase(ColorSplitsBase):
    """Deterministic test base: instead of sampling, exhaustively enumerate
    every (target, distractor-color, distractor-shape, location, order)
    configuration so evaluation covers the full held-out grid.
    """
    # Class-level cache of all enumerated configurations; built lazily on
    # the first add_distractors call and shared by every instance.
    generator = None
    def __init__(self, room_size=8, num_dists=3, seed=None):
        self.batch_size = 120 # need to be synced manually with batch_evaluate
        self.generator_id = 0
        super().__init__(room_size = room_size, num_dists = num_dists, seed = seed, training = False, max_steps = 16)
    def build_generator(self):
        """Enumerate all test configurations.

        Each entry is ((tgt_shape, tgt_color, distractor_color,
        distractor_shape), target_location, distractor_order).  Targets are
        balls/boxes in their held-out colors; one distractor shares the
        target's shape (different color), the other shares its color.
        """
        csgen = [('ball', c, dc, dshape) for dshape in ['box', 'key'] for c in self.ball_colors for dc in self.all_colors if dc != c]
        csgen += [('box', c, dc, dshape) for dshape in ['ball', 'key'] for c in self.box_colors for dc in self.all_colors if dc != c]
        ColorSplitsTestBase.generator = list(itertools.product(csgen, range(3), range(2)))
    def seed(self, seed):
        """Map the seed onto an index into the configuration list.

        NOTE(review): seeds are assumed to be offset by 1e9 by the
        evaluation harness — confirm against the caller.
        """
        if seed is None:
            self.generator_id = 0
        else:
            self.generator_id = seed - int(1e9)
        return super().seed(seed)
    def add_distractors(self):
        """Place the target and two distractors per the current
        configuration and return the target object.

        Advances generator_id by batch_size so parallel envs with
        consecutive seeds walk disjoint strides of the enumeration.
        """
        if ColorSplitsTestBase.generator is None:
            self.build_generator()
        if self.generator_id >= len(ColorSplitsTestBase.generator):
            # Wrap around rather than raise once every config has been seen.
            self.generator_id %= len(ColorSplitsTestBase.generator)
            print('Generator exhausted')
        (tgt_shape, tgt_color, dcolor, dshape), tgt_loc, dselect = ColorSplitsTestBase.generator[self.generator_id]
        self.generator_id += self.batch_size
        # encode/decode round-trip builds a fresh WorldObj instance.
        getobj = lambda shape, color: WorldObj.decode(*WorldObj(shape, color).encode())
        target = getobj(tgt_shape, tgt_color)
        d1 = getobj(tgt_shape, dcolor)
        d2 = getobj(dshape, tgt_color)
        objs = [None, None, None]
        distractors = [d1, d2]
        # Target takes slot tgt_loc; the two distractors fill the remaining
        # slots in the order selected by dselect.
        objs[tgt_loc] = target
        objs[0 if tgt_loc != 0 else 1] = distractors[dselect]
        objs[2 if tgt_loc != 2 else 1] = distractors[1-dselect]
        # Fixed grid positions for the three object slots.
        locs = [(2,3), (4,3), (6,3)]
        room = self.get_room(0, 0)
        for i in range(3):
            pos = self.place_obj(objs[i], locs[i], (1,1), max_tries = 1)
            room.objs.append(objs[i])
        return target
class Level_GotoLocalColorSplits(ColorSplitsBase):
    """Go to a named object in a single room, using the training
    box/ball color split from ColorSplitsBase."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            training=True,
        )
    def gen_mission(self):
        """Place the agent, scatter distractors, and pick a goto target."""
        self.place_agent()
        candidates = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
        self.check_objs_reachable()
        target = self._rand_elem(candidates)
        self.instrs = GoToInstr(ObjDesc(target.type, target.color))
class Level_GotoLocalColorSplitsTest(ColorSplitsBase):
    """Held-out-split variant of the goto task: boxes and balls take the
    colors complementary to the training split (training=False)."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            training=False,
        )
    def gen_mission(self):
        """Place the agent, scatter distractors, and pick a goto target."""
        self.place_agent()
        candidates = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
        self.check_objs_reachable()
        target = self._rand_elem(candidates)
        self.instrs = GoToInstr(ObjDesc(target.type, target.color))
class Level_GotoLocalColorSplitsBaseline(ColorSplitsBase):
    """Baseline goto level: only keys spawn (baseline=True), and the
    instruction names the color only for keys; other shapes are referred
    to by shape alone."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        # BUG FIX: was `rroom_size=room_size` — the typo'd keyword fell into
        # ColorSplitsBase's **kwargs (so a non-default room_size was silently
        # ignored and the bogus kwarg was forwarded to RoomGridLevel).
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            baseline=True,
        )
    def gen_mission(self):
        """Place the agent and distractors, then pick a goto target.

        Keys are described as (shape, color); any other shape (not spawned
        under baseline=True, kept for safety) is described by shape only.
        """
        self.place_agent()
        objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
        self.check_objs_reachable()
        obj = self._rand_elem(objs)
        if obj.type == 'key':
            self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))
        else:
            self.instrs = GoToInstr(ObjDesc(obj.type))
class Level_PickupLocalColorSplits(ColorSplitsBase):
    """Pick up a named object in a single room under the color-split
    regime; the `training` flag selects which color split is active."""
    def __init__(self, room_size=8, num_dists=8, seed=None, training = True):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            training=training,
        )
    def gen_mission(self):
        """Place the agent, scatter distractors, and pick a pickup target."""
        self.place_agent()
        pool = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
        self.check_objs_reachable()
        chosen = self._rand_elem(pool)
        # strict=True: picking up a different matching object does not count.
        self.instrs = PickupInstr(ObjDesc(chosen.type, chosen.color), strict=True)
class Level_PickupLocalColorSplitsTest(Level_PickupLocalColorSplits):
    """Held-out-split variant of the pickup task (training=False)."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            training=False,
        )
class Level_PickupLocalColorSplitsTestStrict(ColorSplitsTestBase):
    """Deterministic strict-pickup test level: agent position/direction are
    fixed and the target/distractor layout comes from the enumerated
    configurations in ColorSplitsTestBase.
    """
    def gen_mission(self):
        # Clear agent_pos first so the cell occupancy check (if any) does not
        # see the agent while we mutate the grid.
        self.agent_pos = None
        pos = np.array((4,6))
        # Ensure the target cell is empty before placing the agent on it.
        self.grid.set(*pos, None)
        self.agent_pos = pos
        self.agent_dir = 3  # facing up, toward the object row at y=3
        target = self.add_distractors()
        self.instrs = PickupInstr(ObjDesc(target.type, target.color), strict = True)
class Level_PickupLocalColorSplitsBaseline(ColorSplitsBase):
    """Baseline pickup level: only keys spawn (baseline=True); keys are
    described with their color, other shapes by shape alone."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            baseline=True,
        )
    def gen_mission(self):
        """Place the agent and distractors, then pick a pickup target."""
        self.place_agent()
        pool = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
        self.check_objs_reachable()
        chosen = self._rand_elem(pool)
        if chosen.type == 'key':
            desc = ObjDesc(chosen.type, chosen.color)
        else:
            desc = ObjDesc(chosen.type)
        self.instrs = PickupInstr(desc)
class ShapeColorGeneralizationBase(RoomGridLevel):
    """Base level for shape/color instruction-generalization experiments.

    Training mixes a 'base' instruction (baseinstr, shape-only GoTo by
    default) with a 'gen' instruction (geninstr, strict Pickup by default)
    restricted to the common split; testing uses geninstr on the held-out
    split.  splits must be 'color' or 'shape'.
    """
    def __init__(self, room_size=8, num_dists=8, seed=None, training = True, splits = None, baseinstr = GoToInstr, geninstr = lambda desc: PickupInstr(desc, strict = True), **kwargs):
        self.num_dists = num_dists
        self.training = training
        self.baseinstr = baseinstr
        self.geninstr = geninstr
        self.all_shapes = {'key', 'ball', 'box'}
        self.splits = splits
        if splits == 'color':
            # Color generalization: no shape is reserved for training-only use.
            self.common_shapes = set()
        elif splits == 'shape':
            # Shape generalization: geninstr is trained on keys only.
            self.common_shapes = {'key'}
        else:
            raise ValueError('Must be either color or shape generalization')
        super().__init__(
            num_rows=1,
            num_cols=1,
            room_size=room_size,
            seed=seed,
            **kwargs
        )
    def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True, guaranteed_shapes = []):
        """
        Add random objects, ensuring at least one object has one of the
        guaranteed shapes.  Returns the list of objects placed.

        NOTE(review): `guaranteed` is set before the all_unique duplicate
        check, so with all_unique=True the guarantee could be skipped on a
        duplicate; all callers here pass all_unique=False, so it does not
        bite — confirm before reusing with all_unique=True.
        """
        COLOR_NAMES = list(COLORS.keys())
        # Collect a list of existing objects
        objs = []
        for row in self.room_grid:
            for room in row:
                for obj in room.objs:
                    objs.append((obj.type, obj.color))
        # List of distractors added
        dists = []
        guaranteed = False
        while len(dists) < num_distractors:
            color = self._rand_elem(COLOR_NAMES)
            if not guaranteed and len(guaranteed_shapes) > 0:
                # First placement draws from the guaranteed shape set.
                objtype = self._rand_elem(guaranteed_shapes)
                guaranteed = True
            else:
                objtype = self._rand_elem(list(self.all_shapes))
            obj = (objtype, color)
            if all_unique and obj in objs:
                continue
            # Add the object to a random room if no room specified
            room_i = i
            room_j = j
            if room_i == None:
                room_i = self._rand_int(0, self.num_cols)
            if room_j == None:
                room_j = self._rand_int(0, self.num_rows)
            dist, pos = self.add_object(room_i, room_j, *obj)
            objs.append(obj)
            dists.append(dist)
        return dists
    def add_shapes_select_target(self, exclude_shapes = set()):
        """Scatter distractors and pick a target whose shape is NOT in
        exclude_shapes (a shape outside it is guaranteed to exist).

        NOTE(review): the set() default is mutable but never mutated here.
        """
        objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False, guaranteed_shapes = list(self.all_shapes - exclude_shapes))
        self.check_objs_reachable()
        obj = self._rand_elem(objs)
        # Rejection-sample until an allowed shape is drawn; terminates because
        # guaranteed_shapes placed at least one allowed object.
        while obj.type in exclude_shapes:
            obj = self._rand_elem(objs)
        return obj
    def gen_mission(self):
        """Build one mission: agent placement, objects, and instruction."""
        self.place_agent()
        if self.training:
            # Coin-flip between the base task and the generalization task.
            mode = self.np_random.choice(['base', 'gen'])
            if mode == 'gen':
                if self.splits == 'color':
                    # Color-only description: shape left unspecified.
                    obj = self.add_shapes_select_target()
                    self.instrs = self.geninstr(ObjDesc(type = None, color = obj.color))
                elif self.splits == 'shape':
                    # Restrict geninstr targets to the common shapes.
                    obj = self.add_shapes_select_target(self.all_shapes - self.common_shapes)
                    self.instrs = self.geninstr(ObjDesc(obj.type))
            else:
                obj = self.add_shapes_select_target()
                self.instrs = self.baseinstr(ObjDesc(obj.type))
        else:
            # Test: geninstr on shapes held out from its training split.
            obj = self.add_shapes_select_target(self.common_shapes)
            self.instrs = self.geninstr(ObjDesc(obj.type))
class Level_PickupGotoLocalColorSplits(ShapeColorGeneralizationBase):
    """Training level for the color-generalization pickup/goto mix."""
    def __init__(self, room_size=8, num_dists=8, seed=None):
        super().__init__(
            room_size=room_size,
            num_dists=num_dists,
            seed=seed,
            splits='color',
            training=True,
        )
class Level_PickupGotoLocalColorSplitsTest(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'color', training = False)
class Level_PickupGotoLocalShapeSplits(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'shape', training = True)
class Level_PickupGotoLocalShapeSplitsTest(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'shape', training = False)
class Level_PutNextLocalShapeSplits(RoomGridLevel):
"""
Put an object next to another object, inside a single room
with no doors, no distractors
"""
def __init__(self, room_size=8, num_objs=8, seed=None, training = True):
self.training = training
if training:
self.o1types = {'box', 'ball'}
self.o2types = {'key'}
else:
self.o1types = {'key'}
self.o2types = {'box', 'ball'}
self.all_shapes = self.o1types | self.o2types
self.num_objs = num_objs
super().__init__(
num_rows=1,
num_cols=1,
room_size=room_size,
seed=seed
)
def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True, guaranteed_shapes = []):
"""
Add random objects, with at least one object has for each of the guaranteed shapes
"""
COLOR_NAMES = list(COLORS.keys())
# Collect a list of existing objects
objs = []
for row in self.room_grid:
for room in row:
for obj in room.objs:
objs.append((obj.type, obj.color))
# List of distractors added
dists = []
while len(dists) < num_distractors:
color = self._rand_elem(COLOR_NAMES)
if len(guaranteed_shapes) > 0:
objtype = guaranteed_shapes.pop()
else:
objtype = self._rand_elem(list(self.all_shapes))
obj = (objtype, color)
if all_unique and obj in objs:
continue
# Add the object to a random room if no room specified
room_i = i
room_j = j
if room_i == None:
room_i = self._rand_int(0, self.num_cols)
if room_j == None:
room_j = self._rand_int(0, self.num_rows)
dist, pos = self.add_object(room_i, room_j, *obj)
objs.append(obj)
dists.append(dist)
return dists
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_objs, all_unique=True, guaranteed_shapes = ['key', 'box', 'ball'])
self.check_objs_reachable()
o1, o2 = self._rand_subset(objs, 2)
while o1.type not in self.o1types:
o1 = self._rand_elem(objs)
while o2.type not in self.o2types or o1 == o2:
o2 = self._rand_elem(objs)
self.instrs = PutNextInstr(
ObjDesc(o1.type, o1.color),
ObjDesc(o2.type, o2.color)
)
class Level_PutNextLocalShapeSplitsTest(Level_PutNextLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = False)
class Level_PutNextPickupLocalShapeSplits(Level_PutNextLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None, training = True):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = training)
def gen_mission(self):
if self.training:
mode = self.np_random.choice(['putnext', 'pickup'])
if mode == 'pickup':
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_objs, all_unique=True, guaranteed_shapes = ['key', 'box', 'ball'])
self.check_objs_reachable()
target = self._rand_elem(objs)
self.instrs = PickupInstr(ObjDesc(target.type, target.color), strict = True)
return
super().gen_mission()
class Level_PutNextPickupLocalShapeSplitsTest(Level_PutNextPickupLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = False)
# Register the levels in this file
register_levels(__name__, globals(), prefix = 'Embodiment') | babyai/levels/embodiment_levels.py | import itertools
import numpy as np
import gym
from gym_minigrid.minigrid import COLORS, WorldObj
from .verifier import *
from .levelgen import *
class ColorSplitsBase(RoomGridLevel):
def __init__(self, room_size=8, num_dists=8, seed=None, box_colors = ['red', 'green', 'blue'], ball_colors = ['purple', 'yellow', 'grey'], training = True, baseline = False, **kwargs):
self.num_dists = num_dists
# Non-intersecting color sets for boxes and balls, all colors for keys
self.training = training
self.baseline = baseline
self.all_colors = set(COLORS.keys())
if self.training:
self.box_colors = box_colors
self.ball_colors = ball_colors
if not self.baseline:
self.shapes = ['key', 'ball', 'box']
else:
self.shapes = ['key']
else:
self.box_colors = list(self.all_colors - set(box_colors))
self.ball_colors = list(self.all_colors - set(ball_colors))
self.shapes = ['ball', 'box']
self.all_colors = list(self.all_colors)
super().__init__(
num_rows=1,
num_cols=1,
room_size=room_size,
seed=seed,
**kwargs
)
def color_selector(self, obj_type):
if obj_type == 'key':
return self._rand_elem(self.all_colors)
elif obj_type == 'box':
return self._rand_elem(self.box_colors)
elif obj_type == 'ball':
return self._rand_elem(self.ball_colors)
else:
return
def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True):
"""
Add random objects that can potentially distract/confuse the agent.
"""
# Collect a list of existing objects
objs = []
for row in self.room_grid:
for room in row:
for obj in room.objs:
objs.append((obj.type, obj.color))
# List of distractors added
dists = []
while len(dists) < num_distractors:
obj_type = self._rand_elem(self.shapes)
color = self.color_selector(obj_type)
obj = (obj_type, color)
if all_unique and obj in objs:
continue
# Add the object to a random room if no room specified
room_i = i
room_j = j
if room_i == None:
room_i = self._rand_int(0, self.num_cols)
if room_j == None:
room_j = self._rand_int(0, self.num_rows)
dist, pos = self.add_object(room_i, room_j, *obj)
objs.append(obj)
dists.append(dist)
return dists
class ColorSplitsTestBase(ColorSplitsBase):
generator = None
def __init__(self, room_size=8, num_dists=3, seed=None):
self.batch_size = 120 # need to be synced manually with batch_evaluate
self.generator_id = 0
super().__init__(room_size = room_size, num_dists = num_dists, seed = seed, training = False, max_steps = 16)
def build_generator(self):
csgen = [('ball', c, dc, dshape) for dshape in ['box', 'key'] for c in self.ball_colors for dc in self.all_colors if dc != c]
csgen += [('box', c, dc, dshape) for dshape in ['ball', 'key'] for c in self.box_colors for dc in self.all_colors if dc != c]
ColorSplitsTestBase.generator = list(itertools.product(csgen, range(3), range(2)))
def seed(self, seed):
if seed is None:
self.generator_id = 0
else:
self.generator_id = seed - int(1e9)
return super().seed(seed)
def add_distractors(self):
if ColorSplitsTestBase.generator is None:
self.build_generator()
if self.generator_id >= len(ColorSplitsTestBase.generator):
self.generator_id %= len(ColorSplitsTestBase.generator)
print('Generator exhausted')
(tgt_shape, tgt_color, dcolor, dshape), tgt_loc, dselect = ColorSplitsTestBase.generator[self.generator_id]
self.generator_id += self.batch_size
getobj = lambda shape, color: WorldObj.decode(*WorldObj(shape, color).encode())
target = getobj(tgt_shape, tgt_color)
d1 = getobj(tgt_shape, dcolor)
d2 = getobj(dshape, tgt_color)
objs = [None, None, None]
distractors = [d1, d2]
objs[tgt_loc] = target
objs[0 if tgt_loc != 0 else 1] = distractors[dselect]
objs[2 if tgt_loc != 2 else 1] = distractors[1-dselect]
locs = [(2,3), (4,3), (6,3)]
room = self.get_room(0, 0)
for i in range(3):
pos = self.place_obj(objs[i], locs[i], (1,1), max_tries = 1)
room.objs.append(objs[i])
return target
class Level_GotoLocalColorSplits(ColorSplitsBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, training = True)
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
self.check_objs_reachable()
obj = self._rand_elem(objs)
self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))
class Level_GotoLocalColorSplitsTest(ColorSplitsBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, training = False)
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
self.check_objs_reachable()
obj = self._rand_elem(objs)
self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))
class Level_GotoLocalColorSplitsBaseline(ColorSplitsBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(rroom_size=room_size, num_dists=num_dists, seed=seed, baseline = True)
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
self.check_objs_reachable()
obj = self._rand_elem(objs)
if obj.type == 'key':
self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))
else:
self.instrs = GoToInstr(ObjDesc(obj.type))
class Level_PickupLocalColorSplits(ColorSplitsBase):
def __init__(self, room_size=8, num_dists=8, seed=None, training = True):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, training = training)
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
self.check_objs_reachable()
obj = self._rand_elem(objs)
self.instrs = PickupInstr(ObjDesc(obj.type, obj.color), strict = True)
class Level_PickupLocalColorSplitsTest(Level_PickupLocalColorSplits):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, training = False)
class Level_PickupLocalColorSplitsTestStrict(ColorSplitsTestBase):
def gen_mission(self):
self.agent_pos = None
pos = np.array((4,6))
self.grid.set(*pos, None)
self.agent_pos = pos
self.agent_dir = 3
target = self.add_distractors()
self.instrs = PickupInstr(ObjDesc(target.type, target.color), strict = True)
class Level_PickupLocalColorSplitsBaseline(ColorSplitsBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, baseline = True)
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False)
self.check_objs_reachable()
obj = self._rand_elem(objs)
if obj.type == 'key':
self.instrs = PickupInstr(ObjDesc(obj.type, obj.color))
else:
self.instrs = PickupInstr(ObjDesc(obj.type))
class ShapeColorGeneralizationBase(RoomGridLevel):
def __init__(self, room_size=8, num_dists=8, seed=None, training = True, splits = None, baseinstr = GoToInstr, geninstr = lambda desc: PickupInstr(desc, strict = True), **kwargs):
self.num_dists = num_dists
self.training = training
self.baseinstr = baseinstr
self.geninstr = geninstr
self.all_shapes = {'key', 'ball', 'box'}
self.splits = splits
if splits == 'color':
self.common_shapes = set()
elif splits == 'shape':
self.common_shapes = {'key'}
else:
raise ValueError('Must be either color or shape generalization')
super().__init__(
num_rows=1,
num_cols=1,
room_size=room_size,
seed=seed,
**kwargs
)
def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True, guaranteed_shapes = []):
"""
Add random objects, with at least one object has one of the guaranteed shapes
"""
COLOR_NAMES = list(COLORS.keys())
# Collect a list of existing objects
objs = []
for row in self.room_grid:
for room in row:
for obj in room.objs:
objs.append((obj.type, obj.color))
# List of distractors added
dists = []
guaranteed = False
while len(dists) < num_distractors:
color = self._rand_elem(COLOR_NAMES)
if not guaranteed and len(guaranteed_shapes) > 0:
objtype = self._rand_elem(guaranteed_shapes)
guaranteed = True
else:
objtype = self._rand_elem(list(self.all_shapes))
obj = (objtype, color)
if all_unique and obj in objs:
continue
# Add the object to a random room if no room specified
room_i = i
room_j = j
if room_i == None:
room_i = self._rand_int(0, self.num_cols)
if room_j == None:
room_j = self._rand_int(0, self.num_rows)
dist, pos = self.add_object(room_i, room_j, *obj)
objs.append(obj)
dists.append(dist)
return dists
def add_shapes_select_target(self, exclude_shapes = set()):
objs = self.add_distractors(num_distractors=self.num_dists, all_unique=False, guaranteed_shapes = list(self.all_shapes - exclude_shapes))
self.check_objs_reachable()
obj = self._rand_elem(objs)
while obj.type in exclude_shapes:
obj = self._rand_elem(objs)
return obj
def gen_mission(self):
self.place_agent()
if self.training:
mode = self.np_random.choice(['base', 'gen'])
if mode == 'gen':
if self.splits == 'color':
obj = self.add_shapes_select_target()
self.instrs = self.geninstr(ObjDesc(type = None, color = obj.color))
elif self.splits == 'shape':
obj = self.add_shapes_select_target(self.all_shapes - self.common_shapes)
self.instrs = self.geninstr(ObjDesc(obj.type))
else:
obj = self.add_shapes_select_target()
self.instrs = self.baseinstr(ObjDesc(obj.type))
else:
obj = self.add_shapes_select_target(self.common_shapes)
self.instrs = self.geninstr(ObjDesc(obj.type))
class Level_PickupGotoLocalColorSplits(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'color', training = True)
class Level_PickupGotoLocalColorSplitsTest(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'color', training = False)
class Level_PickupGotoLocalShapeSplits(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'shape', training = True)
class Level_PickupGotoLocalShapeSplitsTest(ShapeColorGeneralizationBase):
def __init__(self, room_size=8, num_dists=8, seed=None):
super().__init__(room_size=room_size, num_dists=num_dists, seed=seed, splits = 'shape', training = False)
class Level_PutNextLocalShapeSplits(RoomGridLevel):
"""
Put an object next to another object, inside a single room
with no doors, no distractors
"""
def __init__(self, room_size=8, num_objs=8, seed=None, training = True):
self.training = training
if training:
self.o1types = {'box', 'ball'}
self.o2types = {'key'}
else:
self.o1types = {'key'}
self.o2types = {'box', 'ball'}
self.all_shapes = self.o1types | self.o2types
self.num_objs = num_objs
super().__init__(
num_rows=1,
num_cols=1,
room_size=room_size,
seed=seed
)
def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True, guaranteed_shapes = []):
"""
Add random objects, with at least one object has for each of the guaranteed shapes
"""
COLOR_NAMES = list(COLORS.keys())
# Collect a list of existing objects
objs = []
for row in self.room_grid:
for room in row:
for obj in room.objs:
objs.append((obj.type, obj.color))
# List of distractors added
dists = []
while len(dists) < num_distractors:
color = self._rand_elem(COLOR_NAMES)
if len(guaranteed_shapes) > 0:
objtype = guaranteed_shapes.pop()
else:
objtype = self._rand_elem(list(self.all_shapes))
obj = (objtype, color)
if all_unique and obj in objs:
continue
# Add the object to a random room if no room specified
room_i = i
room_j = j
if room_i == None:
room_i = self._rand_int(0, self.num_cols)
if room_j == None:
room_j = self._rand_int(0, self.num_rows)
dist, pos = self.add_object(room_i, room_j, *obj)
objs.append(obj)
dists.append(dist)
return dists
def gen_mission(self):
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_objs, all_unique=True, guaranteed_shapes = ['key', 'box', 'ball'])
self.check_objs_reachable()
o1, o2 = self._rand_subset(objs, 2)
while o1.type not in self.o1types:
o1 = self._rand_elem(objs)
while o2.type not in self.o2types or o1 == o2:
o2 = self._rand_elem(objs)
self.instrs = PutNextInstr(
ObjDesc(o1.type, o1.color),
ObjDesc(o2.type, o2.color)
)
class Level_PutNextLocalShapeSplitsTest(Level_PutNextLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = False)
class Level_PutNextPickupLocalShapeSplits(Level_PutNextLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None, training = True):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = training)
def gen_mission(self):
if self.training:
mode = self.np_random.choice(['putnext', 'pickup'])
if mode == 'pickup':
self.place_agent()
objs = self.add_distractors(num_distractors=self.num_objs, all_unique=True, guaranteed_shapes = ['key', 'box', 'ball'])
self.check_objs_reachable()
target = self._rand_elem(objs)
self.instrs = PickupInstr(ObjDesc(target.type, target.color), strict = True)
return
super().gen_mission()
class Level_PutNextPickupLocalShapeSplitsTest(Level_PutNextPickupLocalShapeSplits):
def __init__(self, room_size=8, num_objs=8, seed=None):
super().__init__(room_size=room_size, num_objs = num_objs, seed=seed, training = False)
# Register the levels in this file
register_levels(__name__, globals(), prefix = 'Embodiment') | 0.617397 | 0.198491 |
# Script for organizing ASL data to BIDS format
# Pull scan name from asl and m0 image, NEED TO DO: output to examcard2json
# Remove asl image (or just don't put it in the BIDS folder?)
# Convert dicom to nifti
import glob
import os
import sys, getopt
from pydicom import dcmread
import json
def main(argv):
indir = ''
try:
opts, args = getopt.getopt(argv, "hi:a:m:s:p:",["indir=","asl=","m0=","source="])
except getopt.GetoptError:
print('organize_data.py')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('organize_data.py')
sys.exit()
elif opt in ("-i", "--indir"):
indir = arg
elif opt in ("-a", "--asl"):
asl = arg
elif opt in ("-m", "--m0"):
m0 = arg
elif opt in ("-s", "--source"):
source = arg
# set T1
t1w=glob.glob(indir + '/t1.dcm')
if not t1w:
t1w=glob.glob(indir + '/T1.dcm')
t1w=t1w[0]
# check if file paths are absolute
if os.path.isabs(asl) == False:
asl = indir + '/' + asl
m0 = indir + '/' + m0
source = indir + '/' + source
# get pydicom info for each scan
ds_asl = dcmread(asl)
ds_m0 = dcmread(m0)
ds_t1w = dcmread(t1w)
ds_source = dcmread(source)
# pull scan name from dicom header
scanname = {}
scanname['asl'] = ds_asl.SeriesDescription
scanname['m0'] = ds_m0.SeriesDescription
# write scanname dict to json
with open(indir + '/SeriesDescription.json','w') as outfile:
json.dump(scanname,outfile)
# move scans to BIDS directories
os.system('mkdir -p ' + indir + '/BIDS/sub-01/ses-01/anat/')
os.system('mkdir -p ' + indir + '/BIDS/sub-01/ses-01/perf/')
os.system('cp ' + source + ' ' + indir + '/BIDS/sub-01/ses-01/perf/' + os.path.basename(source))
os.system('cp ' + m0 + ' ' + indir + '/BIDS/sub-01/ses-01/perf/' + os.path.basename(m0))
os.system('cp ' + t1w + ' ' + indir + '/BIDS/sub-01/ses-01/anat/' + os.path.basename(t1w))
# run dcm2niix on source and m0 scans
os.system('dcm2niix -f %b ' + indir + '/BIDS/sub-01/ses-01/anat')
os.system('dcm2niix -f %b ' + indir + '/BIDS/sub-01/ses-01/perf')
# remove leftover dicoms
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/*/*'):
if file.endswith('.dcm'):
os.system('rm ' + file)
# rename nii/json files to match bids formatting
#ds_t1w.SeriesDescription = ds_t1w.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
#ds_asl.SeriesDescription = ds_asl.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
#ds_m0.SeriesDescription = ds_m0.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
anat_rename = 'sub-01_ses-01_T1w'
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/anat/*'):
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + anat_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + anat_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + anat_rename + '.nii')
asl_rename = 'sub-01_ses-01_asl'
m0_rename = 'sub-01_ses-01_m0scan'
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/perf/*'):
if 'M0' in file or 'm0' in file:
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + m0_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + m0_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + m0_rename + '.nii')
else:
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + asl_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + asl_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + asl_rename + '.nii')
# create dataset_description.json
dataset_description = {
"BIDSVersion": "1.0.1",
"Name": "XNAT Project",
"DatasetDOI": "https://xnat2.vanderbilt.edu/xnat",
"Author": "No Author defined on XNAT"
}
with open(indir + '/BIDS/dataset_description.json','w') as outfile:
json.dump(dataset_description,outfile)
if __name__ == '__main__':
main(sys.argv[1:]) | xnatwrapper/organize_data.py |
# Script for organizing ASL data to BIDS format
# Pull scan name from asl and m0 image, NEED TO DO: output to examcard2json
# Remove asl image (or just don't put it in the BIDS folder?)
# Convert dicom to nifti
import glob
import os
import sys, getopt
from pydicom import dcmread
import json
def main(argv):
indir = ''
try:
opts, args = getopt.getopt(argv, "hi:a:m:s:p:",["indir=","asl=","m0=","source="])
except getopt.GetoptError:
print('organize_data.py')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('organize_data.py')
sys.exit()
elif opt in ("-i", "--indir"):
indir = arg
elif opt in ("-a", "--asl"):
asl = arg
elif opt in ("-m", "--m0"):
m0 = arg
elif opt in ("-s", "--source"):
source = arg
# set T1
t1w=glob.glob(indir + '/t1.dcm')
if not t1w:
t1w=glob.glob(indir + '/T1.dcm')
t1w=t1w[0]
# check if file paths are absolute
if os.path.isabs(asl) == False:
asl = indir + '/' + asl
m0 = indir + '/' + m0
source = indir + '/' + source
# get pydicom info for each scan
ds_asl = dcmread(asl)
ds_m0 = dcmread(m0)
ds_t1w = dcmread(t1w)
ds_source = dcmread(source)
# pull scan name from dicom header
scanname = {}
scanname['asl'] = ds_asl.SeriesDescription
scanname['m0'] = ds_m0.SeriesDescription
# write scanname dict to json
with open(indir + '/SeriesDescription.json','w') as outfile:
json.dump(scanname,outfile)
# move scans to BIDS directories
os.system('mkdir -p ' + indir + '/BIDS/sub-01/ses-01/anat/')
os.system('mkdir -p ' + indir + '/BIDS/sub-01/ses-01/perf/')
os.system('cp ' + source + ' ' + indir + '/BIDS/sub-01/ses-01/perf/' + os.path.basename(source))
os.system('cp ' + m0 + ' ' + indir + '/BIDS/sub-01/ses-01/perf/' + os.path.basename(m0))
os.system('cp ' + t1w + ' ' + indir + '/BIDS/sub-01/ses-01/anat/' + os.path.basename(t1w))
# run dcm2niix on source and m0 scans
os.system('dcm2niix -f %b ' + indir + '/BIDS/sub-01/ses-01/anat')
os.system('dcm2niix -f %b ' + indir + '/BIDS/sub-01/ses-01/perf')
# remove leftover dicoms
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/*/*'):
if file.endswith('.dcm'):
os.system('rm ' + file)
# rename nii/json files to match bids formatting
#ds_t1w.SeriesDescription = ds_t1w.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
#ds_asl.SeriesDescription = ds_asl.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
#ds_m0.SeriesDescription = ds_m0.SeriesDescription.replace(" ","").replace('/', "").replace(":", "").replace("_", "")
anat_rename = 'sub-01_ses-01_T1w'
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/anat/*'):
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + anat_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + anat_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + anat_rename + '.nii')
asl_rename = 'sub-01_ses-01_asl'
m0_rename = 'sub-01_ses-01_m0scan'
for file in glob.glob(indir + '/BIDS/sub-01/ses-01/perf/*'):
if 'M0' in file or 'm0' in file:
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + m0_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + m0_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + m0_rename + '.nii')
else:
if file.endswith('.json'):
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + asl_rename + '.json')
else:
os.system('mv ' + file + ' ' + os.path.dirname(file) + '/' + asl_rename + '.nii')
os.system('gzip ' + os.path.dirname(file) + '/' + asl_rename + '.nii')
# create dataset_description.json
dataset_description = {
"BIDSVersion": "1.0.1",
"Name": "XNAT Project",
"DatasetDOI": "https://xnat2.vanderbilt.edu/xnat",
"Author": "No Author defined on XNAT"
}
with open(indir + '/BIDS/dataset_description.json','w') as outfile:
json.dump(dataset_description,outfile)
if __name__ == '__main__':
main(sys.argv[1:]) | 0.123696 | 0.252004 |
from django.contrib.auth import get_user_model
from rest_framework import serializers
from bmh_lims.database import models
User = get_user_model()
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class WorkflowDefinitionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.WorkflowDefinition
fields = '__all__'
class WorkflowBatchSerializer(serializers.ModelSerializer):
workflow = serializers.PrimaryKeyRelatedField(queryset=models.WorkflowDefinition.objects.all())
class Meta:
model = models.WorkflowBatch
fields = ['id', 'workflow', 'status']
extra_kwargs = {
"id": {
"read_only": False,
"required": False,
}
}
class LabSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.Lab
fields = ('id', 'lab_name', 'lab_contact',)
class ProjectSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.Project
fields = '__all__'
class SampleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
#submitting_lab = serializers.PrimaryKeyRelatedField(queryset=models.Lab.objects.all())
submitting_lab = serializers.SlugRelatedField(queryset=models.Lab.objects.all(),
slug_field="lab_name")
submitter_project = serializers.SlugRelatedField(queryset=models.Project.objects.all(), slug_field='project_name')
sample_type = serializers.ChoiceField(choices=['Cells (in DNA/RNA shield)', 'DNA', 'Amplicon', 'Other'])
class Meta:
model = models.Sample
fields = (
'id',
'sample_id',
'sample_name',
'well',
'submitting_lab',
'sample_type',
'sample_volume_in_ul',
'requested_services',
'submitter_project',
'strain',
'isolate',
'genus',
'species',
'subspecies_subtype_lineage',
'approx_genome_size_in_bp',
'comments',
'culture_date',
'culture_conditions',
'dna_extraction_date',
'dna_extraction_method',
'qubit_concentration_in_ng_ul',
'created',
'modified',
)
class WorkflowSampleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
workflow_batch = WorkflowBatchSerializer(read_only=True)
sample = serializers.PrimaryKeyRelatedField(queryset=models.Sample.objects.all())
class Meta:
model = models.WorkflowSample
fields = '__all__'
class WorkflowSampleBatchSerializer(serializers.Serializer):
"""
Serializer for the workflow_samplebatch_create endpoint which allows for a workflow batch to be automatically
created upon submission of a list of samples assigned to a workflow
"""
id = serializers.ReadOnlyField()
workflow_batch = WorkflowBatchSerializer()
sample = serializers.PrimaryKeyRelatedField(queryset=models.Sample.objects.all())
class Meta:
model = models.WorkflowSample
fields = '__all__'
def create(self, validated_data):
"""
This method takes in validated_data sent over from WorkflowSampleBatchCreateViewSet.create() and then
creates a WorkflowSample object using that information
:param validated_data: Workflow Sample to create
:return: created model instance
"""
workflow_batch = models.WorkflowBatch.objects.get(id=validated_data['workflow_batch']['id'])
workflow_sample = models.WorkflowSample.objects.create(sample=validated_data['sample'],
workflow_batch=workflow_batch)
return workflow_sample
def update(self, instance, validated_data):
# TODO
pass | bmh_lims/database/api/serializers.py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from bmh_lims.database import models
User = get_user_model()
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class WorkflowDefinitionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.WorkflowDefinition
fields = '__all__'
class WorkflowBatchSerializer(serializers.ModelSerializer):
workflow = serializers.PrimaryKeyRelatedField(queryset=models.WorkflowDefinition.objects.all())
class Meta:
model = models.WorkflowBatch
fields = ['id', 'workflow', 'status']
extra_kwargs = {
"id": {
"read_only": False,
"required": False,
}
}
class LabSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.Lab
fields = ('id', 'lab_name', 'lab_contact',)
class ProjectSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = models.Project
fields = '__all__'
class SampleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
#submitting_lab = serializers.PrimaryKeyRelatedField(queryset=models.Lab.objects.all())
submitting_lab = serializers.SlugRelatedField(queryset=models.Lab.objects.all(),
slug_field="lab_name")
submitter_project = serializers.SlugRelatedField(queryset=models.Project.objects.all(), slug_field='project_name')
sample_type = serializers.ChoiceField(choices=['Cells (in DNA/RNA shield)', 'DNA', 'Amplicon', 'Other'])
class Meta:
model = models.Sample
fields = (
'id',
'sample_id',
'sample_name',
'well',
'submitting_lab',
'sample_type',
'sample_volume_in_ul',
'requested_services',
'submitter_project',
'strain',
'isolate',
'genus',
'species',
'subspecies_subtype_lineage',
'approx_genome_size_in_bp',
'comments',
'culture_date',
'culture_conditions',
'dna_extraction_date',
'dna_extraction_method',
'qubit_concentration_in_ng_ul',
'created',
'modified',
)
class WorkflowSampleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
workflow_batch = WorkflowBatchSerializer(read_only=True)
sample = serializers.PrimaryKeyRelatedField(queryset=models.Sample.objects.all())
class Meta:
model = models.WorkflowSample
fields = '__all__'
class WorkflowSampleBatchSerializer(serializers.Serializer):
"""
Serializer for the workflow_samplebatch_create endpoint which allows for a workflow batch to be automatically
created upon submission of a list of samples assigned to a workflow
"""
id = serializers.ReadOnlyField()
workflow_batch = WorkflowBatchSerializer()
sample = serializers.PrimaryKeyRelatedField(queryset=models.Sample.objects.all())
class Meta:
model = models.WorkflowSample
fields = '__all__'
def create(self, validated_data):
"""
This method takes in validated_data sent over from WorkflowSampleBatchCreateViewSet.create() and then
creates a WorkflowSample object using that information
:param validated_data: Workflow Sample to create
:return: created model instance
"""
workflow_batch = models.WorkflowBatch.objects.get(id=validated_data['workflow_batch']['id'])
workflow_sample = models.WorkflowSample.objects.create(sample=validated_data['sample'],
workflow_batch=workflow_batch)
return workflow_sample
def update(self, instance, validated_data):
# TODO
pass | 0.619356 | 0.115511 |
# This file is part of the Mad Girlfriend software
# See the LICENSE file for copyright information
from rules import Rules
from alertgenerator import Alert, Alerter
from packetparser import Packet
import signal, sys, os, socket, time, traceback, exceptions
def getMemoryUsage():
data = open('/proc/meminfo', 'r').read(2048).split('\n')
memFree = int(data[1].split(':')[1].strip().split(' ')[0]) # kb
buffers = int(data[3].split(':')[1].strip().split(' ')[0]) # kb
cached = int(data[4].split(':')[1].strip().split(' ')[0]) # kb
# Available memory is what is free (completely unoccupied) plus what can
# can be emptied on demand (i.e. buffers and cache). The number returned
# by this function is how many KBs more python can use before OOM.
totalUsableMemory = memFree + buffers + cached
return totalUsableMemory
def canary(packet, alerter):
global lastPacketsHandled, lastBytesHandled
# The canary chirps its status every now and then
nowandthen = 15 # seconds
if 'lastalert' not in alerter.state:
alerter.state['lastalert'] = 0
elapsedSinceLastCanary = time.time() - alerter.state['lastalert']
if elapsedSinceLastCanary > nowandthen:
alerter.state['lastalert'] = time.time()
ph = ['packetsHandled', 'count', lastPacketsHandled / elapsedSinceLastCanary]
tph = ['totalPacketsHandled', 'count', packetsHandled]
bh = ['bytesHandled', 'count', lastBytesHandled / elapsedSinceLastCanary]
tbh = ['totalBytesHandled', 'count', bytesHandled]
memusage = ['memusage', 'count', getMemoryUsage()]
loadavg = ['loadavg', 'count', os.getloadavg()[0]]
extravalues = [tph, tbh, memusage, loadavg, ph, bh]
alerter.log(Alert.INFO, None, extravalues)
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
# The rules array contains all rules we apply to each packet.
# The canary function, defined above, is always present.
rules = [(canary, Alerter('canary'))]
for methodName in Rules.__dict__:
if methodName[0] != '_':
if methodName == 'canary':
print("Error: you cannot have a rule named 'canary'. This is a reserved name.")
sys.exit(2)
rules.append((Rules.__dict__[methodName], Alerter(methodName)))
else:
if methodName not in ['__module__', '__doc__']:
print("Ignoring method '" + methodName + "' because it starts with an underscore.")
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
except:
print('Error creating socket.')
sys.exit(1)
print("Mad Girlfriend initialized.")
packetsHandled = 0
bytesHandled = 0
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
try:
while True:
data = s.recvfrom(65565)[0]
for rule, alerter in rules:
try:
rule(Packet(data), alerter)
except:
if sys.exc_info()[0] is exceptions.KeyboardInterrupt:
raise
else:
sys.stderr.write("Error in rule {}: {}: {}\n{}".format(alerter.name, \
sys.exc_info()[0], sys.exc_info()[1], traceback.print_tb(sys.exc_info()[2])))
packetsHandled += 1
lastPacketsHandled += 1
lastBytesHandled += len(data)
bytesHandled += len(data)
except KeyboardInterrupt:
print("Received SIGINT")
for rule, alerter in rules:
print("Closing " + alerter.name + ".log")
alerter.close()
print("Done! Have a nice day :)")
sys.exit(0)
sys.exit(3) | madgirlfriend.py |
# This file is part of the Mad Girlfriend software
# See the LICENSE file for copyright information
from rules import Rules
from alertgenerator import Alert, Alerter
from packetparser import Packet
import signal, sys, os, socket, time, traceback, exceptions
def getMemoryUsage():
data = open('/proc/meminfo', 'r').read(2048).split('\n')
memFree = int(data[1].split(':')[1].strip().split(' ')[0]) # kb
buffers = int(data[3].split(':')[1].strip().split(' ')[0]) # kb
cached = int(data[4].split(':')[1].strip().split(' ')[0]) # kb
# Available memory is what is free (completely unoccupied) plus what can
# can be emptied on demand (i.e. buffers and cache). The number returned
# by this function is how many KBs more python can use before OOM.
totalUsableMemory = memFree + buffers + cached
return totalUsableMemory
def canary(packet, alerter):
global lastPacketsHandled, lastBytesHandled
# The canary chirps its status every now and then
nowandthen = 15 # seconds
if 'lastalert' not in alerter.state:
alerter.state['lastalert'] = 0
elapsedSinceLastCanary = time.time() - alerter.state['lastalert']
if elapsedSinceLastCanary > nowandthen:
alerter.state['lastalert'] = time.time()
ph = ['packetsHandled', 'count', lastPacketsHandled / elapsedSinceLastCanary]
tph = ['totalPacketsHandled', 'count', packetsHandled]
bh = ['bytesHandled', 'count', lastBytesHandled / elapsedSinceLastCanary]
tbh = ['totalBytesHandled', 'count', bytesHandled]
memusage = ['memusage', 'count', getMemoryUsage()]
loadavg = ['loadavg', 'count', os.getloadavg()[0]]
extravalues = [tph, tbh, memusage, loadavg, ph, bh]
alerter.log(Alert.INFO, None, extravalues)
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
# The rules array contains all rules we apply to each packet.
# The canary function, defined above, is always present.
rules = [(canary, Alerter('canary'))]
for methodName in Rules.__dict__:
if methodName[0] != '_':
if methodName == 'canary':
print("Error: you cannot have a rule named 'canary'. This is a reserved name.")
sys.exit(2)
rules.append((Rules.__dict__[methodName], Alerter(methodName)))
else:
if methodName not in ['__module__', '__doc__']:
print("Ignoring method '" + methodName + "' because it starts with an underscore.")
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
except:
print('Error creating socket.')
sys.exit(1)
print("Mad Girlfriend initialized.")
packetsHandled = 0
bytesHandled = 0
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
try:
while True:
data = s.recvfrom(65565)[0]
for rule, alerter in rules:
try:
rule(Packet(data), alerter)
except:
if sys.exc_info()[0] is exceptions.KeyboardInterrupt:
raise
else:
sys.stderr.write("Error in rule {}: {}: {}\n{}".format(alerter.name, \
sys.exc_info()[0], sys.exc_info()[1], traceback.print_tb(sys.exc_info()[2])))
packetsHandled += 1
lastPacketsHandled += 1
lastBytesHandled += len(data)
bytesHandled += len(data)
except KeyboardInterrupt:
print("Received SIGINT")
for rule, alerter in rules:
print("Closing " + alerter.name + ".log")
alerter.close()
print("Done! Have a nice day :)")
sys.exit(0)
sys.exit(3) | 0.307878 | 0.141578 |
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .custom_widget_item import CustomTreeWidgetItem
logger = sgtk.platform.get_logger(__name__)
from .tree_node_base import TreeNodeBase
from .tree_node_task import TreeNodeTask
class TreeNodeItem(TreeNodeBase):
"""
Tree item for a publish item
"""
def __init__(self, item, parent):
"""
:param item:
:param parent: The parent QWidget for this control
"""
self._item = item
super(TreeNodeItem, self).__init__(parent)
self.setFlags(self.flags() | QtCore.Qt.ItemIsSelectable)
# go ahead and keep a handle on these so they can be reused
self._expanded_icon = QtGui.QIcon(":/tk_multi_publish2/down_arrow.png")
self._collapsed_icon = QtGui.QIcon(":/tk_multi_publish2/right_arrow.png")
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
# create an item widget and associate it with this QTreeWidgetItem
widget = CustomTreeWidgetItem(self, parent)
# update with any saved state
widget.set_header("<b>%s</b><br>%s" % (self._item.name, self._item.display_type))
widget.set_icon(self._item.icon)
widget.set_checkbox_value(self.data(0, self.CHECKBOX_ROLE))
# connect the collapse/expand tool button to the toggle callback
widget.expand_indicator.clicked.connect(
lambda: self.setExpanded(not self.isExpanded()))
return widget
def __repr__(self):
return "<TreeNodeItem %s>" % str(self)
def __str__(self):
return "%s %s" % (self._item.display_type, self._item.name)
def create_summary(self):
"""
Creates summary of actions
:returns: List of strings
"""
if self.enabled:
items_summaries = []
task_summaries = []
for child_index in xrange(self.childCount()):
child_item = self.child(child_index)
if isinstance(child_item, TreeNodeTask):
task_summaries.extend(child_item.create_summary())
else:
# sub-items
items_summaries.extend(child_item.create_summary())
summary = []
if len(task_summaries) > 0:
summary_str = "<b>%s</b><br>" % self.item.name
summary_str += "<br>".join(["– %s" % task_summary for task_summary in task_summaries])
summary.append(summary_str)
summary.extend(items_summaries)
return summary
else:
return []
@property
def item(self):
"""
Associated item instance
"""
return self._item
def get_publish_instance(self):
"""
Returns the low level item or task instance
that this object represents
:returns: task or item instance
"""
return self.item
def setExpanded(self, expand):
"""
Expands the item if expand is true, otherwise collapses the item.
Overrides the default implementation to display the custom
expand/collapse toggle tool button properly.
:param bool expand: True if item should be expanded, False otherwise
"""
super(TreeNodeItem, self).setExpanded(expand)
self._check_expand_state()
def double_clicked(self, column):
"""Called when the item is double clicked
:param int column: The model column that was double clicked on the item.
"""
# ensure the expand/collapse indicator is properly displayed. this is
# called just before the expansion state is toggled. so we show the
# opposite icon
if self.isExpanded():
icon = self._collapsed_icon
else:
icon = self._expanded_icon
self._embedded_widget.expand_indicator.setIcon(icon)
def _check_expand_state(self):
"""
Sets the expand indicator based on the expand state of the item
:return:
"""
if self.isExpanded():
icon = self._expanded_icon
else:
icon = self._collapsed_icon
self._embedded_widget.expand_indicator.setIcon(icon)
class TopLevelTreeNodeItem(TreeNodeItem):
"""
Tree item for a publish item
"""
def __init__(self, item, parent):
"""
:param item:
:param parent: The parent QWidget for this control
"""
super(TopLevelTreeNodeItem, self).__init__(item, parent)
# ensure items that allow context change are draggable
if self.item.context_change_allowed:
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsDragEnabled
else:
flags = QtCore.Qt.ItemIsSelectable
self.setFlags(self.flags() | flags)
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
widget = super(TopLevelTreeNodeItem, self)._create_widget(parent)
# show the proper drag handle
widget.show_drag_handle(self.item.context_change_allowed)
return widget
def synchronize_context(self):
"""
Updates the context for the underlying item given the
current position in the tree
"""
# our parent node is always a context node
self.item.context = self.parent().context | install/app_store/tk-multi-publish2/v2.0.6/python/tk_multi_publish2/publish_tree_widget/tree_node_item.py |
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .custom_widget_item import CustomTreeWidgetItem
logger = sgtk.platform.get_logger(__name__)
from .tree_node_base import TreeNodeBase
from .tree_node_task import TreeNodeTask
class TreeNodeItem(TreeNodeBase):
"""
Tree item for a publish item
"""
def __init__(self, item, parent):
"""
:param item:
:param parent: The parent QWidget for this control
"""
self._item = item
super(TreeNodeItem, self).__init__(parent)
self.setFlags(self.flags() | QtCore.Qt.ItemIsSelectable)
# go ahead and keep a handle on these so they can be reused
self._expanded_icon = QtGui.QIcon(":/tk_multi_publish2/down_arrow.png")
self._collapsed_icon = QtGui.QIcon(":/tk_multi_publish2/right_arrow.png")
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
# create an item widget and associate it with this QTreeWidgetItem
widget = CustomTreeWidgetItem(self, parent)
# update with any saved state
widget.set_header("<b>%s</b><br>%s" % (self._item.name, self._item.display_type))
widget.set_icon(self._item.icon)
widget.set_checkbox_value(self.data(0, self.CHECKBOX_ROLE))
# connect the collapse/expand tool button to the toggle callback
widget.expand_indicator.clicked.connect(
lambda: self.setExpanded(not self.isExpanded()))
return widget
def __repr__(self):
return "<TreeNodeItem %s>" % str(self)
def __str__(self):
return "%s %s" % (self._item.display_type, self._item.name)
def create_summary(self):
"""
Creates summary of actions
:returns: List of strings
"""
if self.enabled:
items_summaries = []
task_summaries = []
for child_index in xrange(self.childCount()):
child_item = self.child(child_index)
if isinstance(child_item, TreeNodeTask):
task_summaries.extend(child_item.create_summary())
else:
# sub-items
items_summaries.extend(child_item.create_summary())
summary = []
if len(task_summaries) > 0:
summary_str = "<b>%s</b><br>" % self.item.name
summary_str += "<br>".join(["– %s" % task_summary for task_summary in task_summaries])
summary.append(summary_str)
summary.extend(items_summaries)
return summary
else:
return []
@property
def item(self):
"""
Associated item instance
"""
return self._item
def get_publish_instance(self):
"""
Returns the low level item or task instance
that this object represents
:returns: task or item instance
"""
return self.item
def setExpanded(self, expand):
"""
Expands the item if expand is true, otherwise collapses the item.
Overrides the default implementation to display the custom
expand/collapse toggle tool button properly.
:param bool expand: True if item should be expanded, False otherwise
"""
super(TreeNodeItem, self).setExpanded(expand)
self._check_expand_state()
def double_clicked(self, column):
"""Called when the item is double clicked
:param int column: The model column that was double clicked on the item.
"""
# ensure the expand/collapse indicator is properly displayed. this is
# called just before the expansion state is toggled. so we show the
# opposite icon
if self.isExpanded():
icon = self._collapsed_icon
else:
icon = self._expanded_icon
self._embedded_widget.expand_indicator.setIcon(icon)
def _check_expand_state(self):
"""
Sets the expand indicator based on the expand state of the item
:return:
"""
if self.isExpanded():
icon = self._expanded_icon
else:
icon = self._collapsed_icon
self._embedded_widget.expand_indicator.setIcon(icon)
class TopLevelTreeNodeItem(TreeNodeItem):
"""
Tree item for a publish item
"""
def __init__(self, item, parent):
"""
:param item:
:param parent: The parent QWidget for this control
"""
super(TopLevelTreeNodeItem, self).__init__(item, parent)
# ensure items that allow context change are draggable
if self.item.context_change_allowed:
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsDragEnabled
else:
flags = QtCore.Qt.ItemIsSelectable
self.setFlags(self.flags() | flags)
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
widget = super(TopLevelTreeNodeItem, self)._create_widget(parent)
# show the proper drag handle
widget.show_drag_handle(self.item.context_change_allowed)
return widget
def synchronize_context(self):
"""
Updates the context for the underlying item given the
current position in the tree
"""
# our parent node is always a context node
self.item.context = self.parent().context | 0.612078 | 0.154344 |
import tensorflow as tf
import math
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv1d(x, W, s):
return tf.nn.conv2d(x, W, strides=s, padding='SAME')
def pooling_layer_parameterized(pool_method, h_conv, pool_kernel, pool_stride):
if pool_method == 1:
return tf.nn.max_pool(h_conv, ksize=[1, pool_kernel, 1, 1], strides=[1, pool_stride, 1, 1], padding='SAME')
elif pool_method == 2:
return tf.nn.avg_pool(h_conv, ksize=[1, pool_kernel, 1, 1], strides=[1, pool_stride, 1, 1], padding='SAME')
def variable_summaries(var, name, collection):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries') as r:
mean = tf.reduce_mean(var)
tf.add_to_collection(collection, tf.scalar_summary('mean/' + name, mean))
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.add_to_collection(collection, tf.scalar_summary('stddev/' + name, stddev))
tf.add_to_collection(collection, tf.scalar_summary('max/' + name, tf.reduce_max(var)))
tf.add_to_collection(collection, tf.scalar_summary('min/' + name, tf.reduce_min(var)))
tf.add_to_collection(collection, tf.histogram_summary(name, var))
def build_model(hyperparameters):
learning_rate = hyperparameters['learning_rate']
l2_regularization_penalty = hyperparameters['l2_regularization_penalty']
fc1_n_neurons = hyperparameters['fc1_n_neurons']
fc2_1_n_neurons = hyperparameters['fc2_1_n_neurons']
fc2_2_n_neurons = hyperparameters['fc2_2_n_neurons']
fc2_3_n_neurons = hyperparameters['fc2_3_n_neurons']
conv1_kernel = hyperparameters['conv1_kernel']
conv2_kernel = hyperparameters['conv2_kernel']
conv1_filters = hyperparameters['conv1_filters']
conv2_filters = hyperparameters['conv2_filters']
conv1_stride = hyperparameters['conv1_stride']
conv2_stride = hyperparameters['conv2_stride']
pool1_kernel = hyperparameters['pool1_kernel']
pool2_kernel = hyperparameters['pool2_kernel']
pool1_stride = hyperparameters['pool1_stride']
pool2_stride = hyperparameters['pool2_stride']
pool1_method = 1
pool2_method = 1
INPUT_SIZE = 400
tfo = {} # Tensorflow objects
x = tf.placeholder(tf.float32, shape=[None, INPUT_SIZE], name='x')
label_classifier = tf.placeholder(tf.float32, shape=[None], name='label_classifier')
label_offset = tf.placeholder(tf.float32, shape=[None], name='label_offset')
label_coldensity = tf.placeholder(tf.float32, shape=[None], name='label_coldensity')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
global_step = tf.Variable(0, name='global_step', trainable=False)
# First Convolutional Layer
# Kernel size (16,1)
# Stride (4,1)
# number of filters = 4 (features?)
# Neuron activation = ReLU (rectified linear unit)
W_conv1 = weight_variable([conv1_kernel, 1, 1, conv1_filters])
b_conv1 = bias_variable([conv1_filters])
x_4d = tf.reshape(x, [-1, INPUT_SIZE, 1, 1])
# https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html#convolution
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(1024./4.) = 256
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_conv1: [-1, 256, 1, 4]
stride1 = [1, conv1_stride, 1, 1]
h_conv1 = tf.nn.relu(conv1d(x_4d, W_conv1, stride1) + b_conv1)
# Kernel size (8,1)
# Stride (2,1)
# Pooling type = Max Pooling
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(256./2.) = 128
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_pool1: [-1, 128, 1, 4]
h_pool1 = pooling_layer_parameterized(pool1_method, h_conv1, pool1_kernel, pool1_stride)
# Second Convolutional Layer
# Kernel size (16,1)
# Stride (2,1)
# number of filters=8
# Neuron activation = ReLU (rectified linear unit)
W_conv2 = weight_variable([conv2_kernel, 1, conv1_filters, conv2_filters])
b_conv2 = bias_variable([conv2_filters])
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(128./2.) = 64
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_conv1: [-1, 64, 1, 8]
stride2 = [1, conv2_stride, 1, 1]
h_conv2 = tf.nn.relu(conv1d(h_pool1, W_conv2, stride2) + b_conv2)
h_pool2 = pooling_layer_parameterized(pool2_method, h_conv2, pool2_kernel, pool2_stride)
# FC1: first fully connected layer, shared
inputsize_fc1 = int(math.ceil(math.ceil(math.ceil(math.ceil(
INPUT_SIZE / conv1_stride) / pool1_stride) / conv2_stride) / pool2_stride)) * conv2_filters
h_pool2_flat = tf.reshape(h_pool2, [-1, inputsize_fc1])
W_fc1 = weight_variable([inputsize_fc1, fc1_n_neurons])
b_fc1 = bias_variable([fc1_n_neurons])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout FC1
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2_1 = weight_variable([fc1_n_neurons, fc2_1_n_neurons])
b_fc2_1 = bias_variable([fc2_1_n_neurons])
W_fc2_2 = weight_variable([fc1_n_neurons, fc2_2_n_neurons])
b_fc2_2 = bias_variable([fc2_2_n_neurons])
W_fc2_3 = weight_variable([fc1_n_neurons, fc2_3_n_neurons])
b_fc2_3 = bias_variable([fc2_3_n_neurons])
# FC2 activations
h_fc2_1 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_1) + b_fc2_1)
h_fc2_2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_2) + b_fc2_2)
h_fc2_3 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_3) + b_fc2_3)
# FC2 Dropout [1-3]
h_fc2_1_drop = tf.nn.dropout(h_fc2_1, keep_prob)
h_fc2_2_drop = tf.nn.dropout(h_fc2_2, keep_prob)
h_fc2_3_drop = tf.nn.dropout(h_fc2_3, keep_prob)
# Readout Layer
W_fc3_1 = weight_variable([fc2_1_n_neurons, 1])
b_fc3_1 = bias_variable([1])
W_fc3_2 = weight_variable([fc2_2_n_neurons, 1])
b_fc3_2 = bias_variable([1])
W_fc3_3 = weight_variable([fc2_3_n_neurons, 1])
b_fc3_3 = bias_variable([1])
# y_fc4 = tf.add(tf.matmul(h_fc3_drop, W_fc4), b_fc4)
# y_nn = tf.reshape(y_fc4, [-1])
y_fc4_1 = tf.add(tf.matmul(h_fc2_1_drop, W_fc3_1), b_fc3_1)
y_nn_classifier = tf.reshape(y_fc4_1, [-1], name='y_nn_classifer')
y_fc4_2 = tf.add(tf.matmul(h_fc2_2_drop, W_fc3_2), b_fc3_2)
y_nn_offset = tf.reshape(y_fc4_2, [-1], name='y_nn_offset')
y_fc4_3 = tf.add(tf.matmul(h_fc2_3_drop, W_fc3_3), b_fc3_3)
y_nn_coldensity = tf.reshape(y_fc4_3, [-1], name='y_nn_coldensity')
# Train and Evaluate the model
loss_classifier = tf.add(tf.nn.sigmoid_cross_entropy_with_logits(y_nn_classifier, label_classifier),
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_1)),
name='loss_classifier')
loss_offset_regression = tf.add(tf.reduce_sum(tf.nn.l2_loss(y_nn_offset - label_offset)),
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_2)),
name='loss_offset_regression')
epsilon = 1e-6
loss_coldensity_regression = tf.reduce_sum(
tf.mul(tf.square(y_nn_coldensity - label_coldensity),
tf.div(label_coldensity,label_coldensity+epsilon)) +
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_1)),
name='loss_coldensity_regression')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
cost_all_samples_lossfns_AB = loss_classifier + loss_offset_regression
cost_pos_samples_lossfns_ABC = loss_classifier + loss_offset_regression + loss_coldensity_regression
# train_step_AB = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_all_samples_lossfns_AB, global_step=global_step, name='train_step_AB')
train_step_ABC = optimizer.minimize(cost_pos_samples_lossfns_ABC, global_step=global_step, name='train_step_ABC')
# train_step_C = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_coldensity_regression, global_step=global_step, name='train_step_C')
output_classifier = tf.sigmoid(y_nn_classifier, name='output_classifier')
prediction = tf.round(output_classifier, name='prediction')
correct_prediction = tf.equal(prediction, label_classifier)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
rmse_offset = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(y_nn_offset,label_offset))), name='rmse_offset')
rmse_coldensity = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(y_nn_coldensity,label_coldensity))), name='rmse_coldensity')
variable_summaries(loss_classifier, 'loss_classifier', 'SUMMARY_A')
variable_summaries(loss_offset_regression, 'loss_offset_regression', 'SUMMARY_B')
variable_summaries(loss_coldensity_regression, 'loss_coldensity_regression', 'SUMMARY_C')
variable_summaries(accuracy, 'classification_accuracy', 'SUMMARY_A')
variable_summaries(rmse_offset, 'rmse_offset', 'SUMMARY_B')
variable_summaries(rmse_coldensity, 'rmse_coldensity', 'SUMMARY_C')
# tb_summaries = tf.merge_all_summaries()
return train_step_ABC, tfo #, accuracy , loss_classifier, loss_offset_regression, loss_coldensity_regression, \
#x, label_classifier, label_offset, label_coldensity, keep_prob, prediction, output_classifier, y_nn_offset, \
#rmse_offset, y_nn_coldensity, rmse_coldensity | dla_cnn/Model_v4.py | import tensorflow as tf
import math
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv1d(x, W, s):
return tf.nn.conv2d(x, W, strides=s, padding='SAME')
def pooling_layer_parameterized(pool_method, h_conv, pool_kernel, pool_stride):
if pool_method == 1:
return tf.nn.max_pool(h_conv, ksize=[1, pool_kernel, 1, 1], strides=[1, pool_stride, 1, 1], padding='SAME')
elif pool_method == 2:
return tf.nn.avg_pool(h_conv, ksize=[1, pool_kernel, 1, 1], strides=[1, pool_stride, 1, 1], padding='SAME')
def variable_summaries(var, name, collection):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries') as r:
mean = tf.reduce_mean(var)
tf.add_to_collection(collection, tf.scalar_summary('mean/' + name, mean))
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.add_to_collection(collection, tf.scalar_summary('stddev/' + name, stddev))
tf.add_to_collection(collection, tf.scalar_summary('max/' + name, tf.reduce_max(var)))
tf.add_to_collection(collection, tf.scalar_summary('min/' + name, tf.reduce_min(var)))
tf.add_to_collection(collection, tf.histogram_summary(name, var))
def build_model(hyperparameters):
learning_rate = hyperparameters['learning_rate']
l2_regularization_penalty = hyperparameters['l2_regularization_penalty']
fc1_n_neurons = hyperparameters['fc1_n_neurons']
fc2_1_n_neurons = hyperparameters['fc2_1_n_neurons']
fc2_2_n_neurons = hyperparameters['fc2_2_n_neurons']
fc2_3_n_neurons = hyperparameters['fc2_3_n_neurons']
conv1_kernel = hyperparameters['conv1_kernel']
conv2_kernel = hyperparameters['conv2_kernel']
conv1_filters = hyperparameters['conv1_filters']
conv2_filters = hyperparameters['conv2_filters']
conv1_stride = hyperparameters['conv1_stride']
conv2_stride = hyperparameters['conv2_stride']
pool1_kernel = hyperparameters['pool1_kernel']
pool2_kernel = hyperparameters['pool2_kernel']
pool1_stride = hyperparameters['pool1_stride']
pool2_stride = hyperparameters['pool2_stride']
pool1_method = 1
pool2_method = 1
INPUT_SIZE = 400
tfo = {} # Tensorflow objects
x = tf.placeholder(tf.float32, shape=[None, INPUT_SIZE], name='x')
label_classifier = tf.placeholder(tf.float32, shape=[None], name='label_classifier')
label_offset = tf.placeholder(tf.float32, shape=[None], name='label_offset')
label_coldensity = tf.placeholder(tf.float32, shape=[None], name='label_coldensity')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
global_step = tf.Variable(0, name='global_step', trainable=False)
# First Convolutional Layer
# Kernel size (16,1)
# Stride (4,1)
# number of filters = 4 (features?)
# Neuron activation = ReLU (rectified linear unit)
W_conv1 = weight_variable([conv1_kernel, 1, 1, conv1_filters])
b_conv1 = bias_variable([conv1_filters])
x_4d = tf.reshape(x, [-1, INPUT_SIZE, 1, 1])
# https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html#convolution
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(1024./4.) = 256
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_conv1: [-1, 256, 1, 4]
stride1 = [1, conv1_stride, 1, 1]
h_conv1 = tf.nn.relu(conv1d(x_4d, W_conv1, stride1) + b_conv1)
# Kernel size (8,1)
# Stride (2,1)
# Pooling type = Max Pooling
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(256./2.) = 128
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_pool1: [-1, 128, 1, 4]
h_pool1 = pooling_layer_parameterized(pool1_method, h_conv1, pool1_kernel, pool1_stride)
# Second Convolutional Layer
# Kernel size (16,1)
# Stride (2,1)
# number of filters=8
# Neuron activation = ReLU (rectified linear unit)
W_conv2 = weight_variable([conv2_kernel, 1, conv1_filters, conv2_filters])
b_conv2 = bias_variable([conv2_filters])
# out_height = ceil(float(in_height) / float(strides[1])) = ceil(128./2.) = 64
# out_width = ceil(float(in_width) / float(strides[2])) = 1
# shape of h_conv1: [-1, 64, 1, 8]
stride2 = [1, conv2_stride, 1, 1]
h_conv2 = tf.nn.relu(conv1d(h_pool1, W_conv2, stride2) + b_conv2)
h_pool2 = pooling_layer_parameterized(pool2_method, h_conv2, pool2_kernel, pool2_stride)
# FC1: first fully connected layer, shared
inputsize_fc1 = int(math.ceil(math.ceil(math.ceil(math.ceil(
INPUT_SIZE / conv1_stride) / pool1_stride) / conv2_stride) / pool2_stride)) * conv2_filters
h_pool2_flat = tf.reshape(h_pool2, [-1, inputsize_fc1])
W_fc1 = weight_variable([inputsize_fc1, fc1_n_neurons])
b_fc1 = bias_variable([fc1_n_neurons])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout FC1
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2_1 = weight_variable([fc1_n_neurons, fc2_1_n_neurons])
b_fc2_1 = bias_variable([fc2_1_n_neurons])
W_fc2_2 = weight_variable([fc1_n_neurons, fc2_2_n_neurons])
b_fc2_2 = bias_variable([fc2_2_n_neurons])
W_fc2_3 = weight_variable([fc1_n_neurons, fc2_3_n_neurons])
b_fc2_3 = bias_variable([fc2_3_n_neurons])
# FC2 activations
h_fc2_1 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_1) + b_fc2_1)
h_fc2_2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_2) + b_fc2_2)
h_fc2_3 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2_3) + b_fc2_3)
# FC2 Dropout [1-3]
h_fc2_1_drop = tf.nn.dropout(h_fc2_1, keep_prob)
h_fc2_2_drop = tf.nn.dropout(h_fc2_2, keep_prob)
h_fc2_3_drop = tf.nn.dropout(h_fc2_3, keep_prob)
# Readout Layer
W_fc3_1 = weight_variable([fc2_1_n_neurons, 1])
b_fc3_1 = bias_variable([1])
W_fc3_2 = weight_variable([fc2_2_n_neurons, 1])
b_fc3_2 = bias_variable([1])
W_fc3_3 = weight_variable([fc2_3_n_neurons, 1])
b_fc3_3 = bias_variable([1])
# y_fc4 = tf.add(tf.matmul(h_fc3_drop, W_fc4), b_fc4)
# y_nn = tf.reshape(y_fc4, [-1])
y_fc4_1 = tf.add(tf.matmul(h_fc2_1_drop, W_fc3_1), b_fc3_1)
y_nn_classifier = tf.reshape(y_fc4_1, [-1], name='y_nn_classifer')
y_fc4_2 = tf.add(tf.matmul(h_fc2_2_drop, W_fc3_2), b_fc3_2)
y_nn_offset = tf.reshape(y_fc4_2, [-1], name='y_nn_offset')
y_fc4_3 = tf.add(tf.matmul(h_fc2_3_drop, W_fc3_3), b_fc3_3)
y_nn_coldensity = tf.reshape(y_fc4_3, [-1], name='y_nn_coldensity')
# Train and Evaluate the model
loss_classifier = tf.add(tf.nn.sigmoid_cross_entropy_with_logits(y_nn_classifier, label_classifier),
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_1)),
name='loss_classifier')
loss_offset_regression = tf.add(tf.reduce_sum(tf.nn.l2_loss(y_nn_offset - label_offset)),
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_2)),
name='loss_offset_regression')
epsilon = 1e-6
loss_coldensity_regression = tf.reduce_sum(
tf.mul(tf.square(y_nn_coldensity - label_coldensity),
tf.div(label_coldensity,label_coldensity+epsilon)) +
l2_regularization_penalty * (tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_conv2) +
tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2_1)),
name='loss_coldensity_regression')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
cost_all_samples_lossfns_AB = loss_classifier + loss_offset_regression
cost_pos_samples_lossfns_ABC = loss_classifier + loss_offset_regression + loss_coldensity_regression
# train_step_AB = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_all_samples_lossfns_AB, global_step=global_step, name='train_step_AB')
train_step_ABC = optimizer.minimize(cost_pos_samples_lossfns_ABC, global_step=global_step, name='train_step_ABC')
# train_step_C = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_coldensity_regression, global_step=global_step, name='train_step_C')
output_classifier = tf.sigmoid(y_nn_classifier, name='output_classifier')
prediction = tf.round(output_classifier, name='prediction')
correct_prediction = tf.equal(prediction, label_classifier)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
rmse_offset = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(y_nn_offset,label_offset))), name='rmse_offset')
rmse_coldensity = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(y_nn_coldensity,label_coldensity))), name='rmse_coldensity')
variable_summaries(loss_classifier, 'loss_classifier', 'SUMMARY_A')
variable_summaries(loss_offset_regression, 'loss_offset_regression', 'SUMMARY_B')
variable_summaries(loss_coldensity_regression, 'loss_coldensity_regression', 'SUMMARY_C')
variable_summaries(accuracy, 'classification_accuracy', 'SUMMARY_A')
variable_summaries(rmse_offset, 'rmse_offset', 'SUMMARY_B')
variable_summaries(rmse_coldensity, 'rmse_coldensity', 'SUMMARY_C')
# tb_summaries = tf.merge_all_summaries()
return train_step_ABC, tfo #, accuracy , loss_classifier, loss_offset_regression, loss_coldensity_regression, \
#x, label_classifier, label_offset, label_coldensity, keep_prob, prediction, output_classifier, y_nn_offset, \
#rmse_offset, y_nn_coldensity, rmse_coldensity | 0.922989 | 0.552057 |
from selenium import webdriver
import time
import requests
from bs4 import BeautifulSoup
import re
import json
from selenium.webdriver.common.keys import Keys
import pickle
import pandas as pd
array_textos_noticias = []
link = []
def elpais_content(user_input):
    """Search elpais.com for *user_input* with headless Chrome and return the page HTML.

    Returns "" if any navigation/interaction step fails.
    """
    # Headless to avoid launching a Chrome window, saving resources since no GUI is needed.
    options = webdriver.ChromeOptions()
    # NOTE(review): an iPhone user agent is spoofed here -- presumably to get the
    # simpler mobile layout; confirm against the parser in text_elpais.
    user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    options.add_argument('user-agent={}'.format(user_agent))
    options.add_argument('--incognito')
    options.add_argument('--headless')
    options.add_argument('--enable-javascript')
    PATH = 'C:/WebDriver/bin/chromedriver.exe'
    driver = webdriver.Chrome(PATH, options=options)
    try:
        driver.get("https://elpais.com/buscador/")  # launch the search URL
        time.sleep(2)
        # Accept the cookie-consent banner, then type the query and press ENTER.
        driver.find_element_by_xpath('//*[@id="didomi-notice-agree-button"]').click()
        driver.find_element_by_xpath('//*[@id="formulario_busquedas"]/input[1]').send_keys(user_input)
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="formulario_busquedas"]/input[1]').send_keys(Keys.ENTER)
        time.sleep(2)
        source = driver.page_source
    except Exception:
        source = ""
    finally:
        # BUG FIX: the WebDriver was never shut down, leaking one Chrome
        # process per call. Always quit, on success or failure.
        driver.quit()
    return source  # full HTML of the results page (or "" on failure)
def text_elpais(user_input):
    """Scrape El Pais search results for *user_input* and return them as a JSON string.

    Each result dict carries 'Name', 'Subtitle' and 'URL' keys. Side effect:
    every subtitle found is appended to the module-level list
    ``array_textos_noticias`` (consumed later by the classifier).

    NOTE(review): when a field is missing for one article, the value from the
    previous article is reused (variables are not reset per item) -- preserved
    here as existing behavior.
    """
    noticias = {}
    noticias["ELPAIS News in " + user_input] = []
    titulo, link_completo, subtitulo = "", "", ""
    # Fetch the rendered search-results page HTML.
    page_source = elpais_content(user_input)
    soup = BeautifulSoup(page_source, 'lxml')
    try:
        contenedor = soup.find_all(class_="noticia")  # one div per article
        for i in contenedor:
            # Title
            try:
                titulo = i.find(title="Ver noticia").text
            except Exception:
                pass  # idiom fix: was a bare `None` expression used as a no-op
            # Link
            try:
                link = i.find(title="Ver noticia").attrs['href']
                link_completo = "https://elpais.com" + str(link)
            except Exception:
                pass
            # Subtitle
            try:
                subtitulo = i.find('p').text
                array_textos_noticias.append(subtitulo)
            except Exception:
                pass
            noticias["ELPAIS News in " + user_input].append({
                'Name': titulo,
                'Subtitle': subtitulo,
                'URL': link_completo
            })
    except Exception:
        # On any unexpected parsing failure, still emit one entry with
        # whatever field values were collected so far (possibly empty).
        noticias["ELPAIS News in " + user_input].append({
            'Name': titulo,
            'Subtitle': subtitulo,
            'URL': link_completo
        })
    return json.dumps(noticias, indent=3)
#print(text_elpais("Talavera de la Reina"))
#Llama a "text_elpais" para rellenar el array y devolverlo para el scrapper de PC1
def elpais_pc1(user_input):
    """Run the El Pais scraper and return the collected subtitles.

    Side effect: text_elpais() appends each article subtitle to the
    module-level list ``array_textos_noticias``. Note the list is never
    cleared, so repeated calls accumulate results across queries.
    """
    text_elpais(user_input)
    return array_textos_noticias
def model_prediction(user_input):
    """Classify scraped El Pais coverage of *user_input* (a UTF-8 bytes query).

    Scrapes article subtitles via elpais_pc1, vectorizes them with a saved
    TF-IDF model and runs a pickled classifier; a majority vote over the
    articles decides between "Despoblacion" and "No Despoblacion".
    Returns a JSON string of the form {"result": <label>}.
    """
    # El Pais scraper
    texts = elpais_pc1(user_input.decode('utf-8'))
    # Saved artifacts: trained classifier + fitted TF-IDF vectorizer.
    clf = pickle.load(open('api/scrapers/data-pc1/trained_model.sav', 'rb'))
    vectorizer = pickle.load(open('api/scrapers/data-pc1/tfidf.pkl', 'rb'))
    frame = pd.read_excel('api/scrapers/data-pc1/Noticias_Excel.xlsx', engine='openpyxl')
    # Re-derive the numeric category ids (0 -> despoblacion, 1 -> no despoblacion).
    frame['category_id'] = frame['Category'].factorize()[0]
    # Deduplicate and sort to get one (Category, id) pair per class.
    unique_pairs = frame[['Category', 'category_id']].drop_duplicates().sort_values('category_id')
    id_to_category = dict(unique_pairs[['category_id', 'Category']].values)
    features = vectorizer.transform(texts)
    predictions = clf.predict(features)
    votes_desp = 0
    votes_no_desp = 0
    for _, predicted in zip(texts, predictions):
        if id_to_category[predicted] == "No Despoblacion":
            votes_no_desp += 1
        else:
            votes_desp += 1
    # Majority vote; ties resolve to "Despoblacion".
    result = "No Despoblacion" if votes_desp < votes_no_desp else "Despoblacion"
    return json.dumps({"result": result}, indent=3)
# print(model_prediction(b'Brunete')) | api/scrapers/elPais.py | from selenium import webdriver
import time
import requests
from bs4 import BeautifulSoup
import re
import json
from selenium.webdriver.common.keys import Keys
import pickle
import pandas as pd
array_textos_noticias = []
link = []
def elpais_content(user_input):
    """Search elpais.com for *user_input* with headless Chrome and return the page HTML.

    Returns "" if any navigation/interaction step fails.
    """
    # Headless to avoid launching a Chrome window, saving resources since no GUI is needed.
    options = webdriver.ChromeOptions()
    # NOTE(review): an iPhone user agent is spoofed here -- presumably to get the
    # simpler mobile layout; confirm against the parser in text_elpais.
    user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    options.add_argument('user-agent={}'.format(user_agent))
    options.add_argument('--incognito')
    options.add_argument('--headless')
    options.add_argument('--enable-javascript')
    PATH = 'C:/WebDriver/bin/chromedriver.exe'
    driver = webdriver.Chrome(PATH, options=options)
    try:
        driver.get("https://elpais.com/buscador/")  # launch the search URL
        time.sleep(2)
        # Accept the cookie-consent banner, then type the query and press ENTER.
        driver.find_element_by_xpath('//*[@id="didomi-notice-agree-button"]').click()
        driver.find_element_by_xpath('//*[@id="formulario_busquedas"]/input[1]').send_keys(user_input)
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="formulario_busquedas"]/input[1]').send_keys(Keys.ENTER)
        time.sleep(2)
        source = driver.page_source
    except Exception:
        source = ""
    finally:
        # BUG FIX: the WebDriver was never shut down, leaking one Chrome
        # process per call. Always quit, on success or failure.
        driver.quit()
    return source  # full HTML of the results page (or "" on failure)
def text_elpais(user_input):
    """Scrape El Pais search results for *user_input* and return them as a JSON string.

    Each result dict carries 'Name', 'Subtitle' and 'URL' keys. Side effect:
    every subtitle found is appended to the module-level list
    ``array_textos_noticias`` (consumed later by the classifier).

    NOTE(review): when a field is missing for one article, the value from the
    previous article is reused (variables are not reset per item) -- preserved
    here as existing behavior.
    """
    noticias = {}
    noticias["ELPAIS News in " + user_input] = []
    titulo, link_completo, subtitulo = "", "", ""
    # Fetch the rendered search-results page HTML.
    page_source = elpais_content(user_input)
    soup = BeautifulSoup(page_source, 'lxml')
    try:
        contenedor = soup.find_all(class_="noticia")  # one div per article
        for i in contenedor:
            # Title
            try:
                titulo = i.find(title="Ver noticia").text
            except Exception:
                pass  # idiom fix: was a bare `None` expression used as a no-op
            # Link
            try:
                link = i.find(title="Ver noticia").attrs['href']
                link_completo = "https://elpais.com" + str(link)
            except Exception:
                pass
            # Subtitle
            try:
                subtitulo = i.find('p').text
                array_textos_noticias.append(subtitulo)
            except Exception:
                pass
            noticias["ELPAIS News in " + user_input].append({
                'Name': titulo,
                'Subtitle': subtitulo,
                'URL': link_completo
            })
    except Exception:
        # On any unexpected parsing failure, still emit one entry with
        # whatever field values were collected so far (possibly empty).
        noticias["ELPAIS News in " + user_input].append({
            'Name': titulo,
            'Subtitle': subtitulo,
            'URL': link_completo
        })
    return json.dumps(noticias, indent=3)
#print(text_elpais("Talavera de la Reina"))
#Llama a "text_elpais" para rellenar el array y devolverlo para el scrapper de PC1
def elpais_pc1(user_input):
    """Run the El Pais scraper and return the collected subtitles.

    Side effect: text_elpais() appends each article subtitle to the
    module-level list ``array_textos_noticias``. Note the list is never
    cleared, so repeated calls accumulate results across queries.
    """
    text_elpais(user_input)
    return array_textos_noticias
def model_prediction(user_input):
    """Classify scraped El Pais coverage of *user_input* (a UTF-8 bytes query).

    Scrapes article subtitles via elpais_pc1, vectorizes them with a saved
    TF-IDF model and runs a pickled classifier; a majority vote over the
    articles decides between "Despoblacion" and "No Despoblacion".
    Returns a JSON string of the form {"result": <label>}.
    """
    # El Pais scraper
    texts = elpais_pc1(user_input.decode('utf-8'))
    # Saved artifacts: trained classifier + fitted TF-IDF vectorizer.
    clf = pickle.load(open('api/scrapers/data-pc1/trained_model.sav', 'rb'))
    vectorizer = pickle.load(open('api/scrapers/data-pc1/tfidf.pkl', 'rb'))
    frame = pd.read_excel('api/scrapers/data-pc1/Noticias_Excel.xlsx', engine='openpyxl')
    # Re-derive the numeric category ids (0 -> despoblacion, 1 -> no despoblacion).
    frame['category_id'] = frame['Category'].factorize()[0]
    # Deduplicate and sort to get one (Category, id) pair per class.
    unique_pairs = frame[['Category', 'category_id']].drop_duplicates().sort_values('category_id')
    id_to_category = dict(unique_pairs[['category_id', 'Category']].values)
    features = vectorizer.transform(texts)
    predictions = clf.predict(features)
    votes_desp = 0
    votes_no_desp = 0
    for _, predicted in zip(texts, predictions):
        if id_to_category[predicted] == "No Despoblacion":
            votes_no_desp += 1
        else:
            votes_desp += 1
    # Majority vote; ties resolve to "Despoblacion".
    result = "No Despoblacion" if votes_desp < votes_no_desp else "Despoblacion"
    return json.dumps({"result": result}, indent=3)
# print(model_prediction(b'Brunete')) | 0.072587 | 0.075858 |
from scipy.linalg import solve_continuous_lyapunov
from tqdm import tqdm
from torch.utils.data import Subset
import torch
from nnlib.nnlib import utils
from .ntk import compute_training_loss_at_time_t, get_predictions_at_time_t, get_weights_at_time_t, \
get_test_predictions_at_time_t
from . import misc
from .sgd import get_sgd_covariance_full
@utils.with_no_grad
def training_loss_stability(ts, n, eta, ntk, init_preds, Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of the NTK training loss.

    For each of the ``n`` training samples, recomputes the loss curve over the
    times ``ts`` with that sample excluded (learning rate rescaled by
    n/(n-1)) and compares it to the full-data loss curve.

    Returns (change_vectors, change_quantities): per-sample loss-difference
    curves over ``ts`` and their mean squared magnitudes.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    # Loss curve using the full training set -- the baseline to compare against.
    baseline = torch.stack([
        compute_training_loss_at_time_t(t=t, eta=eta, ntk=ntk,
                                        init_preds=init_preds, Y=Y,
                                        continuous=continuous)
        for t in ts
    ])
    n_outputs = init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for excluded in tqdm(range(n)):
        kept = [i for i in range(n) if i != excluded]
        # Row/column indices of the kept samples in the (n * n_outputs) NTK.
        kept_outputs = [i * n_outputs + j for i in kept for j in range(n_outputs)]
        sub_ntk = ntk.clone()[kept_outputs][:, kept_outputs]
        curve = torch.stack([
            compute_training_loss_at_time_t(t=t, eta=eta * n / (n - 1),
                                            ntk=sub_ntk,
                                            init_preds=init_preds[kept],
                                            Y=Y[kept],
                                            continuous=continuous)
            for t in ts
        ])
        diff = curve - baseline
        change_vectors.append(diff)
        change_quantities.append(torch.mean(diff ** 2))
    return change_vectors, change_quantities
@utils.with_no_grad
def training_pred_stability(t, n, eta, ntk, init_preds, Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of training-set predictions at time ``t``.

    For each sample, recomputes the predictions on the remaining samples with
    that sample excluded (learning rate rescaled by n/(n-1)) and compares them
    to the full-data predictions restricted to the same samples.

    Returns (change_vectors, change_quantities): per-sample prediction deltas
    and their mean squared row norms.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    full_preds = get_predictions_at_time_t(t=t, eta=eta, ntk=ntk, init_preds=init_preds,
                                           Y=Y, continuous=continuous)
    n_outputs = init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for excluded in tqdm(range(n)):
        kept = [i for i in range(n) if i != excluded]
        # Row/column indices of the kept samples in the (n * n_outputs) NTK.
        kept_outputs = [i * n_outputs + j for i in kept for j in range(n_outputs)]
        sub_ntk = ntk.clone()[kept_outputs][:, kept_outputs]
        loo_preds = get_predictions_at_time_t(t=t, eta=eta * n / (n - 1), ntk=sub_ntk,
                                              init_preds=init_preds[kept], Y=Y[kept],
                                              continuous=continuous)
        delta = loo_preds - full_preds[kept]
        change_quantities.append(torch.sum(delta ** 2, dim=1).mean(dim=0))
        change_vectors.append(delta)
    return change_vectors, change_quantities
@utils.with_no_grad
def weight_stability(t, n, eta, init_params, jacobians, ntk, init_preds, Y, continuous=False, without_sgd=True,
                     l2_reg_coef=0.0, large_model_regime=False, model=None, dataset=None, return_change_vectors=True,
                     **kwargs):
    """
    Leave-one-out stability of the linearized-model weights at time t: for each
    sample, recompute the weights with that sample excluded (learning rate
    rescaled by n/(n-1)) and measure how far they move from the full-data weights.

    :param without_sgd: if without_sgd = True, then only ||w1-w2|| will be returned,
    otherwise (w1-w2)^T H Sigma^{-1} (w1-w2).
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    ntk_inv = torch.inverse(ntk)
    # Weights after training on the full dataset -- the reference point.
    old_weights = get_weights_at_time_t(t=t, eta=eta, init_params=init_params, jacobians=jacobians,
                                        ntk=ntk, ntk_inv=ntk_inv, init_preds=init_preds, Y=Y, continuous=continuous,
                                        large_model_regime=large_model_regime, model=model, dataset=dataset, **kwargs)
    steady_state_inv_cov = None
    if not without_sgd:
        if large_model_regime:
            raise ValueError("SGD formula works only for small models")
        # compute the SGD noise covariance matrix at the end
        assert (model is not None) and (dataset is not None)
        with utils.SetTemporaryParams(model=model, params=old_weights):
            sgd_cov = get_sgd_covariance_full(model=model, dataset=dataset, cpu=False, **kwargs)
        # add small amount of isotropic Gaussian noise to make sgd_cov invertible
        sgd_cov += 1e-10 * torch.eye(sgd_cov.shape[0], device=sgd_cov.device, dtype=torch.float)
        # now we compute H Sigma^{-1}
        jacobians_cat = [v.view((v.shape[0], -1)) for k, v in jacobians.items()]
        jacobians_cat = torch.cat(jacobians_cat, dim=1)  # (n_samples * n_outputs, n_params)
        H = torch.mm(jacobians_cat.T, jacobians_cat) + l2_reg_coef * torch.eye(jacobians_cat.shape[1],
                                                                               device=ntk.device, dtype=torch.float)
        # steady_state_inv_cov = torch.mm(H, torch.inverse(sgd_cov))
        with utils.Timing(description="Solving the Lyapunov equation"):
            steady_state_cov = solve_continuous_lyapunov(a=utils.to_numpy(H), q=utils.to_numpy(sgd_cov))
        steady_state_cov = torch.tensor(steady_state_cov, dtype=torch.float, device=ntk.device)
        # add small amount of isotropic Gaussian noise to make steady_state_cov invertible
        steady_state_cov += 1e-10 * torch.eye(steady_state_cov.shape[0], device=steady_state_cov.device,
                                              dtype=torch.float)
        steady_state_inv_cov = torch.inverse(steady_state_cov)
    change_vectors = []
    change_quantities = []
    n_outputs = init_preds.shape[-1]
    for sample_idx in tqdm(range(n)):
        # Indices of the remaining samples and of their per-output NTK rows.
        example_indices = [i for i in range(n) if i != sample_idx]
        example_output_indices = []
        for i in example_indices:
            example_output_indices.extend(range(i * n_outputs, (i + 1) * n_outputs))
        new_ntk = ntk.clone()[example_output_indices]
        new_ntk = new_ntk[:, example_output_indices]
        # Inverse of the reduced NTK, derived from the cached full inverse.
        new_ntk_inv = misc.update_ntk_inv(ntk=ntk, ntk_inv=ntk_inv, keep_indices=example_output_indices)
        new_init_preds = init_preds[example_indices]
        new_Y = Y[example_indices]
        if not large_model_regime:
            new_jacobians = dict()
            for k, v in jacobians.items():
                new_jacobians[k] = v[example_output_indices]
        else:
            # In the large-model regime jacobians are recomputed downstream.
            new_jacobians = None
        new_dataset = Subset(dataset, example_indices)
        new_weights = get_weights_at_time_t(t=t, eta=eta * n / (n - 1), init_params=init_params,
                                            jacobians=new_jacobians, ntk=new_ntk, ntk_inv=new_ntk_inv,
                                            init_preds=new_init_preds, Y=new_Y, continuous=continuous,
                                            large_model_regime=large_model_regime, model=model, dataset=new_dataset,
                                            **kwargs)
        total_change = 0.0
        param_changes = dict()
        for k in old_weights.keys():
            param_changes[k] = (new_weights[k] - old_weights[k]).cpu()  # to save GPU memory
        if return_change_vectors:
            change_vectors.append(param_changes)
        if without_sgd:
            # Plain squared L2 distance between the two weight dictionaries.
            for k in old_weights.keys():
                total_change += torch.sum(param_changes[k] ** 2)
        else:
            # Quadratic form (w1-w2)^T SteadyStateCov^{-1} (w1-w2).
            param_changes = [v.flatten() for k, v in param_changes.items()]
            param_changes = torch.cat(param_changes, dim=0)
            total_change = torch.mm(param_changes.view((1, -1)),
                                    torch.mm(steady_state_inv_cov.cpu(), param_changes.view(-1, 1)))
        change_quantities.append(total_change)
    return change_vectors, change_quantities
@utils.with_no_grad
def test_pred_stability(t, n, eta, ntk, test_train_ntk, train_init_preds, test_init_preds,
                        train_Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of test-set predictions at time t.

    For each training sample, recomputes the test predictions with that
    sample excluded (learning rate rescaled by n/(n-1)) and compares them
    with the full-data test predictions.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    ntk_inv = torch.inverse(ntk)
    # Test predictions when training on the full training set (baseline).
    old_preds = get_test_predictions_at_time_t(t=t, eta=eta,
                                               ntk=ntk,
                                               test_train_ntk=test_train_ntk,
                                               train_Y=train_Y,
                                               train_init_preds=train_init_preds,
                                               test_init_preds=test_init_preds,
                                               continuous=continuous,
                                               ntk_inv=ntk_inv)
    n_outputs = train_init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for sample_idx in tqdm(range(n)):
        # Indices of the remaining samples and of their per-output NTK rows.
        example_indices = [i for i in range(n) if i != sample_idx]
        example_output_indices = []
        for i in example_indices:
            example_output_indices.extend(range(i * n_outputs, (i + 1) * n_outputs))
        new_ntk = ntk.clone()[example_output_indices]
        new_ntk = new_ntk[:, example_output_indices]
        new_test_train_ntk = test_train_ntk[:, example_output_indices]
        # Inverse of the reduced NTK, derived from the cached full inverse.
        new_ntk_inv = misc.update_ntk_inv(ntk=ntk, ntk_inv=ntk_inv, keep_indices=example_output_indices)
        new_train_init_preds = train_init_preds[example_indices]
        new_train_Y = train_Y[example_indices]
        new_preds = get_test_predictions_at_time_t(
            t=t,
            eta=eta * n / (n-1),
            train_Y=new_train_Y,
            train_init_preds=new_train_init_preds,
            test_init_preds=test_init_preds,
            continuous=continuous,
            ntk=new_ntk,
            ntk_inv=new_ntk_inv,
            test_train_ntk=new_test_train_ntk)
        change_vectors.append(new_preds - old_preds)
        change_quantities.append(torch.sum((new_preds - old_preds) ** 2, dim=1).mean(dim=0))
return change_vectors, change_quantities | sample_info/modules/stability.py | from scipy.linalg import solve_continuous_lyapunov
from tqdm import tqdm
from torch.utils.data import Subset
import torch
from nnlib.nnlib import utils
from .ntk import compute_training_loss_at_time_t, get_predictions_at_time_t, get_weights_at_time_t, \
get_test_predictions_at_time_t
from . import misc
from .sgd import get_sgd_covariance_full
@utils.with_no_grad
def training_loss_stability(ts, n, eta, ntk, init_preds, Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of the NTK training loss.

    For each of the ``n`` training samples, recomputes the loss curve over the
    times ``ts`` with that sample excluded (learning rate rescaled by
    n/(n-1)) and compares it to the full-data loss curve.

    Returns (change_vectors, change_quantities): per-sample loss-difference
    curves over ``ts`` and their mean squared magnitudes.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    # Loss curve using the full training set -- the baseline to compare against.
    baseline = torch.stack([
        compute_training_loss_at_time_t(t=t, eta=eta, ntk=ntk,
                                        init_preds=init_preds, Y=Y,
                                        continuous=continuous)
        for t in ts
    ])
    n_outputs = init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for excluded in tqdm(range(n)):
        kept = [i for i in range(n) if i != excluded]
        # Row/column indices of the kept samples in the (n * n_outputs) NTK.
        kept_outputs = [i * n_outputs + j for i in kept for j in range(n_outputs)]
        sub_ntk = ntk.clone()[kept_outputs][:, kept_outputs]
        curve = torch.stack([
            compute_training_loss_at_time_t(t=t, eta=eta * n / (n - 1),
                                            ntk=sub_ntk,
                                            init_preds=init_preds[kept],
                                            Y=Y[kept],
                                            continuous=continuous)
            for t in ts
        ])
        diff = curve - baseline
        change_vectors.append(diff)
        change_quantities.append(torch.mean(diff ** 2))
    return change_vectors, change_quantities
@utils.with_no_grad
def training_pred_stability(t, n, eta, ntk, init_preds, Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of training-set predictions at time ``t``.

    For each sample, recomputes the predictions on the remaining samples with
    that sample excluded (learning rate rescaled by n/(n-1)) and compares them
    to the full-data predictions restricted to the same samples.

    Returns (change_vectors, change_quantities): per-sample prediction deltas
    and their mean squared row norms.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    full_preds = get_predictions_at_time_t(t=t, eta=eta, ntk=ntk, init_preds=init_preds,
                                           Y=Y, continuous=continuous)
    n_outputs = init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for excluded in tqdm(range(n)):
        kept = [i for i in range(n) if i != excluded]
        # Row/column indices of the kept samples in the (n * n_outputs) NTK.
        kept_outputs = [i * n_outputs + j for i in kept for j in range(n_outputs)]
        sub_ntk = ntk.clone()[kept_outputs][:, kept_outputs]
        loo_preds = get_predictions_at_time_t(t=t, eta=eta * n / (n - 1), ntk=sub_ntk,
                                              init_preds=init_preds[kept], Y=Y[kept],
                                              continuous=continuous)
        delta = loo_preds - full_preds[kept]
        change_quantities.append(torch.sum(delta ** 2, dim=1).mean(dim=0))
        change_vectors.append(delta)
    return change_vectors, change_quantities
@utils.with_no_grad
def weight_stability(t, n, eta, init_params, jacobians, ntk, init_preds, Y, continuous=False, without_sgd=True,
                     l2_reg_coef=0.0, large_model_regime=False, model=None, dataset=None, return_change_vectors=True,
                     **kwargs):
    """
    Leave-one-out stability of the linearized-model weights at time t: for each
    sample, recompute the weights with that sample excluded (learning rate
    rescaled by n/(n-1)) and measure how far they move from the full-data weights.

    :param without_sgd: if without_sgd = True, then only ||w1-w2|| will be returned,
    otherwise (w1-w2)^T H Sigma^{-1} (w1-w2).
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    ntk_inv = torch.inverse(ntk)
    # Weights after training on the full dataset -- the reference point.
    old_weights = get_weights_at_time_t(t=t, eta=eta, init_params=init_params, jacobians=jacobians,
                                        ntk=ntk, ntk_inv=ntk_inv, init_preds=init_preds, Y=Y, continuous=continuous,
                                        large_model_regime=large_model_regime, model=model, dataset=dataset, **kwargs)
    steady_state_inv_cov = None
    if not without_sgd:
        if large_model_regime:
            raise ValueError("SGD formula works only for small models")
        # compute the SGD noise covariance matrix at the end
        assert (model is not None) and (dataset is not None)
        with utils.SetTemporaryParams(model=model, params=old_weights):
            sgd_cov = get_sgd_covariance_full(model=model, dataset=dataset, cpu=False, **kwargs)
        # add small amount of isotropic Gaussian noise to make sgd_cov invertible
        sgd_cov += 1e-10 * torch.eye(sgd_cov.shape[0], device=sgd_cov.device, dtype=torch.float)
        # now we compute H Sigma^{-1}
        jacobians_cat = [v.view((v.shape[0], -1)) for k, v in jacobians.items()]
        jacobians_cat = torch.cat(jacobians_cat, dim=1)  # (n_samples * n_outputs, n_params)
        H = torch.mm(jacobians_cat.T, jacobians_cat) + l2_reg_coef * torch.eye(jacobians_cat.shape[1],
                                                                               device=ntk.device, dtype=torch.float)
        # steady_state_inv_cov = torch.mm(H, torch.inverse(sgd_cov))
        with utils.Timing(description="Solving the Lyapunov equation"):
            steady_state_cov = solve_continuous_lyapunov(a=utils.to_numpy(H), q=utils.to_numpy(sgd_cov))
        steady_state_cov = torch.tensor(steady_state_cov, dtype=torch.float, device=ntk.device)
        # add small amount of isotropic Gaussian noise to make steady_state_cov invertible
        steady_state_cov += 1e-10 * torch.eye(steady_state_cov.shape[0], device=steady_state_cov.device,
                                              dtype=torch.float)
        steady_state_inv_cov = torch.inverse(steady_state_cov)
    change_vectors = []
    change_quantities = []
    n_outputs = init_preds.shape[-1]
    for sample_idx in tqdm(range(n)):
        # Indices of the remaining samples and of their per-output NTK rows.
        example_indices = [i for i in range(n) if i != sample_idx]
        example_output_indices = []
        for i in example_indices:
            example_output_indices.extend(range(i * n_outputs, (i + 1) * n_outputs))
        new_ntk = ntk.clone()[example_output_indices]
        new_ntk = new_ntk[:, example_output_indices]
        # Inverse of the reduced NTK, derived from the cached full inverse.
        new_ntk_inv = misc.update_ntk_inv(ntk=ntk, ntk_inv=ntk_inv, keep_indices=example_output_indices)
        new_init_preds = init_preds[example_indices]
        new_Y = Y[example_indices]
        if not large_model_regime:
            new_jacobians = dict()
            for k, v in jacobians.items():
                new_jacobians[k] = v[example_output_indices]
        else:
            # In the large-model regime jacobians are recomputed downstream.
            new_jacobians = None
        new_dataset = Subset(dataset, example_indices)
        new_weights = get_weights_at_time_t(t=t, eta=eta * n / (n - 1), init_params=init_params,
                                            jacobians=new_jacobians, ntk=new_ntk, ntk_inv=new_ntk_inv,
                                            init_preds=new_init_preds, Y=new_Y, continuous=continuous,
                                            large_model_regime=large_model_regime, model=model, dataset=new_dataset,
                                            **kwargs)
        total_change = 0.0
        param_changes = dict()
        for k in old_weights.keys():
            param_changes[k] = (new_weights[k] - old_weights[k]).cpu()  # to save GPU memory
        if return_change_vectors:
            change_vectors.append(param_changes)
        if without_sgd:
            # Plain squared L2 distance between the two weight dictionaries.
            for k in old_weights.keys():
                total_change += torch.sum(param_changes[k] ** 2)
        else:
            # Quadratic form (w1-w2)^T SteadyStateCov^{-1} (w1-w2).
            param_changes = [v.flatten() for k, v in param_changes.items()]
            param_changes = torch.cat(param_changes, dim=0)
            total_change = torch.mm(param_changes.view((1, -1)),
                                    torch.mm(steady_state_inv_cov.cpu(), param_changes.view(-1, 1)))
        change_quantities.append(total_change)
    return change_vectors, change_quantities
@utils.with_no_grad
def test_pred_stability(t, n, eta, ntk, test_train_ntk, train_init_preds, test_init_preds,
                        train_Y, l2_reg_coef=0.0, continuous=False):
    """Leave-one-out stability of test-set predictions at time t.

    For each training sample, recomputes the test predictions with that
    sample excluded (learning rate rescaled by n/(n-1)) and compares them
    with the full-data test predictions.
    """
    if l2_reg_coef > 0:
        # L2 regularization enters as an additive ridge term on the NTK.
        ntk = ntk + l2_reg_coef * torch.eye(ntk.shape[0], dtype=torch.float, device=ntk.device)
    ntk_inv = torch.inverse(ntk)
    # Test predictions when training on the full training set (baseline).
    old_preds = get_test_predictions_at_time_t(t=t, eta=eta,
                                               ntk=ntk,
                                               test_train_ntk=test_train_ntk,
                                               train_Y=train_Y,
                                               train_init_preds=train_init_preds,
                                               test_init_preds=test_init_preds,
                                               continuous=continuous,
                                               ntk_inv=ntk_inv)
    n_outputs = train_init_preds.shape[-1]
    change_vectors = []
    change_quantities = []
    for sample_idx in tqdm(range(n)):
        # Indices of the remaining samples and of their per-output NTK rows.
        example_indices = [i for i in range(n) if i != sample_idx]
        example_output_indices = []
        for i in example_indices:
            example_output_indices.extend(range(i * n_outputs, (i + 1) * n_outputs))
        new_ntk = ntk.clone()[example_output_indices]
        new_ntk = new_ntk[:, example_output_indices]
        new_test_train_ntk = test_train_ntk[:, example_output_indices]
        # Inverse of the reduced NTK, derived from the cached full inverse.
        new_ntk_inv = misc.update_ntk_inv(ntk=ntk, ntk_inv=ntk_inv, keep_indices=example_output_indices)
        new_train_init_preds = train_init_preds[example_indices]
        new_train_Y = train_Y[example_indices]
        new_preds = get_test_predictions_at_time_t(
            t=t,
            eta=eta * n / (n-1),
            train_Y=new_train_Y,
            train_init_preds=new_train_init_preds,
            test_init_preds=test_init_preds,
            continuous=continuous,
            ntk=new_ntk,
            ntk_inv=new_ntk_inv,
            test_train_ntk=new_test_train_ntk)
        change_vectors.append(new_preds - old_preds)
        change_quantities.append(torch.sum((new_preds - old_preds) ** 2, dim=1).mean(dim=0))
return change_vectors, change_quantities | 0.795062 | 0.327776 |
import argparse
import sys
from array import array
from struct import unpack
from math import sqrt
def doWork(args):
    """Per input EDM file, compute and print two statistics (Python 2 script):

    1) mean/RMS over events of the ratio (#trajectories stopped for
       ``args.numReason``) / (#trajectories stopped for ``args.denReason``),
    2) mean/RMS of valid hits for central tracks (highPurity, |eta| < 0.8,
       pt > 0.65).

    Both are also printed normalized by the file's instantaneous luminosity,
    and optionally written to ``args.output`` as TGraphErrors vs. inst. lumi.
    """
    from DataFormats.FWLite import Handle, Events
    from ROOT import TGraphErrors, TFile, gROOT, kOrange
    output_file = None
    gr = None
    if not args.files:
        return
    tracks_h = Handle("std::vector<reco::Track>")
    counter = 0
    # Per-file accumulators for the stop-reason ratio statistics.
    x_squared_mean = [0. for i in range(len(args.files))]
    x_mean = [0. for i in range(len(args.files))]
    normalization = [0. for i in range(len(args.files))]
    rms = [0. for i in range(len(args.files))]
    # Per-file accumulators for the barrel-hit statistics.
    barrel_hits_squared_mean = [0. for i in range(len(args.files))]
    barrel_hits_mean = [0. for i in range(len(args.files))]
    barrel_hits_normalization = [0. for i in range(len(args.files))]
    barrel_hits_rms = [0. for i in range(len(args.files))]
    for input_file in args.files:
        events = Events(input_file)
        for e in range(events.size()):
            no_reason_to_stop = 0.
            ccc_stop_reason = 0.
            a = events.to(e)
            a = events.getByLabel("generalTracks", tracks_h)
            for track in range(tracks_h.product().size()):
                t = tracks_h.product()[track]
                # take care of barrel hits calculation. Later
                # calculation goes further down since they also "skim"
                # events. For the barrel hits calculation, we only
                # consider tracks with abs(eta) < 0.8 and of
                # highPurity quality.
                if t.quality(t.qualityByName('highPurity')) and abs(t.eta()) < 0.8 and t.pt() > 0.65:
                    barrel_hits_mean[counter] += t.numberOfValidHits()
                    barrel_hits_squared_mean[counter] += t.numberOfValidHits()**2
                    barrel_hits_normalization[counter] += 1.
                # Optional quality skim for the stop-reason counting below.
                if args.quality and args.quality != 'ANY':
                    if not t.quality(t.qualityByName(args.quality)):
                        continue
                # stopReason() is streamed as a uint8_t; decode one byte.
                stop_reason = int(unpack('@B', t.stopReason())[0])
                if stop_reason == args.denReason:
                    no_reason_to_stop += 1
                if stop_reason == args.numReason:
                    ccc_stop_reason += 1
            # Skip events where either count is zero (ratio undefined / trivial).
            if no_reason_to_stop == 0 or ccc_stop_reason == 0:
                continue
            x_mean[counter] += ccc_stop_reason/no_reason_to_stop
            x_squared_mean[counter] += (ccc_stop_reason/no_reason_to_stop)**2
            normalization[counter] += 1
        # Finalize per-file mean and RMS of the per-event ratio.
        x_mean[counter] = x_mean[counter]/normalization[counter]
        x_squared_mean[counter] = x_squared_mean[counter]/normalization[counter]
        rms[counter] = sqrt(x_squared_mean[counter] - x_mean[counter]**2)
        print '%s: mean and RMS: %f, %f, normalized to lumi mean and RMS: %f, %f' % (input_file,
                                                                                     x_mean[counter],
                                                                                     rms[counter],
                                                                                     x_mean[counter]/args.instlumis[counter],
                                                                                     rms[counter]/args.instlumis[counter])
        # Finalize per-file mean and RMS of the barrel valid-hit count.
        barrel_hits_mean[counter] = barrel_hits_mean[counter]/barrel_hits_normalization[counter]
        barrel_hits_squared_mean[counter] = barrel_hits_squared_mean[counter]/barrel_hits_normalization[counter]
        barrel_hits_rms[counter] = sqrt(barrel_hits_squared_mean[counter] - barrel_hits_mean[counter]**2)
        print '%s: Barrel Hits mean and RMS: %f, %f, normalized to lumi mean and RMS: %f, %f' % (input_file,
                                                                                                 barrel_hits_mean[counter],
                                                                                                 barrel_hits_rms[counter],
                                                                                                 barrel_hits_mean[counter]/args.instlumis[counter],
                                                                                                 barrel_hits_rms[counter]/args.instlumis[counter])
        counter += 1
    # Optionally persist both trends vs. instantaneous luminosity as TGraphErrors.
    if args.output:
        output_file = TFile(args.output, "RECREATE")
        output_file.cd()
        x_mean_arr = array('f')
        x_mean_arr.fromlist(x_mean)
        rms_arr = array('f')
        rms_arr.fromlist(rms)
        lumis = array('f')
        lumis.fromlist(args.instlumis)
        lumi_errors = array('f')
        lumi_errors.fromlist([0. for i in range(len(args.instlumis))])
        gr = TGraphErrors(len(lumis), lumis, x_mean_arr, lumi_errors, rms_arr)
        gr.SetTitle("TrajectoryStopReason_%d/Trajectories_%d vs Inst. Luminosity" % (args.numReason, args.denReason))
        gr.SetMarkerStyle(22)
        gr.SetMarkerColor(kOrange)
        gr.SetLineColor(kOrange)
        gr.Write()
        barrel_hits_mean_arr = array('f')
        barrel_hits_mean_arr.fromlist(barrel_hits_mean)
        barrel_hits_rms_arr = array('f')
        barrel_hits_rms_arr.fromlist(barrel_hits_rms)
        gr2 = TGraphErrors(len(lumis), lumis, barrel_hits_mean_arr, lumi_errors, barrel_hits_rms_arr)
        gr2.SetTitle("Average Barrel Hits vs Inst. Luminosity")
        gr2.SetMarkerStyle(22)
        gr2.SetMarkerColor(kOrange)
        gr2.SetLineColor(kOrange)
        gr2.Write()
        output_file.Close()
def checkArgs(args):
    """Validate the CLI arguments: --files and --instlumis must be supplied
    together and in matching numbers; exit with status 1 otherwise."""
    if args.files and not args.instlumis:
        print "Maybe you forgot to supply also the luminosity information for the supplied files ...? Quitting."
        sys.exit(1)
    if args.instlumis and not args.files:
        print "Maybe you forgot to supply also the files for the supplied luminosities ...? Quitting."
        sys.exit(1)
    if args.files and args.instlumis and not (len(args.files) == len(args.instlumis)):
        print "The number of files and instantaneous luminosities supplied does not match. Quitting."
        sys.exit(1)
if __name__ == '__main__':
    # CLI entry point: parse the arguments, validate them, then run the analysis.
    parser = argparse.ArgumentParser(description='Derive the behaviour of HIP-related quantities for many files, better if at different inst.lumi.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--files',
                        action = 'store',
                        type = str,
                        nargs = '+',
                        help = 'Files to be processed')
    parser.add_argument('-i', '--instlumis',
                        action = 'store',
                        type = float,
                        nargs = '+',
                        help = "Instantaneous luminosities associated to the supplied files. The ordering **MUST** follow the ones of --files.")
    parser.add_argument('-q', '--quality',
                        default = 'ANY',
                        nargs = '?',
                        choices= ['ANY', 'highPurity', 'loose', 'tight'],
                        help = 'Select tracks with the specified quality only. ANY means select all tracks in the collection.',
                        type = str)
    # Numeric codes below are the trajectory stop reasons accepted by
    # --numReason / --denReason:
    # UNINITIALIZED = 0,
    # MAX_HITS = 1,
    # MAX_LOST_HITS = 2,
    # MAX_CONSECUTIVE_LOST_HITS = 3,
    # LOST_HIT_FRACTION = 4,
    # MIN_PT = 5,
    # CHARGE_SIGNIFICANCE = 6,
    # LOOPER = 7,
    # MAX_CCC_LOST_HITS = 8,
    # NO_SEGMENTS_FOR_VALID_LAYERS = 9,
    # NOT_STOPPED = 255 // this is the max allowed since it will be streamed as type uint8_t
    parser.add_argument('-n', '--numReason',
                        action = 'store',
                        type = int,
                        choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 255],
                        default = 8,
                        help = 'Trajectory Stop Reason to monitor (numerator)')
    parser.add_argument('-d', '--denReason',
                        action = 'store',
                        type = int,
                        choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 255],
                        default = 255,
                        help = 'Trajectory Stop Reason to monitor (denominator)')
    parser.add_argument('-o', '--output',
                        help = 'Output ROOT files that will store the results',
                        type = str)
    args = parser.parse_args()
    checkArgs(args)
doWork(args) | hip_xsec.py | import argparse
import sys
from array import array
from struct import unpack
from math import sqrt
def doWork(args):
    """Loop over the input EDM files and, per file, compute:

      * the per-event ratio of tracks stopped for args.numReason to tracks
        stopped for args.denReason (mean and RMS over events), and
      * the mean/RMS number of valid hits for barrel-like tracks
        (highPurity, |eta| < 0.8, pt > 0.65).

    Results are printed (raw and divided by the file's instantaneous
    luminosity) and, if args.output is given, stored as two TGraphErrors
    (value vs. inst. lumi) in a ROOT file.
    """
    # FWLite/ROOT imports are done lazily, inside the function.
    from DataFormats.FWLite import Handle, Events
    from ROOT import TGraphErrors, TFile, gROOT, kOrange
    output_file = None
    gr = None
    if not args.files:
        return
    tracks_h = Handle("std::vector<reco::Track>")
    counter = 0   # index of the file currently being processed
    # Per-file accumulators for the stop-reason ratio ...
    x_squared_mean = [0. for i in range(len(args.files))]
    x_mean = [0. for i in range(len(args.files))]
    normalization = [0. for i in range(len(args.files))]
    rms = [0. for i in range(len(args.files))]
    # ... and for the barrel valid-hit counts.
    barrel_hits_squared_mean = [0. for i in range(len(args.files))]
    barrel_hits_mean = [0. for i in range(len(args.files))]
    barrel_hits_normalization = [0. for i in range(len(args.files))]
    barrel_hits_rms = [0. for i in range(len(args.files))]
    for input_file in args.files:
        events = Events(input_file)
        for e in range(events.size()):
            no_reason_to_stop = 0.   # tracks matching the denominator reason
            ccc_stop_reason = 0.     # tracks matching the numerator reason
            a = events.to(e)
            a = events.getByLabel("generalTracks", tracks_h)
            for track in range(tracks_h.product().size()):
                t = tracks_h.product()[track]
                # take care of barrel hits calculation. Later
                # calculation goes further down since they also "skim"
                # events. For the barrel hits calculation, we only
                # consider tracks with abs(eta) < 0.8 and of
                # highPurity quality.
                if t.quality(t.qualityByName('highPurity')) and abs(t.eta()) < 0.8 and t.pt() > 0.65:
                    barrel_hits_mean[counter] += t.numberOfValidHits()
                    barrel_hits_squared_mean[counter] += t.numberOfValidHits()**2
                    barrel_hits_normalization[counter] += 1.
                # Optional quality skim for the stop-reason ratio only.
                if args.quality and args.quality != 'ANY':
                    if not t.quality(t.qualityByName(args.quality)):
                        continue
                # stopReason() is streamed as uint8_t: decode one unsigned byte.
                stop_reason = int(unpack('@B', t.stopReason())[0])
                if stop_reason == args.denReason:
                    no_reason_to_stop += 1
                if stop_reason == args.numReason:
                    ccc_stop_reason += 1
            # Skip events where the ratio is undefined or trivially zero.
            if no_reason_to_stop == 0 or ccc_stop_reason == 0:
                continue
            x_mean[counter] += ccc_stop_reason/no_reason_to_stop
            x_squared_mean[counter] += (ccc_stop_reason/no_reason_to_stop)**2
            normalization[counter] += 1
        # Turn the per-file sums into mean and RMS.
        # NOTE(review): divides by zero if no event in this file survived the
        # selection (normalization == 0) -- confirm inputs always have signal.
        x_mean[counter] = x_mean[counter]/normalization[counter]
        x_squared_mean[counter] = x_squared_mean[counter]/normalization[counter]
        rms[counter] = sqrt(x_squared_mean[counter] - x_mean[counter]**2)
        print '%s: mean and RMS: %f, %f, normalized to lumi mean and RMS: %f, %f' % (input_file,
                                                                                     x_mean[counter],
                                                                                     rms[counter],
                                                                                     x_mean[counter]/args.instlumis[counter],
                                                                                     rms[counter]/args.instlumis[counter])
        barrel_hits_mean[counter] = barrel_hits_mean[counter]/barrel_hits_normalization[counter]
        barrel_hits_squared_mean[counter] = barrel_hits_squared_mean[counter]/barrel_hits_normalization[counter]
        barrel_hits_rms[counter] = sqrt(barrel_hits_squared_mean[counter] - barrel_hits_mean[counter]**2)
        print '%s: Barrel Hits mean and RMS: %f, %f, normalized to lumi mean and RMS: %f, %f' % (input_file,
                                                                                                 barrel_hits_mean[counter],
                                                                                                 barrel_hits_rms[counter],
                                                                                                 barrel_hits_mean[counter]/args.instlumis[counter],
                                                                                                 barrel_hits_rms[counter]/args.instlumis[counter])
        counter += 1
    if args.output:
        # Persist both trends vs. instantaneous luminosity as TGraphErrors
        # (x errors are zero, y errors are the RMS).
        output_file = TFile(args.output, "RECREATE")
        output_file.cd()
        x_mean_arr = array('f')
        x_mean_arr.fromlist(x_mean)
        rms_arr = array('f')
        rms_arr.fromlist(rms)
        lumis = array('f')
        lumis.fromlist(args.instlumis)
        lumi_errors = array('f')
        lumi_errors.fromlist([0. for i in range(len(args.instlumis))])
        gr = TGraphErrors(len(lumis), lumis, x_mean_arr, lumi_errors, rms_arr)
        gr.SetTitle("TrajectoryStopReason_%d/Trajectories_%d vs Inst. Luminosity" % (args.numReason, args.denReason))
        gr.SetMarkerStyle(22)
        gr.SetMarkerColor(kOrange)
        gr.SetLineColor(kOrange)
        gr.Write()
        barrel_hits_mean_arr = array('f')
        barrel_hits_mean_arr.fromlist(barrel_hits_mean)
        barrel_hits_rms_arr = array('f')
        barrel_hits_rms_arr.fromlist(barrel_hits_rms)
        gr2 = TGraphErrors(len(lumis), lumis, barrel_hits_mean_arr, lumi_errors, barrel_hits_rms_arr)
        gr2.SetTitle("Average Barrel Hits vs Inst. Luminosity")
        gr2.SetMarkerStyle(22)
        gr2.SetMarkerColor(kOrange)
        gr2.SetLineColor(kOrange)
        gr2.Write()
        output_file.Close()
def checkArgs(args):
    """Abort (exit code 1) unless --files and --instlumis were supplied
    together and with matching lengths; returns None when the args are fine.

    Uses the print(...) call form so the messages also work under Python 3;
    output is byte-identical under Python 2.
    """
    if args.files and not args.instlumis:
        print("Maybe you forgot to supply also the luminosity information for the supplied files ...? Quitting.")
        sys.exit(1)
    if args.instlumis and not args.files:
        print("Maybe you forgot to supply also the files for the supplied luminosities ...? Quitting.")
        sys.exit(1)
    if args.files and args.instlumis and len(args.files) != len(args.instlumis):
        print("The number of files and instantaneous luminosities supplied does not match. Quitting.")
        sys.exit(1)
if __name__ == '__main__':
    # Command-line driver: build the parser, validate the arguments, run the scan.
    parser = argparse.ArgumentParser(description='Derive the behaviour of HIP-related quantities for many files, better if at different inst.lumi.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--files',
                        action='store',
                        type=str,
                        nargs='+',
                        help='Files to be processed')
    parser.add_argument('-i', '--instlumis',
                        action='store',
                        type=float,
                        nargs='+',
                        help="Instantaneous luminosities associated to the supplied files. The ordering **MUST** follow the ones of --files.")
    parser.add_argument('-q', '--quality',
                        default='ANY',
                        nargs='?',
                        choices=['ANY', 'highPurity', 'loose', 'tight'],
                        help='Select tracks with the specified quality only. ANY means select all tracks in the collection.',
                        type=str)
    # Trajectory stop-reason codes accepted by -n/-d:
    # UNINITIALIZED = 0,
    # MAX_HITS = 1,
    # MAX_LOST_HITS = 2,
    # MAX_CONSECUTIVE_LOST_HITS = 3,
    # LOST_HIT_FRACTION = 4,
    # MIN_PT = 5,
    # CHARGE_SIGNIFICANCE = 6,
    # LOOPER = 7,
    # MAX_CCC_LOST_HITS = 8,
    # NO_SEGMENTS_FOR_VALID_LAYERS = 9,
    # NOT_STOPPED = 255 // this is the max allowed since it will be streamed as type uint8_t
    parser.add_argument('-n', '--numReason',
                        action='store',
                        type=int,
                        choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 255],
                        default=8,
                        help='Trajectory Stop Reason to monitor (numerator)')
    parser.add_argument('-d', '--denReason',
                        action='store',
                        type=int,
                        choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 255],
                        default=255,
                        help='Trajectory Stop Reason to monitor (denominator)')
    parser.add_argument('-o', '--output',
                        help='Output ROOT files that will store the results',
                        type=str)
    args = parser.parse_args()
    checkArgs(args)
    # FIX: dataset-dump residue that had been fused onto this line was removed.
    doWork(args)
import pygame, sys, math, time
from utils import MenuItemIndex, utils, TimerObject
from BaseRenderer import BaseRenderer
class SpaceRace():
    """Top-down space-race mini game: the ship stays screen-centred while the
    track scrolls underneath it; laps are timed and a best lap is recorded."""
    def __init__(self, pyg, screen):
        # General settings
        print("init SpaceRace")
        self.pyg = pyg   # the pygame module, injected by the caller
        self.myfont = self.pyg.font.SysFont("monospace", 30)
        self.screen = screen   # display surface everything is blitted onto
        self.width = pyg.display.Info().current_w
        self.height = pyg.display.Info().current_h
        self.state = "menu"   # "menu" | "game" | "quit"
        # Loading the items on screen and do some calculations
        self.spaceship = pyg.image.load("Assets/spaceship-basic.png")
        self.track = pyg.image.load("Assets/track-2.png")
        # Second copy of the track image, sampled per-pixel (alpha channel)
        # for collision checks in color_code()/can_move().
        self.track_mask = pyg.image.load("Assets/track-2.png")
        self.startfinish_checker = pyg.image.load("Assets/startfinish.png")
        self.can_lap_checker = pyg.image.load("Assets/startfinish.png")
        self.startfinish = pyg.image.load("Assets/chequered.png")
        self.spaceshipWidth = self.spaceship.get_rect().size[0]
        self.spaceshipHeight = self.spaceship.get_rect().size[1]
        self.trackWidth = self.track.get_rect().size[0]
        self.trackHeight = self.track.get_rect().size[1]
        # Space ship start location (track scroll offset, not a screen position)
        self.spaceshipX = -142
        self.spaceshipY = -487
        # Space ship starting variables
        self.rotation = 0          # degrees: 0 up, 90 left, 180 down, 270 right
        self.speed = 0
        self.max_speed = 20
        self.acceleration = 0.3
        self.keys = [False, False, False, False]   # held state: [left, right, up, down]
        # Things with timers and laps
        self.start_time = 0        # 0 means the lap timer has not started yet
        self.laptime = TimerObject()
        self.bestlaptime = TimerObject("00:00:000", 0)
        self.laps = 0
        self.can_lap = False       # set once the halfway checker is crossed
        # Menu items
        self.option_items = []
        options = ("Continue", "Exit")
        actions = ("game", "quit")
        for index, option in enumerate(options):
            option_item = MenuItemIndex(str(option), actions[index], index, None, 80)
            # Centre the stacked entries on screen.
            t_h = len(options) * option_item.height
            pos_x = (self.width / 2) - (option_item.width / 2)
            pos_y = (self.height / 2) - (t_h / 2) + ((index * 2) + index * option_item.height)
            option_item.set_position(pos_x, pos_y)
            self.option_items.append(option_item)
    # 30FPS masterrace
    def background(self):
        """Per-frame update: run the menu, or advance and draw one game frame
        (movement, track/marker/ship rendering, lap timing and the HUD)."""
        if self.state == "menu":
            self.menu()
        elif self.state == "quit":
            # Signal the outer loop to go back to the main menu.
            return "return=main"
        elif self.state == "game":
            # Well, check every frame
            self.speed_controll()
            # React to button presses of the arrow keys and do something with it.
            # The world (track) moves, not the ship; the ship stays screen-centred.
            if self.keys[0] == True: #Left
                if self.can_move(self.spaceshipX + self.speed, self.spaceshipY):
                    self.spaceshipX += self.speed
                self.rotation = 90
            if self.keys[1] == True: #Right
                if self.can_move(self.spaceshipX - self.speed, self.spaceshipY):
                    self.spaceshipX -= self.speed
                self.rotation = 270
            if self.keys[2] == True: #Up
                if self.can_move(self.spaceshipX, self.spaceshipY + self.speed):
                    self.spaceshipY += self.speed
                self.rotation = 0
            if self.keys[3] == True: #Down
                if self.can_move(self.spaceshipX, self.spaceshipY - self.speed):
                    self.spaceshipY -= self.speed
                self.rotation = 180
            # Diagonal combinations override the facing chosen above.
            if self.keys[2] and self.keys[0] == True: #Up Left
                self.rotation = 45
            if self.keys[2] and self.keys[1] == True: #Up Right
                self.rotation = 315
            if self.keys[3] and self.keys[0] == True: #Down Left
                self.rotation = 135
            if self.keys[3] and self.keys[1] == True: #Down Right
                self.rotation = 225
            # Draw track
            self.rotatedimg = self.pyg.transform.rotate(self.spaceship, self.rotation)
            self.screen.blit(self.track, (self.width/2 + self.spaceshipX, self.height/2 + self.spaceshipY))
            # Track markers: fixed track coordinates plus the scroll offset.
            startfinishX = 454 + self.spaceshipX
            startfinishY = 787 + self.spaceshipY
            can_lap_checkerX = 2221 + self.spaceshipX
            can_lap_checkerY = 787 + self.spaceshipY
            # Draw track markers
            # The checkers are invisible
            self.screen.blit(self.startfinish_checker, (startfinishX, startfinishY))
            self.screen.blit(self.can_lap_checker, (can_lap_checkerX, can_lap_checkerY))
            # This one is chequered xD
            self.screen.blit(self.startfinish, (startfinishX, startfinishY))
            # Draw rotated space ship on top of everything
            self.screen.blit(self.rotatedimg, ((self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2)))
            # Check if markers have been hit
            startfinish_hit = utils.collisionDetect(self.startfinish_checker, startfinishX, startfinishY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
            can_lap_checker_hit = utils.collisionDetect(self.can_lap_checker, can_lap_checkerX, can_lap_checkerY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
            # Check if space ship passed the lap marker halfway the lap
            if can_lap_checker_hit == True:
                self.can_lap = True
            # Calculate the lap time
            self.laptime = utils.get_elapsed_time(self.start_time)
            # Check if space ship passed start finish and do stuf like reset the laptime an add one lap to the counter
            if startfinish_hit == True and self.can_lap == True:
                # First completed lap (bestlaptime 0) or a new record: keep it.
                if self.laptime.millis < self.bestlaptime.millis or self.bestlaptime.millis == 0:
                    self.bestlaptime.millis = self.laptime.millis
                    self.bestlaptime.disp_time = self.laptime.disp_time
                self.start_time = 0   # timer restarts on the next key press
                self.laps += 1
                self.can_lap = False
            # Draw lap information
            self.disp_laptime = self.myfont.render("Time: " + self.laptime.disp_time, 1, (255, 255, 0))
            self.disp_bestlaptime = self.myfont.render("Highscore: " + self.bestlaptime.disp_time, 1, (225, 225, 0))
            self.disp_laps = self.myfont.render("Laps: " + str(self.laps), 1, (225, 225, 0))
            self.screen.blit(self.disp_laptime, (20, 20))
            self.screen.blit(self.disp_bestlaptime, (20, 60))
            self.screen.blit(self.disp_laps, (20, 100))
def run(self, event):
if self.state == "menu":
s = self.menu()
return s
elif self.state == "game":
i = event
# Detect if and which button(s) is/are pressed
if i.type == self.pyg.KEYDOWN:
if self.start_time == 0:
# Start the timer when you start moving
self.start_time = utils.start_timer()
if i.key == self.pyg.K_LEFT:
self.keys[0] = True
if i.key == self.pyg.K_RIGHT:
self.keys[1] = True
if i.key == self.pyg.K_UP:
self.keys[2] = True
if i.key == self.pyg.K_DOWN:
self.keys[3] = True
if i.type == self.pyg.KEYUP:
if i.key == self.pyg.K_LEFT:
self.keys[0] = False
if i.key == self.pyg.K_RIGHT:
self.keys[1] = False
if i.key == self.pyg.K_UP:
self.keys[2] = False
if i.key == self.pyg.K_DOWN:
self.keys[3] = False
# Manage the speed of the space ship
def speed_controll(self):
drag = 0.5
# Prevents the speed from dipping below 0
if self.speed < 0:
self.speed = 1
# Prevents the speed from exceeding the speed limit
if self.speed > self.max_speed:
self.speed = self.max_speed
# If there is movement in any direction
if any(k == True for k in self.keys):
self.speed += self.acceleration
# If there is no movement at all
if all(k == False for k in self.keys) and self.speed > 0:
self.speed -= drag
if self.speed > 1:
if self.keys[2] and self.keys[0] == True: #Up Left
self.speed -= drag
if self.keys[2] and self.keys[1] == True: #Up Right
self.speed -= drag
if self.keys[3] and self.keys[0] == True: #Down Left
self.speed -= drag
if self.keys[3] and self.keys[1] == True: #Down Right
self.speed -= drag
    # Manage the menu clicks
    def menu(self):
        """Draw the menu entries, highlight the hovered one, and switch
        self.state to the entry's action when it is left-clicked."""
        for option in self.option_items:
            mouseProperties = self.pyg.mouse.get_pos()
            if option.is_mouse_selection(mouseProperties[0], mouseProperties[1]):
                option.set_selected(True)
                # Left button down while hovering: follow the entry's action.
                if self.pyg.mouse.get_pressed()[0]:
                    self.state = option.redir
            else:
                option.set_selected(False)
            self.screen.blit(option.label, option.position)
    # Decides if the character is allowed to move
    def can_move(self, min_x, min_y):
        """Return True if the track offset (min_x, min_y) keeps the ship on
        drivable (non-transparent) track.  Hitting a wall also knocks 10 off
        self.speed as a penalty side effect."""
        # Offsets are negative track coordinates; flip the sign to get pixel indices.
        x = math.floor(0 - min_x)
        y = math.floor(0 - min_y)
        #x and y not outside track.width and height
        if (x < 0 or x > self.trackWidth - 1 - self.speed):
            return False
        if (y < 0 or y > self.trackHeight - 1 - self.speed):
            return False
        # Don't move if transparent
        if (self.color_code(x, y).a) > 0:
            return True
        else:
            self.speed -= 10   # wall hit: bleed off speed
            return False
# Return the RGBA value of a pixel at a given location
def color_code(self, x, y):
if str(x)[0] == "-":
x = math.floor(0 - x)
y = math.floor(0 - y)
color_code = self.track_mask.get_at((x,y))
return color_code | UranusInvaders/SpaceRace.py | import pygame, sys, math, time
from utils import MenuItemIndex, utils, TimerObject
from BaseRenderer import BaseRenderer
class SpaceRace():
    """Top-down space-race mini game: the ship stays screen-centred while the
    track scrolls underneath it; laps are timed and a best lap is recorded."""
    def __init__(self, pyg, screen):
        # General settings
        print("init SpaceRace")
        self.pyg = pyg   # the pygame module, injected by the caller
        self.myfont = self.pyg.font.SysFont("monospace", 30)
        self.screen = screen   # display surface everything is blitted onto
        self.width = pyg.display.Info().current_w
        self.height = pyg.display.Info().current_h
        self.state = "menu"   # "menu" | "game" | "quit"
        # Loading the items on screen and do some calculations
        self.spaceship = pyg.image.load("Assets/spaceship-basic.png")
        self.track = pyg.image.load("Assets/track-2.png")
        # Second copy of the track image, sampled per-pixel (alpha channel)
        # for collision checks in color_code()/can_move().
        self.track_mask = pyg.image.load("Assets/track-2.png")
        self.startfinish_checker = pyg.image.load("Assets/startfinish.png")
        self.can_lap_checker = pyg.image.load("Assets/startfinish.png")
        self.startfinish = pyg.image.load("Assets/chequered.png")
        self.spaceshipWidth = self.spaceship.get_rect().size[0]
        self.spaceshipHeight = self.spaceship.get_rect().size[1]
        self.trackWidth = self.track.get_rect().size[0]
        self.trackHeight = self.track.get_rect().size[1]
        # Space ship start location (track scroll offset, not a screen position)
        self.spaceshipX = -142
        self.spaceshipY = -487
        # Space ship starting variables
        self.rotation = 0          # degrees: 0 up, 90 left, 180 down, 270 right
        self.speed = 0
        self.max_speed = 20
        self.acceleration = 0.3
        self.keys = [False, False, False, False]   # held state: [left, right, up, down]
        # Things with timers and laps
        self.start_time = 0        # 0 means the lap timer has not started yet
        self.laptime = TimerObject()
        self.bestlaptime = TimerObject("00:00:000", 0)
        self.laps = 0
        self.can_lap = False       # set once the halfway checker is crossed
        # Menu items
        self.option_items = []
        options = ("Continue", "Exit")
        actions = ("game", "quit")
        for index, option in enumerate(options):
            option_item = MenuItemIndex(str(option), actions[index], index, None, 80)
            # Centre the stacked entries on screen.
            t_h = len(options) * option_item.height
            pos_x = (self.width / 2) - (option_item.width / 2)
            pos_y = (self.height / 2) - (t_h / 2) + ((index * 2) + index * option_item.height)
            option_item.set_position(pos_x, pos_y)
            self.option_items.append(option_item)
    # 30FPS masterrace
    def background(self):
        """Per-frame update: run the menu, or advance and draw one game frame
        (movement, track/marker/ship rendering, lap timing and the HUD)."""
        if self.state == "menu":
            self.menu()
        elif self.state == "quit":
            # Signal the outer loop to go back to the main menu.
            return "return=main"
        elif self.state == "game":
            # Well, check every frame
            self.speed_controll()
            # React to button presses of the arrow keys and do something with it.
            # The world (track) moves, not the ship; the ship stays screen-centred.
            if self.keys[0] == True: #Left
                if self.can_move(self.spaceshipX + self.speed, self.spaceshipY):
                    self.spaceshipX += self.speed
                self.rotation = 90
            if self.keys[1] == True: #Right
                if self.can_move(self.spaceshipX - self.speed, self.spaceshipY):
                    self.spaceshipX -= self.speed
                self.rotation = 270
            if self.keys[2] == True: #Up
                if self.can_move(self.spaceshipX, self.spaceshipY + self.speed):
                    self.spaceshipY += self.speed
                self.rotation = 0
            if self.keys[3] == True: #Down
                if self.can_move(self.spaceshipX, self.spaceshipY - self.speed):
                    self.spaceshipY -= self.speed
                self.rotation = 180
            # Diagonal combinations override the facing chosen above.
            if self.keys[2] and self.keys[0] == True: #Up Left
                self.rotation = 45
            if self.keys[2] and self.keys[1] == True: #Up Right
                self.rotation = 315
            if self.keys[3] and self.keys[0] == True: #Down Left
                self.rotation = 135
            if self.keys[3] and self.keys[1] == True: #Down Right
                self.rotation = 225
            # Draw track
            self.rotatedimg = self.pyg.transform.rotate(self.spaceship, self.rotation)
            self.screen.blit(self.track, (self.width/2 + self.spaceshipX, self.height/2 + self.spaceshipY))
            # Track markers: fixed track coordinates plus the scroll offset.
            startfinishX = 454 + self.spaceshipX
            startfinishY = 787 + self.spaceshipY
            can_lap_checkerX = 2221 + self.spaceshipX
            can_lap_checkerY = 787 + self.spaceshipY
            # Draw track markers
            # The checkers are invisible
            self.screen.blit(self.startfinish_checker, (startfinishX, startfinishY))
            self.screen.blit(self.can_lap_checker, (can_lap_checkerX, can_lap_checkerY))
            # This one is chequered xD
            self.screen.blit(self.startfinish, (startfinishX, startfinishY))
            # Draw rotated space ship on top of everything
            self.screen.blit(self.rotatedimg, ((self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2)))
            # Check if markers have been hit
            startfinish_hit = utils.collisionDetect(self.startfinish_checker, startfinishX, startfinishY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
            can_lap_checker_hit = utils.collisionDetect(self.can_lap_checker, can_lap_checkerX, can_lap_checkerY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
            # Check if space ship passed the lap marker halfway the lap
            if can_lap_checker_hit == True:
                self.can_lap = True
            # Calculate the lap time
            self.laptime = utils.get_elapsed_time(self.start_time)
            # Check if space ship passed start finish and do stuf like reset the laptime an add one lap to the counter
            if startfinish_hit == True and self.can_lap == True:
                # First completed lap (bestlaptime 0) or a new record: keep it.
                if self.laptime.millis < self.bestlaptime.millis or self.bestlaptime.millis == 0:
                    self.bestlaptime.millis = self.laptime.millis
                    self.bestlaptime.disp_time = self.laptime.disp_time
                self.start_time = 0   # timer restarts on the next key press
                self.laps += 1
                self.can_lap = False
            # Draw lap information
            self.disp_laptime = self.myfont.render("Time: " + self.laptime.disp_time, 1, (255, 255, 0))
            self.disp_bestlaptime = self.myfont.render("Highscore: " + self.bestlaptime.disp_time, 1, (225, 225, 0))
            self.disp_laps = self.myfont.render("Laps: " + str(self.laps), 1, (225, 225, 0))
            self.screen.blit(self.disp_laptime, (20, 20))
            self.screen.blit(self.disp_bestlaptime, (20, 60))
            self.screen.blit(self.disp_laps, (20, 100))
def run(self, event):
if self.state == "menu":
s = self.menu()
return s
elif self.state == "game":
i = event
# Detect if and which button(s) is/are pressed
if i.type == self.pyg.KEYDOWN:
if self.start_time == 0:
# Start the timer when you start moving
self.start_time = utils.start_timer()
if i.key == self.pyg.K_LEFT:
self.keys[0] = True
if i.key == self.pyg.K_RIGHT:
self.keys[1] = True
if i.key == self.pyg.K_UP:
self.keys[2] = True
if i.key == self.pyg.K_DOWN:
self.keys[3] = True
if i.type == self.pyg.KEYUP:
if i.key == self.pyg.K_LEFT:
self.keys[0] = False
if i.key == self.pyg.K_RIGHT:
self.keys[1] = False
if i.key == self.pyg.K_UP:
self.keys[2] = False
if i.key == self.pyg.K_DOWN:
self.keys[3] = False
# Manage the speed of the space ship
def speed_controll(self):
drag = 0.5
# Prevents the speed from dipping below 0
if self.speed < 0:
self.speed = 1
# Prevents the speed from exceeding the speed limit
if self.speed > self.max_speed:
self.speed = self.max_speed
# If there is movement in any direction
if any(k == True for k in self.keys):
self.speed += self.acceleration
# If there is no movement at all
if all(k == False for k in self.keys) and self.speed > 0:
self.speed -= drag
if self.speed > 1:
if self.keys[2] and self.keys[0] == True: #Up Left
self.speed -= drag
if self.keys[2] and self.keys[1] == True: #Up Right
self.speed -= drag
if self.keys[3] and self.keys[0] == True: #Down Left
self.speed -= drag
if self.keys[3] and self.keys[1] == True: #Down Right
self.speed -= drag
    # Manage the menu clicks
    def menu(self):
        """Draw the menu entries, highlight the hovered one, and switch
        self.state to the entry's action when it is left-clicked."""
        for option in self.option_items:
            mouseProperties = self.pyg.mouse.get_pos()
            if option.is_mouse_selection(mouseProperties[0], mouseProperties[1]):
                option.set_selected(True)
                # Left button down while hovering: follow the entry's action.
                if self.pyg.mouse.get_pressed()[0]:
                    self.state = option.redir
            else:
                option.set_selected(False)
            self.screen.blit(option.label, option.position)
    # Decides if the character is allowed to move
    def can_move(self, min_x, min_y):
        """Return True if the track offset (min_x, min_y) keeps the ship on
        drivable (non-transparent) track.  Hitting a wall also knocks 10 off
        self.speed as a penalty side effect."""
        # Offsets are negative track coordinates; flip the sign to get pixel indices.
        x = math.floor(0 - min_x)
        y = math.floor(0 - min_y)
        #x and y not outside track.width and height
        if (x < 0 or x > self.trackWidth - 1 - self.speed):
            return False
        if (y < 0 or y > self.trackHeight - 1 - self.speed):
            return False
        # Don't move if transparent
        if (self.color_code(x, y).a) > 0:
            return True
        else:
            self.speed -= 10   # wall hit: bleed off speed
            return False
# Return the RGBA value of a pixel at a given location
def color_code(self, x, y):
if str(x)[0] == "-":
x = math.floor(0 - x)
y = math.floor(0 - y)
color_code = self.track_mask.get_at((x,y))
return color_code | 0.373647 | 0.207175 |
__file__ = 'OffSystem_v1'
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime
# date: YYYY_MM_DD stamp used for the per-run QC folder and the report name
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
# variables: SDE feature class / table paths shared by all checks below
qcfolder = "C:\\TxDOT\\QC\\OffSystem"
roadways = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
# restrict every check to county roads ('2') and FC streets ('3')
where = """ RTE_CLASS = '2' OR RTE_CLASS = '3' """
subfiles = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
cities = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
districts = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
workspace = qcfolder + "\\" + today
# Create today's scratch folder, or empty it out if a previous run left files.
if not os.path.exists(workspace):
    os.makedirs(workspace)
else:
    for file in os.listdir(workspace):
        thefile = os.path.join(workspace, file)
        os.remove(thefile)
    #print "Folder already exists for today. Please ether rename or delete the QC folder with today's date."
def overlap():
    """Find county roads and FC streets on the wrong side of city limits.

    FC streets (RTE_CLASS '3') must lie inside a city; county roads ('2')
    must lie outside.  Writes City_OverlapErrors.shp (offending pieces
    spatially joined to their district) and returns rows of
    [RTE_ID, overlap length, DIST_NM, DIST_NBR].
    """
    print "starting " + str(now)
    # FC streets that fall OUTSIDE every city boundary are errors.
    arcpy.Select_analysis(roadways, workspace + "\\FC_Streets.shp", """ RTE_CLASS = '3' """)
    arcpy.Erase_analysis(workspace + "\\FC_Streets.shp", cities, workspace + "\\FC_Streets_Errors.shp")
    print "fc"
    # County roads that fall INSIDE a city boundary are errors.
    arcpy.Clip_analysis(roadways, cities, workspace + "\\City_Roads.shp")
    print "City"
    arcpy.Select_analysis(workspace + "\\City_Roads.shp", workspace + "\\County_Roads_Errors.shp", """ RTE_CLASS = '2' """)
    print "cr select"
    arcpy.Merge_management([workspace + "\\County_Roads_Errors.shp", workspace + "\\FC_Streets_Errors.shp"], workspace + "\\MergedErrors.shp")
    print "merge"
    # Attach district attributes to each error piece.
    arcpy.SpatialJoin_analysis(workspace + "\\MergedErrors.shp", districts, workspace + "\\City_OverlapErrors.shp")
    print "SJ"
    # Clean up intermediate shapefiles.
    arcpy.Delete_management(workspace + "\\City_Roads.shp")
    arcpy.Delete_management(workspace + "\\FC_Streets.shp")
    arcpy.Delete_management(workspace + "\\County_Roads_Errors.shp")
    arcpy.Delete_management(workspace + "\\FC_Streets_Errors.shp")
    arcpy.Delete_management(workspace + "\\MergedErrors.shp")
    print "end " + str(now)
    errors = []
    cursor = arcpy.UpdateCursor(workspace + "\\City_OverlapErrors.shp")
    for row in cursor:
        geom = row.shape
        # 0.000621371 is the meters-to-miles factor (assumes a meter-based
        # projection -- TODO confirm the source coordinate system).
        len = geom.length * .000621371
        row.setValue("RTE_LEN", len)
        cursor.updateRow(row)
        rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR]
        errors.append(rowinfo)
    del cursor
    del row
    return errors
def routeopen():
    """Cross-check RTE_OPEN on TXDOT_Roadways against HIGHWAY_STATUS in SUBFILES.

    RTE_OPEN = 1 must pair with HIGHWAY_STATUS = 4, RTE_OPEN = 0 with
    HIGHWAY_STATUS = 0; any other RTE_OPEN value is itself an error.
    Returns rows of [RTE_ID, RTE_OPEN, HIGHWAY_STATUS, description].
    """
    errors = []
    cursor = arcpy.SearchCursor(roadways, where)
    for row in cursor:
        rte_id = row.RTE_ID
        if row.RTE_OPEN == 1:
            expected = 4
            text = "RTE_OPEN = 1 requires HIGHWAY_STATUS = 4"
        elif row.RTE_OPEN == 0:
            expected = 0
            text = "RTE_OPEN = 0 requires HIGHWAY_STATUS = 0"
        else:
            errors.append([rte_id, row.RTE_OPEN, "N/A", "RTE_OPEN must be 1 or 0"])
            continue
        rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + rte_id + "'")
        for record in rte_subfiles:
            status = record.HIGHWAY_STATUS
            if status != expected:
                # BUG FIX: build a fresh error row per offending record.  The
                # original reused a single list across records, so consecutive
                # errors were fused into one malformed row and the same list
                # object was appended to `errors` repeatedly.
                errors.append([rte_id, row.RTE_OPEN, status, text])
        del rte_subfiles
    # BUG FIX: these cursor releases were unreachable (placed after `return`).
    del cursor
    return errors
def measurelength():
    """Check each county road / FC street for agreement between its M-measure
    range, its shape length (in miles) and its RTE_LEN attribute.

    If the measures and the shape agree but RTE_LEN is stale, RTE_LEN is
    silently repaired in place; otherwise the mismatch is reported.
    Returns rows of [RTE_ID, measure length, shape length, RTE_LEN].
    """
    cursor = arcpy.UpdateCursor(roadways, where)
    errors = []
    for row in cursor:
        rte_id = row.RTE_ID
        geom = row.shape
        ext = geom.extent
        Mmin = round(ext.MMin, 3)
        Mmax = round(ext.MMax, 3)
        Mdiff = abs(Mmax - Mmin)
        # 0.000621371: meters-to-miles factor (assumes a meter-based projection).
        wholelen = geom.length * .000621371
        shp_len = round(wholelen, 3)
        rte_len = row.RTE_LEN
        # Discrepancy between shape length and measure range.
        testlen = abs(shp_len - Mdiff)
        # NOTE(review): comparing RTE_LEN against the *discrepancy* (testlen)
        # looks suspicious -- abs(rte_len - shp_len) was more likely intended.
        # Preserved as-is; confirm before changing the repair condition.
        if testlen <= .003 and abs(rte_len - testlen) > .003:
            row.setValue("RTE_LEN", wholelen)
            cursor.updateRow(row)
        # The original had three elif branches appending the identical row;
        # they are merged here with `or` (evaluation order preserved).
        elif testlen > .003 or abs(rte_len - Mdiff) > .003 or abs(shp_len - rte_len) > .003:
            errors.append([rte_id, Mdiff, shp_len, rte_len])
    # BUG FIX: the cursor release was unreachable (placed after `return`).
    del cursor
    return errors
def subfilelength():
    """Compare each route's line geometry (RTE_LEN total and M-measure range)
    against the LEN_OF_SECTION / BMP / EMP bookkeeping in SUBFILES.

    Returns rows of [RTE_ID, district, BMP, min M, EMP, max M, subfile
    length, RTE_LEN, description] for every mismatch found.
    """
    # Pass 1: per RTE_ID, accumulate the total RTE_LEN and the overall
    # measure range across the (possibly multi-part) roadway features.
    dictionary = {}
    cursor = arcpy.SearchCursor(roadways, where)
    for row in cursor:
        rte_id = row.RTE_ID
        part_len = row.RTE_LEN
        geom = row.shape
        ext = geom.extent
        Mmin = round(ext.MMin, 3)
        Mmax = round(ext.MMax, 3)
        if rte_id not in dictionary:
            dictionary[str(rte_id)] = [part_len, Mmin, Mmax]
        else:
            total, cur_min, cur_max = dictionary[rte_id]
            dictionary[str(rte_id)] = [total + part_len,
                                       min(cur_min, Mmin),
                                       max(cur_max, Mmax)]
    del cursor
    del row
    # Pass 2: walk each route's SUBFILES records in BMP order and compare.
    errors = []
    for i in dictionary.keys():
        firstflag = 0
        sublength = 0
        linelen, linemin, linemax = dictionary[i]
        cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + i + "'", "", "", "BMP A")
        for row in cursor:
            if firstflag == 0:
                bmp1 = row.BMP   # starting BMP of the route
                firstflag += 1
            bmp = row.BMP
            emp = row.EMP
            sublength += row.LEN_OF_SECTION
            dist = row.DISTRICT
            # Internal consistency of each SUBFILES record.
            if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
                # BUG FIX: row.OBJECTID is numeric; the original concatenated
                # it to a str without str() and raised TypeError when hit.
                errors.append([i, dist, bmp1, "", emp, "", sublength, "",
                               "BMP and EMP difference does not equal the LEN_OF_SECTION. OBJECTID: " + str(row.OBJECTID)])
        if firstflag == 0:
            # No SUBFILES records for this route: the original crashed with a
            # NameError below; skip the per-route checks instead.
            continue
        # Per-route checks (emp/dist refer to the last record in BMP order).
        if abs(linelen - sublength) > .003:
            errors.append([i, dist, bmp1, linemin, emp, linemax, sublength, linelen,
                           "RTE_LEN does not equal SUBFILES total LEN_OF_SECTION"])
        if abs(linemin - bmp1) > .001:
            errors.append([i, dist, bmp1, linemin, "", "", "", "",
                           "Line minimum measure does not equal starting BMP"])
        if abs(linemax - emp) > .001:
            errors.append([i, dist, "", "", emp, linemax, "", "",
                           "Line maximum measure does not equal ending EMP"])
        del cursor
    return errors
def assemblereport():
    """Run all four QC checks and write each result set to its own sheet of
    ErrorReport_<date>.xls in today's workspace folder."""
    book = xlwt.Workbook()
    # Sheet 1: city-boundary overlap errors.
    print "Overlap Errors..."
    overlapsheet = book.add_sheet("City Boundary Overlap")
    line = 0
    overlapsheet.write(line, 0, "The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp")
    line += 1
    overlapsheet.write(line, 0, "RTE_ID")
    overlapsheet.write(line, 1, "Overlap Length")
    overlapsheet.write(line, 2, "District Name")
    overlapsheet.write(line, 3, "District Number")
    line += 1
    overlaplist = overlap()
    for i in overlaplist:
        overlapsheet.write(line, 0, i[0])
        overlapsheet.write(line, 1, i[1])
        overlapsheet.write(line, 2, i[2])
        overlapsheet.write(line, 3, i[3])
        line += 1
    # Sheet 2: RTE_OPEN vs HIGHWAY_STATUS mismatches.
    print "Route Open Errors..."
    opensheet = book.add_sheet("Route Open")
    line = 0
    # NOTE(review): this header says ROADWAY_STATUS but routeopen() reads
    # the HIGHWAY_STATUS field -- confirm which name is correct.
    opensheet.write(line, 0, "The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES")
    line += 1
    opensheet.write(line, 0, "RTE_ID")
    opensheet.write(line, 1, "RTE_OPEN")
    opensheet.write(line, 2, "HIGHWAY_STATUS")
    opensheet.write(line, 3, "Description")
    line += 1
    openlist = routeopen()
    for i in openlist:
        opensheet.write(line, 0, i[0])
        opensheet.write(line, 1, i[1])
        opensheet.write(line, 2, i[2])
        opensheet.write(line, 3, i[3])
        line += 1
    # Sheet 3: geometry / measure / RTE_LEN disagreements.
    print "Geometry and Measure Errors..."
    geomsheet = book.add_sheet("Geometry and Measures")
    line = 0
    geomsheet.write(line, 0, "The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN")
    line += 1
    geomsheet.write(line, 0, "RTE_ID")
    geomsheet.write(line, 1, "Measures' Length")
    geomsheet.write(line, 2, "Shape Length")
    geomsheet.write(line, 3, "RTE_LEN")
    line += 1
    geomlist = measurelength()
    for i in geomlist:
        geomsheet.write(line, 0, i[0])
        geomsheet.write(line, 1, i[1])
        geomsheet.write(line, 2, i[2])
        geomsheet.write(line, 3, i[3])
        line += 1
    # Sheet 4: SUBFILES length bookkeeping errors.
    print "Subfile Length Errors..."
    subsheet = book.add_sheet("Subfile Lengths")
    line = 0
    subsheet.write(line, 0, "The following Route IDs contain an error between their line and SUBFILES lengths")
    line += 1
    subsheet.write(line, 0, "RTE_ID")
    subsheet.write(line, 1, "District")
    subsheet.write(line, 2, "BMP")
    subsheet.write(line, 3, "Min Measure")
    subsheet.write(line, 4, "EMP")
    subsheet.write(line, 5, "Max Measure")
    subsheet.write(line, 6, "Subfile Len")
    subsheet.write(line, 7, "RTE_LEN")
    subsheet.write(line, 8, "Description")
    line += 1
    sublist = subfilelength()
    for i in sublist:
        subsheet.write(line, 0, i[0])
        subsheet.write(line, 1, i[1])
        subsheet.write(line, 2, i[2])
        subsheet.write(line, 3, i[3])
        subsheet.write(line, 4, i[4])
        subsheet.write(line, 5, i[5])
        subsheet.write(line, 6, i[6])
        subsheet.write(line, 7, i[7])
        subsheet.write(line, 8, i[8])
        line += 1
    book.save(workspace + "\\ErrorReport_" + today + ".xls")
print "and away we go... " + str(now)
assemblereport()
print "that's all folks!" + str(now) | QC/old/OffSystem_v1.py | __file__ = 'OffSystem_v1'
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
qcfolder = "C:\\TxDOT\\QC\\OffSystem"
roadways = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
where = """ RTE_CLASS = '2' OR RTE_CLASS = '3' """
subfiles = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
cities = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
districts = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
workspace = qcfolder + "\\" + today
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
#print "Folder already exists for today. Please ether rename or delete the QC folder with today's date."
def overlap():
print "starting " + str(now)
arcpy.Select_analysis(roadways, workspace + "\\FC_Streets.shp", """ RTE_CLASS = '3' """)
arcpy.Erase_analysis(workspace + "\\FC_Streets.shp", cities, workspace + "\\FC_Streets_Errors.shp")
print "fc"
arcpy.Clip_analysis(roadways, cities, workspace + "\\City_Roads.shp")
print "City"
arcpy.Select_analysis(workspace + "\\City_Roads.shp", workspace + "\\County_Roads_Errors.shp", """ RTE_CLASS = '2' """)
print "cr select"
arcpy.Merge_management([workspace + "\\County_Roads_Errors.shp", workspace + "\\FC_Streets_Errors.shp"], workspace + "\\MergedErrors.shp")
print "merge"
arcpy.SpatialJoin_analysis(workspace + "\\MergedErrors.shp", districts, workspace + "\\City_OverlapErrors.shp")
print "SJ"
arcpy.Delete_management(workspace + "\\City_Roads.shp")
arcpy.Delete_management(workspace + "\\FC_Streets.shp")
arcpy.Delete_management(workspace + "\\County_Roads_Errors.shp")
arcpy.Delete_management(workspace + "\\FC_Streets_Errors.shp")
arcpy.Delete_management(workspace + "\\MergedErrors.shp")
print "end " + str(now)
errors = []
cursor = arcpy.UpdateCursor(workspace + "\\City_OverlapErrors.shp")
for row in cursor:
geom = row.shape
len = geom.length * .000621371
row.setValue("RTE_LEN", len)
cursor.updateRow(row)
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR]
errors.append(rowinfo)
del cursor
del row
return errors
def routeopen():
cursor = arcpy.SearchCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
if row.RTE_OPEN == 1:
rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for record in rte_subfiles:
status = record.HIGHWAY_STATUS
if status != 4:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append(status)
errorinfo.append("RTE_OPEN = 1 requires HIGHWAY_STATUS = 4")
errors.append(errorinfo)
elif row.RTE_OPEN == 0:
rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for record in rte_subfiles:
status = record.HIGHWAY_STATUS
if status != 0:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append(status)
errorinfo.append("RTE_OPEN = 0 requires HIGHWAY_STATUS = 0")
errors.append(errorinfo)
else:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append("N/A")
errorinfo.append("RTE_OPEN must be 1 or 0")
errors.append(errorinfo)
return errors
del cursor
del row
def measurelength():
cursor = arcpy.UpdateCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = round(wholelen, 3)
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if testlen <= .003 and abs(rte_len - testlen) > .003:
row.setValue("RTE_LEN", wholelen)
cursor.updateRow(row)
elif abs(shp_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(rte_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(shp_len - rte_len) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
else:
pass
return errors
del cursor
del row
def subfilelength():
dictionary = {}
cursor = arcpy.SearchCursor(roadways, where)
for row in cursor:
id = row.RTE_ID
len = row.RTE_LEN
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
if id not in dictionary.keys():
dictionary[str(id)] = [len, Mmin, Mmax]
else:
currentrecord = dictionary[id]
currentlength = currentrecord[0]
currentmin = currentrecord[1]
currentmax = currentrecord[2]
newlen = currentlength + len
if Mmin < currentmin:
currentmin = Mmin
if Mmax > currentmax:
currentmax = Mmax
dictionary[str(id)] = [newlen, currentmin, currentmax]
del cursor
del row
errors = []
for i in dictionary.keys():
firstflag = 0
sublength = 0
linevalues = dictionary[i]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + i + "'", "", "", "BMP A")
for row in cursor:
if firstflag == 0:
bmp1 = row.BMP
firstflag += 1
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp-bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. OBJECTID: " + row.OBJECTID)
errors.append(errorinfo)
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
if abs(linemin - bmp1) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
if abs(linemax - emp) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
return errors
def assemblereport():
book = xlwt.Workbook()
print "Overlap Errors..."
overlapsheet = book.add_sheet("City Boundary Overlap")
line = 0
overlapsheet.write(line, 0, "The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp")
line += 1
overlapsheet.write(line, 0, "RTE_ID")
overlapsheet.write(line, 1, "Overlap Length")
overlapsheet.write(line, 2, "District Name")
overlapsheet.write(line, 3, "District Number")
line += 1
overlaplist = overlap()
for i in overlaplist:
overlapsheet.write(line, 0, i[0])
overlapsheet.write(line, 1, i[1])
overlapsheet.write(line, 2, i[2])
overlapsheet.write(line, 3, i[3])
line += 1
print "Route Open Errors..."
opensheet = book.add_sheet("Route Open")
line = 0
opensheet.write(line, 0, "The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES")
line += 1
opensheet.write(line, 0, "RTE_ID")
opensheet.write(line, 1, "RTE_OPEN")
opensheet.write(line, 2, "HIGHWAY_STATUS")
opensheet.write(line, 3, "Description")
line += 1
openlist = routeopen()
for i in openlist:
opensheet.write(line, 0, i[0])
opensheet.write(line, 1, i[1])
opensheet.write(line, 2, i[2])
opensheet.write(line, 3, i[3])
line += 1
print "Geometry and Measure Errors..."
geomsheet = book.add_sheet("Geometry and Measures")
line = 0
geomsheet.write(line, 0, "The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN")
line += 1
geomsheet.write(line, 0, "RTE_ID")
geomsheet.write(line, 1, "Measures' Length")
geomsheet.write(line, 2, "Shape Length")
geomsheet.write(line, 3, "RTE_LEN")
line += 1
geomlist = measurelength()
for i in geomlist:
geomsheet.write(line, 0, i[0])
geomsheet.write(line, 1, i[1])
geomsheet.write(line, 2, i[2])
geomsheet.write(line, 3, i[3])
line += 1
print "Subfile Length Errors..."
subsheet = book.add_sheet("Subfile Lengths")
line = 0
subsheet.write(line, 0, "The following Route IDs contain an error between their line and SUBFILES lengths")
line += 1
subsheet.write(line, 0, "RTE_ID")
subsheet.write(line, 1, "District")
subsheet.write(line, 2, "BMP")
subsheet.write(line, 3, "Min Measure")
subsheet.write(line, 4, "EMP")
subsheet.write(line, 5, "Max Measure")
subsheet.write(line, 6, "Subfile Len")
subsheet.write(line, 7, "RTE_LEN")
subsheet.write(line, 8, "Description")
line += 1
sublist = subfilelength()
for i in sublist:
subsheet.write(line, 0, i[0])
subsheet.write(line, 1, i[1])
subsheet.write(line, 2, i[2])
subsheet.write(line, 3, i[3])
subsheet.write(line, 4, i[4])
subsheet.write(line, 5, i[5])
subsheet.write(line, 6, i[6])
subsheet.write(line, 7, i[7])
subsheet.write(line, 8, i[8])
line += 1
book.save(workspace + "\\ErrorReport_" + today + ".xls")
print "and away we go... " + str(now)
assemblereport()
print "that's all folks!" + str(now) | 0.096514 | 0.162247 |
import mock
from karborclient.tests.unit import base
from karborclient.tests.unit.v1 import fakes
cs = fakes.FakeClient()
mock_request_return = ({}, {'quota': {'plans': 50}})
class QuotasTest(base.TestCaseShell):
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_quota_update(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.update(fakes.PROJECT_ID, {'plans': 50})
mock_request.assert_called_with(
'PUT',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
data={'quota': {'plans': 50}}, headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_quota_update_with_none(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.update(fakes.PROJECT_ID, {'plans': None})
mock_request.assert_called_with(
'PUT',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
data={'quota': {'plans': 50}}, headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, detail=False)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_headers(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, False, session_id='fake_session_id')
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
headers={'X-Configuration-Session': 'fake_session_id'})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_detail(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, detail=True)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/detail'.format(
project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_default(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.defaults(fakes.PROJECT_ID)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/defaults'.format(
project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_default_with_headers(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.defaults(fakes.PROJECT_ID, session_id='fake_session_id')
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/defaults'.format(
project_id=fakes.PROJECT_ID),
headers={'X-Configuration-Session': 'fake_session_id'}) | karborclient/tests/unit/v1/test_quotas.py |
import mock
from karborclient.tests.unit import base
from karborclient.tests.unit.v1 import fakes
cs = fakes.FakeClient()
mock_request_return = ({}, {'quota': {'plans': 50}})
class QuotasTest(base.TestCaseShell):
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_quota_update(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.update(fakes.PROJECT_ID, {'plans': 50})
mock_request.assert_called_with(
'PUT',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
data={'quota': {'plans': 50}}, headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_quota_update_with_none(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.update(fakes.PROJECT_ID, {'plans': None})
mock_request.assert_called_with(
'PUT',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
data={'quota': {'plans': 50}}, headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, detail=False)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_headers(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, False, session_id='fake_session_id')
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}'.format(project_id=fakes.PROJECT_ID),
headers={'X-Configuration-Session': 'fake_session_id'})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_detail(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.get(fakes.PROJECT_ID, detail=True)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/detail'.format(
project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_with_default(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.defaults(fakes.PROJECT_ID)
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/defaults'.format(
project_id=fakes.PROJECT_ID),
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_quota_default_with_headers(self, mock_request):
mock_request.return_value = mock_request_return
cs.quotas.defaults(fakes.PROJECT_ID, session_id='fake_session_id')
mock_request.assert_called_with(
'GET',
'/quotas/{project_id}/defaults'.format(
project_id=fakes.PROJECT_ID),
headers={'X-Configuration-Session': 'fake_session_id'}) | 0.584271 | 0.161849 |
import json
import logging
import optparse
import os
import shutil
import tempfile
import urllib2
from xml.etree.ElementTree import tostring
try:
# For Python 3.0 and later
from shutil import unpack_archive
except ImportError:
# Fall back to Python 2 import
from setuptools.archive_util import unpack_archive
try:
# For Python 3.0 and later
from urllib.request import urlretrieve
except ImportError:
# Fall back to Python 2 imports
from urllib import urlretrieve
_log_name = __name__
if _log_name == '__builtin__':
_log_name = 'toolshed.installed.manual.data.manager'
log = logging.getLogger( _log_name )
# --- These methods are called by/within the Galaxy Application
def exec_before_job( app, inp_data, out_data, param_dict, tool=None, **kwd ):
# Look for any data tables that haven't been defined for this data manager before and dynamically add them to Galaxy
param_dict = dict( **param_dict )
data_tables_param = param_dict.get( 'data_tables', [] )
if not isinstance( data_tables_param, list ):
data_tables_param = [data_tables_param]
if tool:
tool_shed_repository = tool.tool_shed_repository
else:
tool_shed_repository = None
tdtm = None
data_manager = app.data_managers.get_manager( tool.data_manager_id, None )
for data_table_param in data_tables_param:
data_table_name = data_table_param.get( 'data_table_name', None )
if data_table_name:
# get data table managed by this data Manager
data_table = app.tool_data_tables.get_tables().get( str( data_table_name ), None )
if data_table:
data_table_filename = data_table.get_filename_for_source( data_manager, None )
if not data_table_filename:
if tdtm is None:
from tool_shed.tools import data_table_manager
tdtm = data_table_manager.ToolDataTableManager( app )
target_dir, tool_path, relative_target_dir = tdtm.get_target_install_dir( tool_shed_repository )
# Dynamically add this data table
log.debug( "Attempting to dynamically create a missing Tool Data Table named %s." % data_table_name )
repo_info = tdtm.generate_repository_info_elem_from_repository( tool_shed_repository, parent_elem=None )
if repo_info is not None:
repo_info = tostring( repo_info )
tmp_file = tempfile.NamedTemporaryFile()
tmp_file.write( __get_new_xml_definition( app, data_table, data_manager, repo_info, target_dir ) )
tmp_file.flush()
app.tool_data_tables.add_new_entries_from_config_file( tmp_file.name, None, app.config.shed_tool_data_table_config, persist=True )
tmp_file.close()
def __get_new_xml_definition( app, data_table, data_manager, repo_info=None, location_file_dir=None ):
sub_dict = { 'table_name': data_table.name, 'comment_char': '', 'columns': '', 'file_path': '' }
sub_dict.update( data_manager.get_tool_shed_repository_info_dict() )
if data_table.comment_char:
sub_dict['comment_char'] = 'comment_char="%s"' % ( data_table.comment_char )
for i, name in enumerate( data_table.get_column_name_list() ):
if name is not None:
sub_dict['columns'] = "%s\n%s" % ( sub_dict['columns'], '<column name="%s" index="%s" />' % ( name, i ) )
location_file_dir = location_file_dir or app.config.galaxy_data_manager_data_path
for filename in data_table.filenames.keys():
sub_dict['file_path'] = os.path.basename( filename )
sub_dict['file_path'] = os.path.join( location_file_dir, sub_dict['file_path'] ) # os.path.abspath?
if not os.path.exists( sub_dict['file_path'] ):
# Create empty file
log.debug( "Attempting to create a missing location file %s." % sub_dict['file_path'] )
open( sub_dict['file_path'], 'wb+' ).close()
break
sub_dict[ 'repo_info' ] = repo_info or ''
return """
<tables><table name="%(table_name)s" %(comment_char)s>
%(columns)s
<file path="%(file_path)s" />
%(repo_info)s
</table></tables>
""" % sub_dict
def galaxy_code_get_available_data_tables( trans ):
# list of data tables
return [ ( x, x, False ) for x in trans.app.tool_data_tables.get_tables().keys() ]
def galaxy_code_get_available_data_table_columns( trans, data_table_name ):
return [ ( x, x, True ) for x in trans.app.tool_data_tables.get( data_table_name ).get_column_name_list() ]
# --- End Galaxy called Methods ---
def get_data_table_entries( params, galaxy_data_manager_data_path ):
rval = {}
data_tables = params.get( 'data_tables', [] )
for data_table in data_tables:
entry_dict = {}
for column in data_table.get( 'columns', [] ):
value = column.get( 'data_table_column_value', '' )
if column.get( 'is_path', {} ).get( 'is_path_selector', None ) == 'yes' and column.get( 'is_path', {} ).get( 'abspath', None ) == 'abspath':
value = os.path.abspath( os.path.join( galaxy_data_manager_data_path, value ) )
entry_dict[ column.get( 'data_table_column_name', '' ) ] = value
data_table_name = data_table['data_table_name']
rval[ data_table_name ] = rval.get( data_table_name, [] )
rval[ data_table_name ].append( entry_dict )
return rval
def get_file_content( params, target_directory ):
directory_content = params.get( 'directory_content', [] )
for content in directory_content:
target_path = os.path.join( target_directory, content.get( 'subdir', '' ) )
try:
os.makedirs( target_path )
except OSError:
pass
if content.get( 'file_source', {}).get( 'file_source_selector', None ) == 'URL':
( filename, headers ) = urlretrieve( content.get( 'file_source', {}).get( 'file_URL', None ) )
try:
bname = headers['Content-Disposition']
except KeyError:
bname = os.path.basename( urllib2.urlparse.urlsplit( content.get( 'file_source', {}).get( 'file_URL', None ) ).path )
else:
filename = content.get( 'file_source', {}).get( 'file_history', None )
bname = os.path.basename( filename )
file_action = content.get( 'file_action', {}).get( 'file_action_selector', None )
if file_action == 'unpack':
unpack_archive( filename, target_path )
else:
filename_override = content.get( 'file_action', {}).get( 'filename_override', None )
if filename_override:
target_path = os.path.join( target_path, filename_override )
else:
target_path = os.path.join( target_path, bname )
shutil.copyfile( filename, target_path )
return len( directory_content )
def main():
parser = optparse.OptionParser()
parser.add_option( '', '--galaxy_data_manager_data_path', dest='galaxy_data_manager_data_path', default='', help='Root path for galaxy_data_manager_data_path' )
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
data_table_entries = get_data_table_entries( params['param_dict'], options.galaxy_data_manager_data_path )
# save info to json file
open( filename, 'wb' ).write( json.dumps( { "data_tables": data_table_entries} ) )
get_file_content( params['param_dict'], target_directory )
if __name__ == "__main__":
main() | data_managers/data_manager_manual/data_manager/data_manager_manual.py |
import json
import logging
import optparse
import os
import shutil
import tempfile
import urllib2
from xml.etree.ElementTree import tostring
try:
# For Python 3.0 and later
from shutil import unpack_archive
except ImportError:
# Fall back to Python 2 import
from setuptools.archive_util import unpack_archive
try:
# For Python 3.0 and later
from urllib.request import urlretrieve
except ImportError:
# Fall back to Python 2 imports
from urllib import urlretrieve
_log_name = __name__
if _log_name == '__builtin__':
_log_name = 'toolshed.installed.manual.data.manager'
log = logging.getLogger( _log_name )
# --- These methods are called by/within the Galaxy Application
def exec_before_job( app, inp_data, out_data, param_dict, tool=None, **kwd ):
# Look for any data tables that haven't been defined for this data manager before and dynamically add them to Galaxy
param_dict = dict( **param_dict )
data_tables_param = param_dict.get( 'data_tables', [] )
if not isinstance( data_tables_param, list ):
data_tables_param = [data_tables_param]
if tool:
tool_shed_repository = tool.tool_shed_repository
else:
tool_shed_repository = None
tdtm = None
data_manager = app.data_managers.get_manager( tool.data_manager_id, None )
for data_table_param in data_tables_param:
data_table_name = data_table_param.get( 'data_table_name', None )
if data_table_name:
# get data table managed by this data Manager
data_table = app.tool_data_tables.get_tables().get( str( data_table_name ), None )
if data_table:
data_table_filename = data_table.get_filename_for_source( data_manager, None )
if not data_table_filename:
if tdtm is None:
from tool_shed.tools import data_table_manager
tdtm = data_table_manager.ToolDataTableManager( app )
target_dir, tool_path, relative_target_dir = tdtm.get_target_install_dir( tool_shed_repository )
# Dynamically add this data table
log.debug( "Attempting to dynamically create a missing Tool Data Table named %s." % data_table_name )
repo_info = tdtm.generate_repository_info_elem_from_repository( tool_shed_repository, parent_elem=None )
if repo_info is not None:
repo_info = tostring( repo_info )
tmp_file = tempfile.NamedTemporaryFile()
tmp_file.write( __get_new_xml_definition( app, data_table, data_manager, repo_info, target_dir ) )
tmp_file.flush()
app.tool_data_tables.add_new_entries_from_config_file( tmp_file.name, None, app.config.shed_tool_data_table_config, persist=True )
tmp_file.close()
def __get_new_xml_definition( app, data_table, data_manager, repo_info=None, location_file_dir=None ):
sub_dict = { 'table_name': data_table.name, 'comment_char': '', 'columns': '', 'file_path': '' }
sub_dict.update( data_manager.get_tool_shed_repository_info_dict() )
if data_table.comment_char:
sub_dict['comment_char'] = 'comment_char="%s"' % ( data_table.comment_char )
for i, name in enumerate( data_table.get_column_name_list() ):
if name is not None:
sub_dict['columns'] = "%s\n%s" % ( sub_dict['columns'], '<column name="%s" index="%s" />' % ( name, i ) )
location_file_dir = location_file_dir or app.config.galaxy_data_manager_data_path
for filename in data_table.filenames.keys():
sub_dict['file_path'] = os.path.basename( filename )
sub_dict['file_path'] = os.path.join( location_file_dir, sub_dict['file_path'] ) # os.path.abspath?
if not os.path.exists( sub_dict['file_path'] ):
# Create empty file
log.debug( "Attempting to create a missing location file %s." % sub_dict['file_path'] )
open( sub_dict['file_path'], 'wb+' ).close()
break
sub_dict[ 'repo_info' ] = repo_info or ''
return """
<tables><table name="%(table_name)s" %(comment_char)s>
%(columns)s
<file path="%(file_path)s" />
%(repo_info)s
</table></tables>
""" % sub_dict
def galaxy_code_get_available_data_tables( trans ):
# list of data tables
return [ ( x, x, False ) for x in trans.app.tool_data_tables.get_tables().keys() ]
def galaxy_code_get_available_data_table_columns( trans, data_table_name ):
return [ ( x, x, True ) for x in trans.app.tool_data_tables.get( data_table_name ).get_column_name_list() ]
# --- End Galaxy called Methods ---
def get_data_table_entries( params, galaxy_data_manager_data_path ):
rval = {}
data_tables = params.get( 'data_tables', [] )
for data_table in data_tables:
entry_dict = {}
for column in data_table.get( 'columns', [] ):
value = column.get( 'data_table_column_value', '' )
if column.get( 'is_path', {} ).get( 'is_path_selector', None ) == 'yes' and column.get( 'is_path', {} ).get( 'abspath', None ) == 'abspath':
value = os.path.abspath( os.path.join( galaxy_data_manager_data_path, value ) )
entry_dict[ column.get( 'data_table_column_name', '' ) ] = value
data_table_name = data_table['data_table_name']
rval[ data_table_name ] = rval.get( data_table_name, [] )
rval[ data_table_name ].append( entry_dict )
return rval
def get_file_content( params, target_directory ):
directory_content = params.get( 'directory_content', [] )
for content in directory_content:
target_path = os.path.join( target_directory, content.get( 'subdir', '' ) )
try:
os.makedirs( target_path )
except OSError:
pass
if content.get( 'file_source', {}).get( 'file_source_selector', None ) == 'URL':
( filename, headers ) = urlretrieve( content.get( 'file_source', {}).get( 'file_URL', None ) )
try:
bname = headers['Content-Disposition']
except KeyError:
bname = os.path.basename( urllib2.urlparse.urlsplit( content.get( 'file_source', {}).get( 'file_URL', None ) ).path )
else:
filename = content.get( 'file_source', {}).get( 'file_history', None )
bname = os.path.basename( filename )
file_action = content.get( 'file_action', {}).get( 'file_action_selector', None )
if file_action == 'unpack':
unpack_archive( filename, target_path )
else:
filename_override = content.get( 'file_action', {}).get( 'filename_override', None )
if filename_override:
target_path = os.path.join( target_path, filename_override )
else:
target_path = os.path.join( target_path, bname )
shutil.copyfile( filename, target_path )
return len( directory_content )
def main():
parser = optparse.OptionParser()
parser.add_option( '', '--galaxy_data_manager_data_path', dest='galaxy_data_manager_data_path', default='', help='Root path for galaxy_data_manager_data_path' )
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
data_table_entries = get_data_table_entries( params['param_dict'], options.galaxy_data_manager_data_path )
# save info to json file
open( filename, 'wb' ).write( json.dumps( { "data_tables": data_table_entries} ) )
get_file_content( params['param_dict'], target_directory )
if __name__ == "__main__":
main() | 0.253399 | 0.185301 |
import logging
import threading
import time
from playsound import playsound
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from sshtunnel import HandlerSSHTunnelForwarderError, SSHTunnelForwarder
class CommentResourceAccess:
"""A data loader class that expands a truncated pull request comment from GHTorrent MongoDB.
Args:
ssh_host (str): SSH tunnel host
ssh_port (int): SSH tunnel port number
ssh_username (str): SSH tunnel username
ssh_pkey (str): Path to the SSH private key
ssh_private_key_password (str): password to the SSH private key
db_host (str): MongoDB host
db_port (int): MongoDB port number
db_username (str): MongoDB username
db_password (str): MongoDB password
db (str): MongoDB database
error_alert_sound_file (str): A path pointing to the error alert sound.
"""
# Static variables
server = None
__lock = threading.Lock()
ssh_tunnel_error_count = 0
show_ssh_tunnel_warning = True
def __init__(self,
ssh_host: str,
ssh_port: int,
ssh_username: str,
ssh_pkey: str,
ssh_private_key_password: str,
db_host: str,
db_port: int,
db_username: str,
db_password: str,
db: str,
error_alert_sound_file: str):
self.logger = logging.getLogger(self.__class__.__name__)
self.server = SSHTunnelForwarder((ssh_host, ssh_port),
ssh_username=ssh_username,
ssh_pkey=ssh_pkey,
ssh_private_key_password=ssh_private_key_password,
remote_bind_address=(db_host, db_port),
logger=logging.getLogger('SSHTunnelForwarder'))
# When server starts/restarts, run a check to ensure the tunnel is working.
self.server.skip_tunnel_checkup = False
self.db_username = db_username
self.db_password = <PASSWORD>
self.db = db
self.error_alert_sound_file = error_alert_sound_file
self.mongo_client = None
self.collection = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.mongo_client != None:
self.mongo_client.close()
self.server.stop() # Close SSH tunnel
def __get_connection(self):
if self.server.is_active and self.collection != None:
return self.collection
# Using a thread lock to avoid creating multiple SSH tunnels.
with self.__lock:
if not self.server.is_active:
self.logger.info(
f'SSH Tunnel is not active, connecting to {self.server.ssh_username}@{self.server.ssh_host}:{self.server.ssh_port}...')
try:
self.server.restart()
except HandlerSSHTunnelForwarderError as e:
self.ssh_tunnel_error_count += 1
self.logger.error(
f'Cannot establish SSH Tunnel {self.server.ssh_username}@{self.server.ssh_host}:{self.server.ssh_port}, error: {e}')
# This is to get around a bug in SSHTunnel, where threads spawned during start() do not get shutdown if the tunnel id down.
# https://github.com/pahaz/sshtunnel/issues/170
for key, _ in self.server.tunnel_is_up.items():
self.server.tunnel_is_up[key] = True
self.server.stop() # Thus setting is_active = False.
raise
self.mongo_client = MongoClient('127.0.0.1',
self.server.local_bind_port,
username=self.db_username,
password=self.db_password,
authSource=self.db,
authMechanism='SCRAM-SHA-1')
mongo_db = self.mongo_client[self.db]
self.collection = mongo_db['pull_request_comments']
self.logger.info(
f'Connecting to MongoDB 127.0.0.1:{self.server.local_bind_port}.')
# The ismaster command is cheap and does not require auth.
self.mongo_client.admin.command('ismaster')
self.logger.info('Successfully connected to MongoDB server.')
return self.collection
def load(self, owner: str, repo: str, pullreq_id: int, comment_id: int, comment: str):
    """Load the full comment.

    Args:
        owner (str): GitHub repository owner.
        repo (str): GitHub repository name.
        pullreq_id (int): Pull request ID.
        comment_id (int): Pull request comment ID.
        comment (str): Existing comment in the dataset.

    Returns:
        str: The full comment text; the existing (possibly truncated)
        ``comment`` when the SSH tunnel is permanently down; or None if the
        comment ID does not exist in MongoDB (e.g. the comment was deleted).
    """
    query = {"owner": owner,
             "repo": repo,
             "pullreq_id": pullreq_id,
             "id": comment_id}
    while True:
        try:
            # After repeated tunnel failures, give up on MongoDB and fall
            # back to the comment text already present in the dataset.
            if self.ssh_tunnel_error_count >= 3:
                if self.show_ssh_tunnel_warning:
                    self.logger.warning('SSH Tunnel is down, reached max number of attempts, returning the existing comment.')
                    self.show_ssh_tunnel_warning = False  # Avoid repetitive warning log.
                return comment
            collection = self.__get_connection()
            doc = collection.find_one(query)
            break
        except Exception as e:
            playsound(self.error_alert_sound_file, False)
            self.logger.exception(
                f'Failed to load comment, owner: {owner}, repo: {repo}, pullreq_id: {pullreq_id}, comment_id: {comment_id}, retry after 5 seconds.')
            # Drop the stale client so the next attempt reconnects cleanly.
            if isinstance(e, ServerSelectionTimeoutError):
                try:
                    if self.mongo_client is not None:
                        self.mongo_client.close()
                except Exception:
                    self.logger.exception('Failed to close Mongo Client.')
            time.sleep(5)
    if doc is not None:
        return doc['body']
    else:
        return None  # When the Pull Request Comment has been deleted.
import threading
import time
from playsound import playsound
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from sshtunnel import HandlerSSHTunnelForwarderError, SSHTunnelForwarder
class CommentResourceAccess:
    """A data loader class that expands a truncated pull request comment from GHTorrent MongoDB.

    Args:
        ssh_host (str): SSH tunnel host
        ssh_port (int): SSH tunnel port number
        ssh_username (str): SSH tunnel username
        ssh_pkey (str): Path to the SSH private key
        ssh_private_key_password (str): password to the SSH private key
        db_host (str): MongoDB host
        db_port (int): MongoDB port number
        db_username (str): MongoDB username
        db_password (str): MongoDB password
        db (str): MongoDB database
        error_alert_sound_file (str): A path pointing to the error alert sound.
    """

    # Class-level state. NOTE: ``server`` is shadowed by an instance attribute
    # in __init__; ``ssh_tunnel_error_count += 1`` likewise creates an
    # instance attribute on first failure (original behavior, kept as-is).
    server = None
    __lock = threading.Lock()
    ssh_tunnel_error_count = 0
    show_ssh_tunnel_warning = True

    def __init__(self,
                 ssh_host: str,
                 ssh_port: int,
                 ssh_username: str,
                 ssh_pkey: str,
                 ssh_private_key_password: str,
                 db_host: str,
                 db_port: int,
                 db_username: str,
                 db_password: str,
                 db: str,
                 error_alert_sound_file: str):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.server = SSHTunnelForwarder((ssh_host, ssh_port),
                                         ssh_username=ssh_username,
                                         ssh_pkey=ssh_pkey,
                                         ssh_private_key_password=ssh_private_key_password,
                                         remote_bind_address=(db_host, db_port),
                                         logger=logging.getLogger('SSHTunnelForwarder'))
        # When server starts/restarts, run a check to ensure the tunnel is working.
        self.server.skip_tunnel_checkup = False
        self.db_username = db_username
        # Fix: the original assigned the anonymization placeholder
        # ``<PASSWORD>`` (a syntax error); store the constructor argument.
        self.db_password = db_password
        self.db = db
        self.error_alert_sound_file = error_alert_sound_file
        self.mongo_client = None
        self.collection = None

    def __enter__(self):
        """Enter the runtime context; connections are opened lazily."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the Mongo client (if any) and shut down the SSH tunnel."""
        if self.mongo_client is not None:
            self.mongo_client.close()
        self.server.stop()  # Close SSH tunnel

    def __get_connection(self):
        """Return the collection, (re)establishing tunnel/client if needed.

        Raises:
            HandlerSSHTunnelForwarderError: if the tunnel cannot restart.
        """
        if self.server.is_active and self.collection is not None:
            return self.collection
        # Using a thread lock to avoid creating multiple SSH tunnels.
        with self.__lock:
            if not self.server.is_active:
                self.logger.info(
                    f'SSH Tunnel is not active, connecting to {self.server.ssh_username}@{self.server.ssh_host}:{self.server.ssh_port}...')
                try:
                    self.server.restart()
                except HandlerSSHTunnelForwarderError as e:
                    self.ssh_tunnel_error_count += 1
                    self.logger.error(
                        f'Cannot establish SSH Tunnel {self.server.ssh_username}@{self.server.ssh_host}:{self.server.ssh_port}, error: {e}')
                    # This is to get around a bug in SSHTunnel, where threads spawned
                    # during start() do not get shutdown if the tunnel is down.
                    # https://github.com/pahaz/sshtunnel/issues/170
                    for key in self.server.tunnel_is_up:
                        self.server.tunnel_is_up[key] = True
                    self.server.stop()  # Thus setting is_active = False.
                    raise
            self.mongo_client = MongoClient('127.0.0.1',
                                            self.server.local_bind_port,
                                            username=self.db_username,
                                            password=self.db_password,
                                            authSource=self.db,
                                            authMechanism='SCRAM-SHA-1')
            mongo_db = self.mongo_client[self.db]
            self.collection = mongo_db['pull_request_comments']
            self.logger.info(
                f'Connecting to MongoDB 127.0.0.1:{self.server.local_bind_port}.')
            # The ismaster command is cheap and does not require auth.
            self.mongo_client.admin.command('ismaster')
            self.logger.info('Successfully connected to MongoDB server.')
            return self.collection

    def load(self, owner: str, repo: str, pullreq_id: int, comment_id: int, comment: str):
        """Load the full comment.

        Args:
            owner (str): GitHub repository owner.
            repo (str): GitHub repository name.
            pullreq_id (int): Pull request ID.
            comment_id (int): Pull request comment ID.
            comment (str): Existing comment in the dataset.

        Returns:
            str: The full comment text; ``comment`` when the tunnel is
            permanently down; None if the comment ID is not in MongoDB.
        """
        query = {"owner": owner,
                 "repo": repo,
                 "pullreq_id": pullreq_id,
                 "id": comment_id}
        while True:
            try:
                if self.ssh_tunnel_error_count >= 3:
                    if self.show_ssh_tunnel_warning:
                        self.logger.warning('SSH Tunnel is down, reached max number of attempts, returning the existing comment.')
                        self.show_ssh_tunnel_warning = False  # Avoid repetitive warning log.
                    return comment
                collection = self.__get_connection()
                doc = collection.find_one(query)
                break
            except Exception as e:
                playsound(self.error_alert_sound_file, False)
                self.logger.exception(
                    f'Failed to load comment, owner: {owner}, repo: {repo}, pullreq_id: {pullreq_id}, comment_id: {comment_id}, retry after 5 seconds.')
                if isinstance(e, ServerSelectionTimeoutError):
                    try:
                        if self.mongo_client is not None:
                            self.mongo_client.close()
                    except Exception:
                        self.logger.exception('Failed to close Mongo Client.')
                time.sleep(5)
        if doc is not None:
            return doc['body']
        else:
            return None  # When the Pull Request Comment has been deleted.
try:
import gsdl2 as pygame
except:
import pygame
import random
class MapGrid():
    """A random 0/1 noise grid plus a cellular-automaton smoothing pass."""

    def __init__(self, map_width, map_height):
        # set map values
        self.map_width = map_width
        # Fix: the original assigned map_width here, silently producing a
        # square grid regardless of the requested height.
        self.map_height = map_height
        # generate outside rooms
        self.outside_terrain_grid = self._generate_empty_noise_grid(self.map_width, self.map_height)

    def _generate_empty_noise_grid(self, map_width, map_height):
        '''
        creates a new 2d array with the given specs
        and filled with random 1s and 0s
        '''
        new_map_grid = []  # create our new list
        for x in range(map_width):
            new_map_grid.append([])  # add our columns to the array
            for y in range(map_height):
                new_map_grid[x].append(random.choice([0, 1]))  # fill in our rows
        return new_map_grid

    def _generate_outside_terrain(self, empty_outside_terrain_grid, number_of_generations):
        '''
        creates a bubble effect with cellular automaton

        Returns a new grid after ``number_of_generations`` smoothing steps
        (the input grid itself when the count is 0).
        '''
        grid = empty_outside_terrain_grid
        for x in range(number_of_generations):
            next_grid = []
            for column_index, column in enumerate(grid):
                next_column = []
                next_grid.append(next_column)
                for tile_index, tile in enumerate(column):
                    # Get the surrounding tile values for each tile.
                    # NOTE: index -1 wraps to the opposite edge (Python
                    # semantics) while out-of-range high indices are treated
                    # as 0 via IndexError — original behavior, kept.
                    top_left = grid[column_index - 1][tile_index - 1]
                    top_mid = grid[column_index][tile_index - 1]
                    try:
                        top_right = grid[column_index + 1][tile_index - 1]
                    except IndexError:
                        top_right = 0
                    center_left = grid[column_index - 1][tile_index]
                    center_mid = grid[column_index][tile_index]
                    try:
                        center_right = grid[column_index + 1][tile_index]
                    except IndexError:
                        center_right = 0
                    try:
                        bottom_left = grid[column_index - 1][tile_index + 1]
                    except IndexError:
                        bottom_left = 0
                    try:
                        bottom_mid = grid[column_index][tile_index + 1]
                    except IndexError:
                        bottom_mid = 0
                    try:
                        bottom_right = grid[column_index + 1][tile_index + 1]
                    except IndexError:
                        bottom_right = 0
                    close_neighbors = (top_mid + center_left + center_mid +
                                       center_right + bottom_mid)
                    far_neighbors = (top_left + top_right +
                                     bottom_left + bottom_right)
                    number_of_neighbors = close_neighbors + far_neighbors
                    # decide what the next cell will be based on these rules
                    if number_of_neighbors > random.choice([3, 4, 5]):
                        next_cell = 1
                    else:
                        next_cell = 0
                    if close_neighbors > 3:
                        next_cell = 1
                    # create the new cell
                    next_column.append(next_cell)
            grid = next_grid
        # Fix: returning ``next_grid`` raised NameError for 0 generations;
        # ``grid`` is identical after >=1 generation and safe for 0.
        return grid
if __name__ == '__main__':
    # general map stats
    map_width = 280
    map_height = 180
    # start with one generation
    tile_size = 3
    map_grid = MapGrid(map_width, map_height)
    #print map_grid.outside_terrain_grid
    pygame.init()
    screen = pygame.display.set_mode((map_width * tile_size,map_height * tile_size))
    # NOTE(review): tiles are 1x1 surfaces but are positioned tile_size
    # pixels apart, so most of the window stays at the background color —
    # confirm this spacing is intended.
    one_tile = pygame.Surface((1, 1))
    one_tile.fill((0,0,0))
    zero_tile = pygame.Surface((1, 1))
    zero_tile.fill((255,255,255))
    colors = {0: zero_tile, 1: one_tile}
    background = pygame.Surface((map_width * tile_size,map_height * tile_size))
    clock = pygame.time.Clock()
    # Show the raw noise for the first ``timer`` frames, then start evolving.
    first_gen = True
    timer = 12
    running = True
    while running == True:
        clock.tick()  # there was 3 fps lock :)
        pygame.display.set_caption('2D Cellular Automaton Simulation 1.0 Mad Cloud Games - FPS: ' + str(clock.get_fps()))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        if first_gen:
            themap = map_grid.outside_terrain_grid
        else:
            # Advance the automaton by one generation per frame.
            themap = map_grid._generate_outside_terrain(themap, 1)
        for column_index, column in enumerate(themap):
            for tile_index, tile in enumerate(column):
                screen.blit(colors[tile], (tile_index * tile_size, column_index * tile_size))
        pygame.display.flip()
        if first_gen:
            timer -= 1
            if timer < 0:
                first_gen = False
pygame.quit() | demos/16_cellular.py | try:
import gsdl2 as pygame
except:
import pygame
import random
class MapGrid():
    """A random 0/1 noise grid plus a cellular-automaton smoothing pass."""

    def __init__(self, map_width, map_height):
        # set map values
        self.map_width = map_width
        # Fix: the original assigned map_width here, silently producing a
        # square grid regardless of the requested height.
        self.map_height = map_height
        # generate outside rooms
        self.outside_terrain_grid = self._generate_empty_noise_grid(self.map_width, self.map_height)

    def _generate_empty_noise_grid(self, map_width, map_height):
        '''
        creates a new 2d array with the given specs
        and filled with random 1s and 0s
        '''
        new_map_grid = []  # create our new list
        for x in range(map_width):
            new_map_grid.append([])  # add our columns to the array
            for y in range(map_height):
                new_map_grid[x].append(random.choice([0, 1]))  # fill in our rows
        return new_map_grid

    def _generate_outside_terrain(self, empty_outside_terrain_grid, number_of_generations):
        '''
        creates a bubble effect with cellular automaton

        Returns a new grid after ``number_of_generations`` smoothing steps
        (the input grid itself when the count is 0).
        '''
        grid = empty_outside_terrain_grid
        for x in range(number_of_generations):
            next_grid = []
            for column_index, column in enumerate(grid):
                next_column = []
                next_grid.append(next_column)
                for tile_index, tile in enumerate(column):
                    # Get the surrounding tile values for each tile.
                    # NOTE: index -1 wraps to the opposite edge (Python
                    # semantics) while out-of-range high indices are treated
                    # as 0 via IndexError — original behavior, kept.
                    top_left = grid[column_index - 1][tile_index - 1]
                    top_mid = grid[column_index][tile_index - 1]
                    try:
                        top_right = grid[column_index + 1][tile_index - 1]
                    except IndexError:
                        top_right = 0
                    center_left = grid[column_index - 1][tile_index]
                    center_mid = grid[column_index][tile_index]
                    try:
                        center_right = grid[column_index + 1][tile_index]
                    except IndexError:
                        center_right = 0
                    try:
                        bottom_left = grid[column_index - 1][tile_index + 1]
                    except IndexError:
                        bottom_left = 0
                    try:
                        bottom_mid = grid[column_index][tile_index + 1]
                    except IndexError:
                        bottom_mid = 0
                    try:
                        bottom_right = grid[column_index + 1][tile_index + 1]
                    except IndexError:
                        bottom_right = 0
                    close_neighbors = (top_mid + center_left + center_mid +
                                       center_right + bottom_mid)
                    far_neighbors = (top_left + top_right +
                                     bottom_left + bottom_right)
                    number_of_neighbors = close_neighbors + far_neighbors
                    # decide what the next cell will be based on these rules
                    if number_of_neighbors > random.choice([3, 4, 5]):
                        next_cell = 1
                    else:
                        next_cell = 0
                    if close_neighbors > 3:
                        next_cell = 1
                    # create the new cell
                    next_column.append(next_cell)
            grid = next_grid
        # Fix: returning ``next_grid`` raised NameError for 0 generations;
        # ``grid`` is identical after >=1 generation and safe for 0.
        return grid
if __name__ == '__main__':
    # general map stats
    map_width = 280
    map_height = 180
    # start with one generation
    tile_size = 3
    map_grid = MapGrid(map_width, map_height)
    #print map_grid.outside_terrain_grid
    pygame.init()
    screen = pygame.display.set_mode((map_width * tile_size,map_height * tile_size))
    # NOTE(review): tiles are 1x1 surfaces but are positioned tile_size
    # pixels apart, so most of the window stays at the background color —
    # confirm this spacing is intended.
    one_tile = pygame.Surface((1, 1))
    one_tile.fill((0,0,0))
    zero_tile = pygame.Surface((1, 1))
    zero_tile.fill((255,255,255))
    colors = {0: zero_tile, 1: one_tile}
    background = pygame.Surface((map_width * tile_size,map_height * tile_size))
    clock = pygame.time.Clock()
    # Show the raw noise for the first ``timer`` frames, then start evolving.
    first_gen = True
    timer = 12
    running = True
    while running == True:
        clock.tick()  # there was 3 fps lock :)
        pygame.display.set_caption('2D Cellular Automaton Simulation 1.0 Mad Cloud Games - FPS: ' + str(clock.get_fps()))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        if first_gen:
            themap = map_grid.outside_terrain_grid
        else:
            # Advance the automaton by one generation per frame.
            themap = map_grid._generate_outside_terrain(themap, 1)
        for column_index, column in enumerate(themap):
            for tile_index, tile in enumerate(column):
                screen.blit(colors[tile], (tile_index * tile_size, column_index * tile_size))
        pygame.display.flip()
        if first_gen:
            timer -= 1
            if timer < 0:
                first_gen = False
pygame.quit() | 0.242295 | 0.373847 |
from .plugin import Plugin
class Monitor(Plugin):
    """Plugin base that records a scalar statistic every iteration and epoch.

    Subclasses provide ``stat_name`` and ``_get_value``; this class maintains
    the latest value, an exponentially smoothed running average, and a
    per-epoch mean inside ``trainer.stats[stat_name]``, together with the
    format strings the logger plugin uses to render them.

    Fix: the original block's final line was fused with dataset metadata
    (a syntax error); the clean statement is restored. Logic is unchanged.
    """

    def __init__(self, running_average=True, epoch_average=True, smoothing=0.7,
                 precision=None, number_format=None, unit=''):
        if precision is None:
            precision = 4
        if number_format is None:
            number_format = '.{}f'.format(precision)
        # Prefix with ':' so it can be spliced into '{field:FORMAT}' specs.
        number_format = ':' + number_format
        super(Monitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
        self.smoothing = smoothing
        self.with_running_average = running_average
        self.with_epoch_average = epoch_average
        self.log_format = number_format
        self.log_unit = unit
        self.log_epoch_fields = None
        self.log_iter_fields = ['{last' + number_format + '}' + unit]
        if self.with_running_average:
            self.log_iter_fields += [' ({running_avg' + number_format + '}' + unit + ')']
        if self.with_epoch_average:
            self.log_epoch_fields = ['{epoch_mean' + number_format + '}' + unit]

    def register(self, trainer):
        """Attach to *trainer* and initialize this stat's entry in trainer.stats."""
        self.trainer = trainer
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['log_format'] = self.log_format
        stats['log_unit'] = self.log_unit
        stats['log_iter_fields'] = self.log_iter_fields
        if self.with_epoch_average:
            stats['log_epoch_fields'] = self.log_epoch_fields
        if self.with_epoch_average:
            # (sum, count) accumulator for the epoch mean.
            stats['epoch_stats'] = (0, 0)

    def iteration(self, *args):
        """Record the statistic for one iteration and update the averages."""
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['last'] = self._get_value(*args)
        if self.with_epoch_average:
            stats['epoch_stats'] = tuple(sum(t) for t in
                                         zip(stats['epoch_stats'], (stats['last'], 1)))
        if self.with_running_average:
            previous_avg = stats.get('running_avg', 0)
            stats['running_avg'] = previous_avg * self.smoothing + \
                stats['last'] * (1 - self.smoothing)

    def epoch(self, idx):
        """Finalize the epoch mean and reset the (sum, count) accumulator."""
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        if self.with_epoch_average:
            epoch_stats = stats['epoch_stats']
            stats['epoch_mean'] = epoch_stats[0] / epoch_stats[1]
            stats['epoch_stats'] = (0, 0)
class Monitor(Plugin):
    """Plugin base that records a scalar statistic every iteration and epoch.

    Subclasses provide ``stat_name`` and ``_get_value``; this class maintains
    the latest value, an exponentially smoothed running average, and a
    per-epoch mean inside ``trainer.stats[stat_name]``, together with the
    format strings the logger plugin uses to render them.

    Fix: the original block's final line was fused with dataset metadata
    (a syntax error); the clean statement is restored. Logic is unchanged.
    """

    def __init__(self, running_average=True, epoch_average=True, smoothing=0.7,
                 precision=None, number_format=None, unit=''):
        if precision is None:
            precision = 4
        if number_format is None:
            number_format = '.{}f'.format(precision)
        # Prefix with ':' so it can be spliced into '{field:FORMAT}' specs.
        number_format = ':' + number_format
        super(Monitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
        self.smoothing = smoothing
        self.with_running_average = running_average
        self.with_epoch_average = epoch_average
        self.log_format = number_format
        self.log_unit = unit
        self.log_epoch_fields = None
        self.log_iter_fields = ['{last' + number_format + '}' + unit]
        if self.with_running_average:
            self.log_iter_fields += [' ({running_avg' + number_format + '}' + unit + ')']
        if self.with_epoch_average:
            self.log_epoch_fields = ['{epoch_mean' + number_format + '}' + unit]

    def register(self, trainer):
        """Attach to *trainer* and initialize this stat's entry in trainer.stats."""
        self.trainer = trainer
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['log_format'] = self.log_format
        stats['log_unit'] = self.log_unit
        stats['log_iter_fields'] = self.log_iter_fields
        if self.with_epoch_average:
            stats['log_epoch_fields'] = self.log_epoch_fields
        if self.with_epoch_average:
            # (sum, count) accumulator for the epoch mean.
            stats['epoch_stats'] = (0, 0)

    def iteration(self, *args):
        """Record the statistic for one iteration and update the averages."""
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['last'] = self._get_value(*args)
        if self.with_epoch_average:
            stats['epoch_stats'] = tuple(sum(t) for t in
                                         zip(stats['epoch_stats'], (stats['last'], 1)))
        if self.with_running_average:
            previous_avg = stats.get('running_avg', 0)
            stats['running_avg'] = previous_avg * self.smoothing + \
                stats['last'] * (1 - self.smoothing)

    def epoch(self, idx):
        """Finalize the epoch mean and reset the (sum, count) accumulator."""
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        if self.with_epoch_average:
            epoch_stats = stats['epoch_stats']
            stats['epoch_mean'] = epoch_stats[0] / epoch_stats[1]
            stats['epoch_stats'] = (0, 0)
from common import RunningStyle, PasswordPolicyConf
from copy import deepcopy
from sys import platform
# Accepted aliases for selecting John the Ripper on the command line.
# NOTE(review): '<NAME>' looks like an anonymization placeholder left in the
# data — confirm the intended alias before relying on it.
john_nick_names = [
    'j', 'jtr', 'JTR', 'JtR', 'John', 'john', 'J', '<NAME>', 'Jtr'
]
# Accepted aliases for selecting Hashcat on the command line.
hc_nick_names = ['h', 'hc', 'HC', 'hashcat', 'H', 'Hashcat', 'Hc']
# For detailed information about what each field means, please refer to readme.md
# jtr's default configuration
jtr_default_config = {
    'running_style':
        RunningStyle.JTR,
    'max_password_length':
        127,  # input/output greater than this are ignored
    'min_cut_length':
        128,  # max_password_length + 1
    'm_threshold':
        2,
    'executable_path':
        "../JohnTheRipper/run/john"
        if platform != "win32" else "../JohnTheRipper/run/john.exe",
    'password_policy':
        PasswordPolicyConf(),
    'preprocess_path':
        '../data/preprocess/',
    'enable_regex':
        False,
    'debug':
        False,
    'binary_search_file_executable':  # pointing to the executable for binary searching a sorted file
        'look',
    'lookup_threshold':
        131073,  # 2^17 + 1
}
# hc's default configuration
hc_default_config = {
    'running_style':
        RunningStyle.HC,
    'max_password_length':
        255,  # input/output greater than this are ignored
    'min_cut_length':
        256,  # max_password_length + 1
    'm_threshold':
        2,
    'executable_path':
        "../HashcatRulesEngine/hcre"
        if platform != "win32" else "../HashcatRulesEngine/hcre.exe",
    'password_policy':
        PasswordPolicyConf(),
    'preprocess_path':
        '../data/preprocess/',
    'enable_regex':
        False,
    'debug':
        False,
    'binary_search_file_executable':  # pointing to the executable for binary searching a sorted file
        'look',
    'lookup_threshold':
        131073,  # 2^17 + 1
    'batch_size_of_words':
        1024 * 1024,
    'batch_size_of_rules':
        'auto',  # either an int or auto
}
# caution, unix default look only supports look up on file < 2GB
class Configuration():
    """Runtime configuration wrapper around a plain dictionary.

    On construction the dictionary is seeded from either the JtR or the
    Hashcat defaults (deep-copied so the module-level templates are never
    mutated) and then overlaid with any keyword overrides.
    """

    def __init__(self, running_style=RunningStyle.JTR, **kwargs):
        """Initialize a configuration dict.

        Args:
            running_style: either JTR/HC
            kwargs: optional args, set specific field in the dictionary.
        """
        if running_style == RunningStyle.HC:
            self.config = deepcopy(hc_default_config)
        else:
            self.config = deepcopy(jtr_default_config)
        self.config.update(kwargs)

    def __setitem__(self, key, item):
        self.config[key] = item

    def __getitem__(self, key):
        return self.config[key]

    def reset_to_hc(self, **kwargs):
        """Reset to the Hashcat defaults, then apply the given overrides."""
        self.config = deepcopy(hc_default_config)
        self.config.update(kwargs)

    def reset_to_jtr(self, **kwargs):
        """Reset to the JtR defaults, then apply the given overrides."""
        self.config = deepcopy(jtr_default_config)
        self.config.update(kwargs)

    def is_jtr(self):
        """True when currently running in JTR mode."""
        return self.config['running_style'] == RunningStyle.JTR

    def is_hc(self):
        """True when currently running in HC mode."""
        return self.config['running_style'] == RunningStyle.HC

    def short_config_string(self):
        """One-line summary: wordlist, rulelist, test set and mode."""
        return "{}(WL) {}(RL) {}(Testset) {}".format(
            self['wordlist_path']['name'], self['rulelist_path']['name'],
            self['pwlist_path']['name'], self['running_style'])

    def get_log_addr(self):
        """Log-file path derived from the active word/rule/password lists."""
        return "../results/demo_file-{}-{}-{}.log".format(
            self['wordlist_path']['name'], self['rulelist_path']['name'],
            self['pwlist_path']['name'])
RUNTIME_CONFIG = Configuration() | src/config.py |
from common import RunningStyle, PasswordPolicyConf
from copy import deepcopy
from sys import platform
# Accepted aliases for selecting John the Ripper on the command line.
# NOTE(review): '<NAME>' looks like an anonymization placeholder left in the
# data — confirm the intended alias before relying on it.
john_nick_names = [
    'j', 'jtr', 'JTR', 'JtR', 'John', 'john', 'J', '<NAME>', 'Jtr'
]
# Accepted aliases for selecting Hashcat on the command line.
hc_nick_names = ['h', 'hc', 'HC', 'hashcat', 'H', 'Hashcat', 'Hc']
# For detailed information about what each field means, please refer to readme.md
# jtr's default configuration
jtr_default_config = {
    'running_style':
        RunningStyle.JTR,
    'max_password_length':
        127,  # input/output greater than this are ignored
    'min_cut_length':
        128,  # max_password_length + 1
    'm_threshold':
        2,
    'executable_path':
        "../JohnTheRipper/run/john"
        if platform != "win32" else "../JohnTheRipper/run/john.exe",
    'password_policy':
        PasswordPolicyConf(),
    'preprocess_path':
        '../data/preprocess/',
    'enable_regex':
        False,
    'debug':
        False,
    'binary_search_file_executable':  # pointing to the executable for binary searching a sorted file
        'look',
    'lookup_threshold':
        131073,  # 2^17 + 1
}
# hc's default configuration
hc_default_config = {
    'running_style':
        RunningStyle.HC,
    'max_password_length':
        255,  # input/output greater than this are ignored
    'min_cut_length':
        256,  # max_password_length + 1
    'm_threshold':
        2,
    'executable_path':
        "../HashcatRulesEngine/hcre"
        if platform != "win32" else "../HashcatRulesEngine/hcre.exe",
    'password_policy':
        PasswordPolicyConf(),
    'preprocess_path':
        '../data/preprocess/',
    'enable_regex':
        False,
    'debug':
        False,
    'binary_search_file_executable':  # pointing to the executable for binary searching a sorted file
        'look',
    'lookup_threshold':
        131073,  # 2^17 + 1
    'batch_size_of_words':
        1024 * 1024,
    'batch_size_of_rules':
        'auto',  # either an int or auto
}
# caution, unix default look only supports look up on file < 2GB
class Configuration():
    """Runtime configuration wrapper around a plain dictionary.

    On construction the dictionary is seeded from either the JtR or the
    Hashcat defaults (deep-copied so the module-level templates are never
    mutated) and then overlaid with any keyword overrides.
    """

    def __init__(self, running_style=RunningStyle.JTR, **kwargs):
        """Initialize a configuration dict.

        Args:
            running_style: either JTR/HC
            kwargs: optional args, set specific field in the dictionary.
        """
        if running_style == RunningStyle.HC:
            self.config = deepcopy(hc_default_config)
        else:
            self.config = deepcopy(jtr_default_config)
        self.config.update(kwargs)

    def __setitem__(self, key, item):
        self.config[key] = item

    def __getitem__(self, key):
        return self.config[key]

    def reset_to_hc(self, **kwargs):
        """Reset to the Hashcat defaults, then apply the given overrides."""
        self.config = deepcopy(hc_default_config)
        self.config.update(kwargs)

    def reset_to_jtr(self, **kwargs):
        """Reset to the JtR defaults, then apply the given overrides."""
        self.config = deepcopy(jtr_default_config)
        self.config.update(kwargs)

    def is_jtr(self):
        """True when currently running in JTR mode."""
        return self.config['running_style'] == RunningStyle.JTR

    def is_hc(self):
        """True when currently running in HC mode."""
        return self.config['running_style'] == RunningStyle.HC

    def short_config_string(self):
        """One-line summary: wordlist, rulelist, test set and mode."""
        return "{}(WL) {}(RL) {}(Testset) {}".format(
            self['wordlist_path']['name'], self['rulelist_path']['name'],
            self['pwlist_path']['name'], self['running_style'])

    def get_log_addr(self):
        """Log-file path derived from the active word/rule/password lists."""
        return "../results/demo_file-{}-{}-{}.log".format(
            self['wordlist_path']['name'], self['rulelist_path']['name'],
            self['pwlist_path']['name'])
RUNTIME_CONFIG = Configuration() | 0.501221 | 0.173533 |
import logging
import os
import uuid
from enum import Enum
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
from taurus_datajob_api import ApiClient
from taurus_datajob_api import Configuration
from taurus_datajob_api import DataJobExecution
from taurus_datajob_api import DataJobExecutionRequest
from taurus_datajob_api import DataJobsExecutionApi
from urllib3 import Retry
from vdk.internal.control.auth.auth import Authentication
log = logging.getLogger(__name__)
class VDKJobExecutionException(AirflowException):
    """
    Raised when a data job execution fails, is cancelled, or is skipped.
    """
class JobStatus(str, Enum):
    """
    Enum for the possible statuses a job execution can have.

    Mixes in ``str`` so members compare equal to their plain string values
    (e.g. ``JobStatus.RUNNING == "running"``).
    """

    SUBMITTED = "submitted"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    CANCELLED = "cancelled"
    SKIPPED = "skipped"
    USER_ERROR = "user_error"
    PLATFORM_ERROR = "platform_error"
class VDKHook(HttpHook):
    """Airflow hook for starting, cancelling and inspecting VDK data job
    executions through the VDK Control Service REST API.

    Args:
        conn_id: Airflow connection ID holding the Control Service endpoint.
        job_name: Name of the data job.
        team_name: Name of the team that owns the job.
        timeout: Request timeout (seconds) passed to every API call.

    Fix: the original block's final line was fused with dataset metadata
    (a syntax error); the clean return statement is restored.
    """

    def __init__(
        self,
        conn_id: str,
        job_name: str,
        team_name: str,
        timeout: int = 1,  # TODO: Set reasonable default
    ):
        super().__init__(http_conn_id=conn_id)
        self.job_name = job_name
        self.team_name = team_name
        self.timeout = timeout
        self.deployment_id = "production"  # currently multiple deployments are not supported so this remains hardcoded
        # setting these manually to avoid using VDKConfig
        self.op_id = os.environ.get("VDK_OP_ID_OVERRIDE", f"{uuid.uuid4().hex}"[:16])
        self.http_verify_ssl = os.getenv(
            "VDK_CONTROL_HTTP_VERIFY_SSL", "True"
        ).lower() in ("true", "1", "t")
        self.http_connection_pool_maxsize = int(
            os.getenv("VDK_CONTROL_HTTP_CONNECTION_POOL_MAXSIZE", "2")
        )
        self.http_total_retries = int(os.getenv("VDK_CONTROL_HTTP_TOTAL_RETRIES", "10"))
        self.http_connect_retries = int(
            os.getenv("VDK_CONTROL_HTTP_CONNECT_RETRIES", "6")
        )
        self.http_read_retries = int(os.getenv("VDK_CONTROL_HTTP_READ_RETRIES", "6"))
        self.__execution_api = self._get_execution_api()

    def start_job_execution(self, **request_kwargs) -> None:
        """
        Triggers a manual Datajob execution.

        :param: request_kwargs: Request arguments to be included with the HTTP request
        """
        execution_request = DataJobExecutionRequest(
            started_by="airflow-provider-vdk",
            args=request_kwargs,
        )
        _, _, headers = self.__execution_api.data_job_execution_start_with_http_info(
            team_name=self.team_name,
            job_name=self.job_name,
            deployment_id=self.deployment_id,
            data_job_execution_request=execution_request,
            _request_timeout=self.timeout,
        )
        log.debug(f"Received headers: {headers}")

    def cancel_job_execution(self, execution_id: str) -> None:
        """
        Cancels a Datajob execution.

        :param execution_id: ID of the job execution
        """
        self.__execution_api.data_job_execution_cancel(
            team_name=self.team_name,
            job_name=self.job_name,
            execution_id=execution_id,
            _request_timeout=self.timeout,
        )

    def get_job_execution_log(self, execution_id: str) -> str:
        """
        Returns the stored execution logs for a particular job execution.

        :param execution_id: ID of the job execution
        :return: job execution logs
        """
        return self.__execution_api.data_job_logs_download(
            team_name=self.team_name, job_name=self.job_name, execution_id=execution_id
        ).logs

    def get_job_execution_status(self, execution_id: str) -> DataJobExecution:
        """
        Returns the execution status for a particular job execution.

        :param execution_id: ID of the job execution
        :return: The execution status object listing details about the status of this particular execution
        """
        return self.__execution_api.data_job_execution_read(
            team_name=self.team_name, job_name=self.job_name, execution_id=execution_id
        )

    def _get_rest_api_url_from_connection(self):
        """Build the Control Service base URL from the Airflow connection."""
        conn = self.get_connection(self.http_conn_id)
        if conn.host and "://" in conn.host:
            base_url = conn.host
        else:
            # schema defaults to HTTPS
            schema = conn.schema if conn.schema else "https"
            host = conn.host if conn.host else ""
            base_url = schema + "://" + host
        if conn.port:
            base_url = base_url + ":" + str(conn.port)
        return base_url

    def _get_execution_api(self):
        """Create the DataJobsExecutionApi client (retries, SSL, auth, op ID)."""
        rest_api_url = self._get_rest_api_url_from_connection()
        config = Configuration(host=rest_api_url, api_key=None)
        config.connection_pool_maxsize = self.http_connection_pool_maxsize
        config.retries = Retry(
            total=self.http_total_retries,
            connect=self.http_connect_retries,
            read=self.http_read_retries,
            backoff_factor=2,
            status_forcelist=[500, 502, 503, 504],
        )
        config.client_side_validation = False
        config.verify_ssl = self.http_verify_ssl
        config.access_token = Authentication().read_access_token()
        api_client = ApiClient(config)
        # We are setting X-OPID - this is send in telemetry and printed in logs on server side - make it easier
        # to troubleshoot and trace requests across different services
        api_client.set_default_header("X-OPID", self.op_id)
        return DataJobsExecutionApi(api_client)
import os
import uuid
from enum import Enum
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
from taurus_datajob_api import ApiClient
from taurus_datajob_api import Configuration
from taurus_datajob_api import DataJobExecution
from taurus_datajob_api import DataJobExecutionRequest
from taurus_datajob_api import DataJobsExecutionApi
from urllib3 import Retry
from vdk.internal.control.auth.auth import Authentication
log = logging.getLogger(__name__)
class VDKJobExecutionException(AirflowException):
    """
    Raised when a data job execution fails, is cancelled, or is skipped.
    """
class JobStatus(str, Enum):
    """
    Enum for the possible statuses a job execution can have.

    Mixes in ``str`` so members compare equal to their plain string values
    (e.g. ``JobStatus.RUNNING == "running"``).
    """

    SUBMITTED = "submitted"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    CANCELLED = "cancelled"
    SKIPPED = "skipped"
    USER_ERROR = "user_error"
    PLATFORM_ERROR = "platform_error"
class VDKHook(HttpHook):
def __init__(
self,
conn_id: str,
job_name: str,
team_name: str,
timeout: int = 1, # TODO: Set reasonable default
):
super().__init__(http_conn_id=conn_id)
self.job_name = job_name
self.team_name = team_name
self.timeout = timeout
self.deployment_id = "production" # currently multiple deployments are not supported so this remains hardcoded
# setting these manually to avoid using VDKConfig
self.op_id = os.environ.get("VDK_OP_ID_OVERRIDE", f"{uuid.uuid4().hex}"[:16])
self.http_verify_ssl = os.getenv(
"VDK_CONTROL_HTTP_VERIFY_SSL", "True"
).lower() in ("true", "1", "t")
self.http_connection_pool_maxsize = int(
os.getenv("VDK_CONTROL_HTTP_CONNECTION_POOL_MAXSIZE", "2")
)
self.http_total_retries = int(os.getenv("VDK_CONTROL_HTTP_TOTAL_RETRIES", "10"))
self.http_connect_retries = int(
os.getenv("VDK_CONTROL_HTTP_CONNECT_RETRIES", "6")
)
self.http_read_retries = int(os.getenv("VDK_CONTROL_HTTP_READ_RETRIES", "6"))
self.__execution_api = self._get_execution_api()
def start_job_execution(self, **request_kwargs) -> None:
"""
Triggers a manual Datajob execution.
:param: request_kwargs: Request arguments to be included with the HTTP request
"""
execution_request = DataJobExecutionRequest(
started_by="airflow-provider-vdk",
args=request_kwargs,
)
_, _, headers = self.__execution_api.data_job_execution_start_with_http_info(
team_name=self.team_name,
job_name=self.job_name,
deployment_id=self.deployment_id,
data_job_execution_request=execution_request,
_request_timeout=self.timeout,
)
log.debug(f"Received headers: {headers}")
def cancel_job_execution(self, execution_id: str) -> None:
"""
Cancels a Datajob execution.
:param execution_id: ID of the job execution
"""
self.__execution_api.data_job_execution_cancel(
team_name=self.team_name,
job_name=self.job_name,
execution_id=execution_id,
_request_timeout=self.timeout,
)
def get_job_execution_log(self, execution_id: str) -> str:
"""
Returns the stored execution logs for a particular job execution.
:param execution_id: ID of the job execution
:return: job execution logs
"""
return self.__execution_api.data_job_logs_download(
team_name=self.team_name, job_name=self.job_name, execution_id=execution_id
).logs
def get_job_execution_status(self, execution_id: str) -> DataJobExecution:
"""
Returns the execution status for a particular job execution.
:param execution_id: ID of the job execution
:return: The execution status object listing details about the status of this particular execution
"""
return self.__execution_api.data_job_execution_read(
team_name=self.team_name, job_name=self.job_name, execution_id=execution_id
)
def _get_rest_api_url_from_connection(self):
conn = self.get_connection(self.http_conn_id)
if conn.host and "://" in conn.host:
base_url = conn.host
else:
# schema defaults to HTTPS
schema = conn.schema if conn.schema else "https"
host = conn.host if conn.host else ""
base_url = schema + "://" + host
if conn.port:
base_url = base_url + ":" + str(conn.port)
return base_url
def _get_execution_api(self):
    """
    Build a configured DataJobsExecutionApi client for the Control Service.

    Applies connection pooling, the retry policy, SSL verification and the
    current access token, and tags every request with this task's op ID.
    """
    config = Configuration(
        host=self._get_rest_api_url_from_connection(), api_key=None
    )
    config.client_side_validation = False
    config.verify_ssl = self.http_verify_ssl
    config.connection_pool_maxsize = self.http_connection_pool_maxsize
    config.retries = Retry(
        total=self.http_total_retries,
        connect=self.http_connect_retries,
        read=self.http_read_retries,
        backoff_factor=2,
        status_forcelist=[500, 502, 503, 504],
    )
    config.access_token = Authentication().read_access_token()
    client = ApiClient(config)
    # X-OPID is sent in telemetry and printed in server-side logs, which makes
    # it easier to trace a request across services.
    client.set_default_header("X-OPID", self.op_id)
    return DataJobsExecutionApi(client)
import os
import requests
from PIL import Image
from io import BytesIO
import random
class Module:
    """Base class for chat-command modules."""

    DESC = ""  # human-readable description of the command
    ARGC = 0  # number of arguments the command expects
    ARG_WARNING = "There are not enough arguments to continue."
    ACC_TOKEN = os.getenv("GROUPME_ACCESS_TOKEN")  # GroupMe API token

    def __init__(self):
        # BUG FIX: original read `def __init__:` (missing parameter list),
        # which is a SyntaxError.
        print("Loaded module %s." % self.__class__.__name__)

    def wave(self):
        """Simple greeting, usable as a liveness check."""
        return "Hello!"

    def lines(self, query):
        """Split *query* into its non-empty lines."""
        return [line for line in query.split("\n") if line != ""]

    @staticmethod
    def safe_spaces(text):
        """Replace ASCII spaces with U+2004 so chat clients don't collapse them."""
        return text.replace(" ", "\u2004")
class ImageModule(Module):
def upload_image(self, data) -> str:
    """Upload raw GIF bytes to the GroupMe image service and return the hosted URL."""
    response = requests.post(
        "https://image.groupme.com/pictures",
        data = data,
        headers = {
            "X-Access-Token": self.ACC_TOKEN,
            "Content-Type": "image/gif",
        },
    )
    return response.json()["payload"]["url"]
def upload_pil_image(self, image: Image):
    """Serialize *image* as a GIF in memory and upload it, returning the URL."""
    buffer = BytesIO()
    image.save(buffer, format="GIF", mode="RGB")
    return self.upload_image(buffer.getvalue())
def pil_from_url(self, url):
    """Fetch *url* and open the streamed response body as a PIL image."""
    response = requests.get(url, stream=True)
    # Make urllib3 decompress gzip/deflate before PIL reads the raw stream.
    response.raw.decode_content = True
    return Image.open(response.raw)
def resize(self, image: Image, width):
    """Scale *image* to *width* pixels wide, preserving its aspect ratio.

    FIX: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    Image.LANCZOS is the same filter and exists in both old and new Pillow.
    """
    natural_width, natural_height = image.size
    height = int(width * natural_height / natural_width)
    image = image.resize((width, height), Image.LANCZOS)
    return image
def limit_image_size(self, image: Image, max_width=1000):
    """Downscale *image* to *max_width* if it is wider; otherwise return it unchanged."""
    width, _height = image.size
    if width <= max_width:
        return image
    return self.resize(image, max_width)
def get_portrait(self, user_id, group_id):
    """Return the avatar URL of *user_id* within GroupMe group *group_id*.

    FIXES: the original had two SyntaxErrors (`for member in members` missing
    its colon, and the invalid `===` operator) and shadowed the stdlib `json`
    module with a local variable.  Returns None when the user is not a member.
    """
    req = requests.get(f"https://api.groupme.com/v3/groups/{group_id}?token={self.ACC_TOKEN}")
    payload = req.json()
    for member in payload["response"]["members"]:
        if member["user_id"] == user_id:
            return member["image_url"]
    return None
def get_source_url(self, message, include_avatar=True):
    """Pick the best source image URL for *message*.

    Priority: an attached image, then the portrait of the first mentioned
    user, then (optionally) the sender's avatar.  Returns None when nothing
    applies and *include_avatar* is False.

    FIX: the final `return message.avatar_url` line was corrupted in the
    original; restored, with an explicit None fallthrough.
    """
    mentions = [a for a in message.raw["attachments"] if a["type"] == "mentions"]
    if message.image_url is not None:
        return message.image_url
    if mentions:
        return self.get_portrait(mentions[0]["user_ids"][0], message.group_id)
    if include_avatar:
        return message.avatar_url
    return None
import requests
from PIL import Image
from io import BytesIO
import random
class Module:
    """Base class for chat-command modules."""

    DESC = ""  # human-readable description of the command
    ARGC = 0  # number of arguments the command expects
    ARG_WARNING = "There are not enough arguments to continue."
    ACC_TOKEN = os.getenv("GROUPME_ACCESS_TOKEN")  # GroupMe API token

    def __init__(self):
        # BUG FIX: original read `def __init__:` (missing parameter list),
        # which is a SyntaxError.
        print("Loaded module %s." % self.__class__.__name__)

    def wave(self):
        """Simple greeting, usable as a liveness check."""
        return "Hello!"

    def lines(self, query):
        """Split *query* into its non-empty lines."""
        return [line for line in query.split("\n") if line != ""]

    @staticmethod
    def safe_spaces(text):
        """Replace ASCII spaces with U+2004 so chat clients don't collapse them."""
        return text.replace(" ", "\u2004")
class ImageModule(Module):
def upload_image(self, data) -> str:
headers = {
"X-Access-Token": self.ACC_TOKEN,
"Content-Type": "image/gif",
}
r = requests.post("https://image.groupme.com/pictures", data = data, headers = headers)
return r.json()["payload"]["url"]
def upload_pil_image(self, image: Image):
output = BytesIO()
image.save(output, format="GIF", mode="RGB")
return self.upload_image(output.getvalue())
def pil_from_url(self, url):
response = requests.get(url, stream=True)
response.raw.decode_content = True;
return Image.open(response.raw)
def resize(self, image: Image, width):
natural_width, natural_height = image.size
height = int(width * natural_height / natural_width)
image = image.resize((width, height), Image.ANTIALIAS)
return image
def limit_image_size(self, image: Image, max_width=1000):
natural_width, natural_height = image.size
if natural_width > max_width:
image = self.resize(image, max_width)
return image
def get_portrait(self, user_id, group_id):
    """Return the avatar URL of *user_id* within GroupMe group *group_id*.

    FIXES: the original had two SyntaxErrors (`for member in members` missing
    its colon, and the invalid `===` operator) and shadowed the stdlib `json`
    module with a local variable.  Returns None when the user is not a member.
    """
    req = requests.get(f"https://api.groupme.com/v3/groups/{group_id}?token={self.ACC_TOKEN}")
    payload = req.json()
    for member in payload["response"]["members"]:
        if member["user_id"] == user_id:
            return member["image_url"]
    return None
def get_source_url(self, message, include_avatar=True):
    """Pick the best source image URL for *message*.

    Priority: an attached image, then the portrait of the first mentioned
    user, then (optionally) the sender's avatar.  Returns None when nothing
    applies and *include_avatar* is False.

    FIX: the final `return message.avatar_url` line was corrupted in the
    original; restored, with an explicit None fallthrough.
    """
    mentions = [a for a in message.raw["attachments"] if a["type"] == "mentions"]
    if message.image_url is not None:
        return message.image_url
    if mentions:
        return self.get_portrait(mentions[0]["user_ids"][0], message.group_id)
    if include_avatar:
        return message.avatar_url
    return None
# ROS node: photograph pairs of "unseen" items in a tote and publish each
# frame to an auto-segmentation service for training-data generation.
# NOTE(review): uses raw_input(), so this is a Python 2 / ROS Kinetic-era
# script despite the print() calls.
import rospy
import datetime
import os
import json
from std_msgs.msg import String
from sensor_msgs.msg import Image
from acrv_apc_2017_perception.msg import autosegmenter_msg
import cv_bridge
import cv2

# Number of photos captured per object pair (objects are repositioned between shots).
NUM_IMGS = 7

# Items already covered by existing training data; item JSONs whose name
# matches one of these are skipped so only *unseen* items are photographed.
seen_items = [
    "plastic_wine_glass",
    "hinged_ruled_index_cards",
    "black_fashion_gloves",
    "fiskars_scissors",
    "colgate_toothbrush_4pk",
    "ticonderoga_pencils",
    "tennis_ball_container",
    "expo_eraser",
    "balloons",
    "flashlight",
    "white_facecloth",
    "scotch_sponges",
    "robots_everywhere",
    "speed_stick",
    "marbles",
    "windex",
    "duct_tape",
    "bath_sponge",
    "epsom_salts",
    "burts_bees_baby_wipes",
    "toilet_brush",
    "ice_cube_tray",
    "robots_dvd",
    "table_cloth",
    "irish_spring_soap",
    "avery_binder",
    "hanes_socks",
    "glue_sticks",
    "reynolds_wrap",
    "mouse_traps",
    "measuring_spoons",
    "tissue_box",
    "pie_plates",
    "band_aid_tape",
    "hand_weight",
    "poland_spring_water",
    "mesh_cup",
    "crayons",
    "laugh_out_loud_jokes",
    "composition_book"
]

rospy.init_node('image_capture')
cb = cv_bridge.CvBridge()
# Publisher feeding the auto-segmenter; large queue so no frames are dropped.
pub = rospy.Publisher(rospy.get_param('/autosegmenter_image_topic', '/autosegmenter_image_topic'), autosegmenter_msg, queue_size=1000)
start = datetime.datetime.now()
# Timestamped output directory for the captured frames.
dt = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
os.makedirs('captured_images/%s'%dt)

objects_list_ros = []    # every unseen item name, wrapped as std_msgs/String
objects_pairs_list = []  # unseen items grouped in pairs (last group may be a singleton)
obj_pair = []
# Load the objects from the unseen_items folder
for d, dirs, files in os.walk('item_data/'):
    for f in files:
        if f.endswith('.json'):
            with open(os.path.join(d, f), 'r') as jf:
                print(jf)
                j = json.load(jf)
                item_name = j['name']
                if item_name in seen_items:
                    print('ignored seen item')
                    continue
                obj_pair.append(item_name)
                objects_list_ros.append(String(item_name))
                if len(obj_pair) == 2:
                    objects_pairs_list.append(obj_pair)
                    obj_pair = []
# Odd item count: keep the final single item as its own "pair".
if len(obj_pair) == 1:
    objects_pairs_list.append(obj_pair)

print('\nITS IMPORTANT THAT YOU RUN THIS SCRIPT IN THE DATA FOLDER\n')
# Take pictures and pass them to the auto segmenter service.
for op in objects_pairs_list:
    print('Put %s into the tote. Enter to Continue' % op)
    for i in range(NUM_IMGS):
        raw_input()  # wait for the operator to (re)arrange the items
        img = rospy.wait_for_message('/realsense_wrist/rgb/image_rect', Image)
        msg = autosegmenter_msg()
        msg.image = img
        msg.image_name.data = '_'.join(op)
        op_ros = [String(o) for o in op]
        msg.content = op_ros
        msg.all_items = objects_list_ros
        pub.publish(msg)
        # Also save a local copy of the frame for reference.
        rgb_image = cb.imgmsg_to_cv2(img, 'bgr8')
        cv2.imwrite('captured_images/%s/%s_%s.png' % (dt, '_'.join(op), i), rgb_image)
        print('Done %s/%s. Move the objects. Enter to continue' % (i+1, NUM_IMGS))
end = datetime.datetime.now()
print('FINISHED.')
print('Took %s' % (end-start))
print('Leave this node running until all of the images have been processed.')
# Keep the node alive so published messages keep being processed downstream.
while True:
    rospy.sleep(0.5)
import datetime
import os
import json
from std_msgs.msg import String
from sensor_msgs.msg import Image
from acrv_apc_2017_perception.msg import autosegmenter_msg
import cv_bridge
import cv2
NUM_IMGS = 7
seen_items = [
"plastic_wine_glass",
"hinged_ruled_index_cards",
"black_fashion_gloves",
"fiskars_scissors",
"colgate_toothbrush_4pk",
"ticonderoga_pencils",
"tennis_ball_container",
"expo_eraser",
"balloons",
"flashlight",
"white_facecloth",
"scotch_sponges",
"robots_everywhere",
"speed_stick",
"marbles",
"windex",
"duct_tape",
"bath_sponge",
"epsom_salts",
"burts_bees_baby_wipes",
"toilet_brush",
"ice_cube_tray",
"robots_dvd",
"table_cloth",
"irish_spring_soap",
"avery_binder",
"hanes_socks",
"glue_sticks",
"reynolds_wrap",
"mouse_traps",
"measuring_spoons",
"tissue_box",
"pie_plates",
"band_aid_tape",
"hand_weight",
"poland_spring_water",
"mesh_cup",
"crayons",
"laugh_out_loud_jokes",
"composition_book"
]
rospy.init_node('image_capture')
cb = cv_bridge.CvBridge()
pub = rospy.Publisher(rospy.get_param('/autosegmenter_image_topic', '/autosegmenter_image_topic'), autosegmenter_msg, queue_size=1000)
start = datetime.datetime.now()
dt = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
os.makedirs('captured_images/%s'%dt)
objects_list_ros = []
objects_pairs_list = []
obj_pair = []
# Load the objects from the unseen_items folder
for d, dirs, files in os.walk('item_data/'):
for f in files:
if f.endswith('.json'):
with open(os.path.join(d, f), 'r') as jf:
print(jf)
j = json.load(jf)
item_name = j['name']
if item_name in seen_items:
print('ignored seen item')
continue
obj_pair.append(item_name)
objects_list_ros.append(String(item_name))
if len(obj_pair) == 2:
objects_pairs_list.append(obj_pair)
obj_pair = []
if len(obj_pair) == 1:
objects_pairs_list.append(obj_pair)
print('\nITS IMPORTANT THAT YOU RUN THIS SCRIPT IN THE DATA FOLDER\n')
# Take pictures and pass them to the auto segmenter service.
for op in objects_pairs_list:
print('Put %s into the tote. Enter to Continue' % op)
for i in range(NUM_IMGS):
raw_input()
img = rospy.wait_for_message('/realsense_wrist/rgb/image_rect', Image)
msg = autosegmenter_msg()
msg.image = img
msg.image_name.data = '_'.join(op)
op_ros = [String(o) for o in op]
msg.content = op_ros
msg.all_items = objects_list_ros
pub.publish(msg)
rgb_image = cb.imgmsg_to_cv2(img, 'bgr8')
cv2.imwrite('captured_images/%s/%s_%s.png' % (dt, '_'.join(op), i), rgb_image)
print('Done %s/%s. Move the objects. Enter to continue' % (i+1, NUM_IMGS))
end = datetime.datetime.now()
print('FINISHED.')
print('Took %s' % (end-start))
print('Leave this node running until all of the images have been processed.')
while True:
rospy.sleep(0.5) | 0.375821 | 0.168515 |
import gi
gi.require_version('Gtk', '3.0')
# noinspection PyUnresolvedReferences,PyPep8
from gi.repository import Gtk
class SBrickMotorChannelBox(Gtk.Frame):
    """GTK frame holding the controls (PWM, direction, run time, brake stop)
    for a single SBrick motor channel."""

    def __init__(self, channel, sbrick_channel):
        Gtk.Frame.__init__(self)
        self.sbrickChannel = sbrick_channel  # channel config dict (expects a "name" key)
        self.channel = channel               # zero-based channel index
        self.sbrick = None                   # attached later via set_sbrick()
        self.set_label("Channel: %d - %s" % ((channel + 1), self.sbrickChannel["name"]))
        self.vbox = Gtk.FlowBox()  # , orientation=Gtk.Orientation.HORIZONTAL, spacing=3)
        self.vbox.set_border_width(2)
        # Force all 7 controls onto a single row.
        self.vbox.set_max_children_per_line(7)
        self.vbox.set_min_children_per_line(7)
        self.vbox.set_selection_mode(Gtk.SelectionMode.NONE)
        self.add(self.vbox)
        # self.vbox.pack_start(Gtk.Label("PWM: "), True, False, 0)
        self.vbox.add(Gtk.Label("PWM: "))
        # PWM spinner: 0..255, default full power.
        self.pwmAdjustment = Gtk.Adjustment(255, 0, 255, 5, 10, 0.0)
        self.spinPWM = Gtk.SpinButton.new(self.pwmAdjustment, 5, 0)
        # self.vbox.pack_start(self.spinPWM, True, False, 0)
        self.vbox.add(self.spinPWM)
        self.pwmAdjustment.connect("value-changed", self.on_pwm_changed)
        self.checkReverse = Gtk.CheckButton("Reverse")
        self.checkReverse.connect("toggled", self.on_reverse_changed)
        self.vbox.add(self.checkReverse)
        # self.vbox.pack_start(self.checkReverse, True, False, 0)
        self.checkTime = Gtk.CheckButton("Time MS:")
        # self.vbox.pack_start(self.checkTime, True, False, 0)
        self.vbox.add(self.checkTime)
        self.checkTime.connect("toggled", self.on_time_toggled)
        # Drive duration in milliseconds; -1 means "run until stopped".
        self.timeAdjustment = Gtk.Adjustment(1000, -1, 30000, 100, 1000, 0.0)
        self.spinTime = Gtk.SpinButton.new(self.timeAdjustment, 10, 0)
        # self.vbox.pack_start(self.spinTime, True, False, 0)
        self.vbox.add(self.spinTime)
        self.spinTime.set_sensitive(False)  # enabled only while "Time MS" is checked
        self.checkBrake = Gtk.CheckButton("Break Stop")
        # self.vbox.pack_start(self.checkBrake, True, False, 0)
        self.vbox.add(self.checkBrake)
        self.buttonGo = Gtk.Button("Start")
        self.buttonGo.connect("clicked", self.on_switch_go_clicked)
        # self.vbox.pack_start(self.buttonGo, True, False, 0)
        self.vbox.add(self.buttonGo)
        self.set_sensitive(False)  # disabled until an SBrick is attached
        self.on = False       # whether the channel is currently driving
        self.pwm = 0          # last PWM value forwarded while running
        self.reverse = False  # last direction flag forwarded while running

    # noinspection PyUnusedLocal
    def on_switch_go_clicked(self, switch):
        # Toggle driving: start with the current control settings, or stop.
        self.on = not self.on
        if self.sbrick is not None:
            pwm = self.spinPWM.get_value_as_int()
            timems = -1
            if self.checkTime.get_active():
                timems = self.spinTime.get_value_as_int()
            reverse = self.checkReverse.get_active()
            brakestop = self.checkBrake.get_active()
            if self.on:
                self.buttonGo.set_label("Stop")
                self.sbrick.drive(self.channel, pwm, reverse, timems, brakestop)
            else:
                self.sbrick.stop(self.channel, brakestop)
                self.buttonGo.set_label("Start")

    def set_sbrick(self, sbrick):
        # Attach (or detach with None) the SBrick controller; the UI is only
        # sensitive while a brick is attached.
        self.sbrick = sbrick
        self.set_sensitive(sbrick is not None)

    def on_pwm_changed(self, adjustment):
        # Live-update motor power while the channel is running.
        self.pwm = int(adjustment.get_value())
        if self.sbrick is not None and self.on:
            self.sbrick.change_pwm(self.channel, self.pwm)

    def on_reverse_changed(self, checkbox):
        # Live-update motor direction while the channel is running.
        self.reverse = checkbox.get_active()
        if self.sbrick is not None and self.on:
            self.sbrick.change_reverse(self.channel, self.reverse)

    def on_time_toggled(self, checkbox):
        # The duration spinner is only meaningful when timed drive is enabled.
        self.spinTime.set_sensitive(checkbox.get_active())

    def stopped(self):
        # Called externally when the drive has finished (e.g. timed run elapsed).
        self.buttonGo.set_label("Start")
        self.on = False
gi.require_version('Gtk', '3.0')
# noinspection PyUnresolvedReferences,PyPep8
from gi.repository import Gtk
class SBrickMotorChannelBox(Gtk.Frame):
def __init__(self, channel, sbrick_channel):
Gtk.Frame.__init__(self)
self.sbrickChannel = sbrick_channel
self.channel = channel
self.sbrick = None
self.set_label("Channel: %d - %s" % ((channel + 1), self.sbrickChannel["name"]))
self.vbox = Gtk.FlowBox() # , orientation=Gtk.Orientation.HORIZONTAL, spacing=3)
self.vbox.set_border_width(2)
self.vbox.set_max_children_per_line(7)
self.vbox.set_min_children_per_line(7)
self.vbox.set_selection_mode(Gtk.SelectionMode.NONE)
self.add(self.vbox)
# self.vbox.pack_start(Gtk.Label("PWM: "), True, False, 0)
self.vbox.add(Gtk.Label("PWM: "))
self.pwmAdjustment = Gtk.Adjustment(255, 0, 255, 5, 10, 0.0)
self.spinPWM = Gtk.SpinButton.new(self.pwmAdjustment, 5, 0)
# self.vbox.pack_start(self.spinPWM, True, False, 0)
self.vbox.add(self.spinPWM)
self.pwmAdjustment.connect("value-changed", self.on_pwm_changed)
self.checkReverse = Gtk.CheckButton("Reverse")
self.checkReverse.connect("toggled", self.on_reverse_changed)
self.vbox.add(self.checkReverse)
# self.vbox.pack_start(self.checkReverse, True, False, 0)
self.checkTime = Gtk.CheckButton("Time MS:")
# self.vbox.pack_start(self.checkTime, True, False, 0)
self.vbox.add(self.checkTime)
self.checkTime.connect("toggled", self.on_time_toggled)
self.timeAdjustment = Gtk.Adjustment(1000, -1, 30000, 100, 1000, 0.0)
self.spinTime = Gtk.SpinButton.new(self.timeAdjustment, 10, 0)
# self.vbox.pack_start(self.spinTime, True, False, 0)
self.vbox.add(self.spinTime)
self.spinTime.set_sensitive(False)
self.checkBrake = Gtk.CheckButton("Break Stop")
# self.vbox.pack_start(self.checkBrake, True, False, 0)
self.vbox.add(self.checkBrake)
self.buttonGo = Gtk.Button("Start")
self.buttonGo.connect("clicked", self.on_switch_go_clicked)
# self.vbox.pack_start(self.buttonGo, True, False, 0)
self.vbox.add(self.buttonGo)
self.set_sensitive(False)
self.on = False
self.pwm = 0
self.reverse = False
# noinspection PyUnusedLocal
def on_switch_go_clicked(self, switch):
self.on = not self.on
if self.sbrick is not None:
pwm = self.spinPWM.get_value_as_int()
timems = -1
if self.checkTime.get_active():
timems = self.spinTime.get_value_as_int()
reverse = self.checkReverse.get_active()
brakestop = self.checkBrake.get_active()
if self.on:
self.buttonGo.set_label("Stop")
self.sbrick.drive(self.channel, pwm, reverse, timems, brakestop)
else:
self.sbrick.stop(self.channel, brakestop)
self.buttonGo.set_label("Start")
def set_sbrick(self, sbrick):
self.sbrick = sbrick
self.set_sensitive(sbrick is not None)
def on_pwm_changed(self, adjustment):
self.pwm = int(adjustment.get_value())
if self.sbrick is not None and self.on:
self.sbrick.change_pwm(self.channel, self.pwm)
def on_reverse_changed(self, checkbox):
self.reverse = checkbox.get_active()
if self.sbrick is not None and self.on:
self.sbrick.change_reverse(self.channel, self.reverse)
def on_time_toggled(self, checkbox):
self.spinTime.set_sensitive(checkbox.get_active())
def stopped(self):
self.buttonGo.set_label("Start")
self.on = False | 0.558688 | 0.115611 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.framework import summary_pb2 as tensorflow_dot_core_dot_framework_dot_summary__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/util/event.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n tensorflow/core/util/event.proto\x12\ntensorflow\x1a%tensorflow/core/framework/graph.proto\x1a\'tensorflow/core/framework/summary.proto\"\x9b\x01\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x01(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x16\n\x0c\x66ile_version\x18\x03 \x01(\tH\x00\x12)\n\tgraph_def\x18\x04 \x01(\x0b\x32\x14.tensorflow.GraphDefH\x00\x12&\n\x07summary\x18\x05 \x01(\x0b\x32\x13.tensorflow.SummaryH\x00\x42\x06\n\x04whatb\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_summary__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='tensorflow.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='tensorflow.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='step', full_name='tensorflow.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_version', full_name='tensorflow.Event.file_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_def', full_name='tensorflow.Event.graph_def', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorflow.Event.summary', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='tensorflow.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=129,
serialized_end=284,
)
_EVENT.fields_by_name['graph_def'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF
_EVENT.fields_by_name['summary'].message_type = tensorflow_dot_core_dot_framework_dot_summary__pb2._SUMMARY
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['file_version'])
_EVENT.fields_by_name['file_version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['graph_def'])
_EVENT.fields_by_name['graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'tensorflow.core.util.event_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.Event)
))
_sym_db.RegisterMessage(Event)
# @@protoc_insertion_point(module_scope) | third-party/corenlp/third-party/stanza/stanza/research/templates/third-party/tensorflow/core/util/event_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.framework import summary_pb2 as tensorflow_dot_core_dot_framework_dot_summary__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/util/event.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n tensorflow/core/util/event.proto\x12\ntensorflow\x1a%tensorflow/core/framework/graph.proto\x1a\'tensorflow/core/framework/summary.proto\"\x9b\x01\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x01(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x16\n\x0c\x66ile_version\x18\x03 \x01(\tH\x00\x12)\n\tgraph_def\x18\x04 \x01(\x0b\x32\x14.tensorflow.GraphDefH\x00\x12&\n\x07summary\x18\x05 \x01(\x0b\x32\x13.tensorflow.SummaryH\x00\x42\x06\n\x04whatb\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_summary__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='tensorflow.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='tensorflow.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='step', full_name='tensorflow.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_version', full_name='tensorflow.Event.file_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_def', full_name='tensorflow.Event.graph_def', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorflow.Event.summary', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='tensorflow.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=129,
serialized_end=284,
)
_EVENT.fields_by_name['graph_def'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF
_EVENT.fields_by_name['summary'].message_type = tensorflow_dot_core_dot_framework_dot_summary__pb2._SUMMARY
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['file_version'])
_EVENT.fields_by_name['file_version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['graph_def'])
_EVENT.fields_by_name['graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'tensorflow.core.util.event_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.Event)
))
_sym_db.RegisterMessage(Event)
# @@protoc_insertion_point(module_scope) | 0.202996 | 0.115811 |
from steamCLI.steamapp import SteamApp
class Results:
    """Formats and prints information gathered about a Steam application.

    The ``format_*`` methods populate instance attributes (``steam``,
    ``site_stats``, ``itad``, ``description``); ``print_results`` renders them.
    """

    def __init__(self, app: SteamApp, result: list=None, max_chars: int=79):
        """
        :param app: the application whose data should be formatted.
        :param result: optional pre-existing result lines.
        :param max_chars: line width used when centering output.
        """
        self.app = app
        # Avoid a shared mutable default by creating the list per instance.
        self.result = [] if result is None else result
        self.max_chars = max_chars
        # Populated by the format_* methods below.
        self.description = None
        self.site_stats = None
        self.steam = None
        self.itad = None

    def format_steam_info(self):
        """
        Populate ``self.steam`` with lines built from Steam API data.

        FIX: the original docstring claimed a string was returned; the method
        returns None and stores the lines on the instance.
        """
        release = (self.app.release_date if self.app.release_date
                   else 'no release date')
        initial = (round(self.app.initial_price / 100, 2)
                   if self.app.initial_price else 'N/A')
        current = (round(self.app.final_price / 100, 2) if self.app.final_price
                   else 'N/A')
        currency = f' {self.app.currency}' if self.app.currency else ''
        self.steam = [
            f'*** {self.app.title} ({release}) ***',
            f'{current}{currency} ({self.app.discount}% '
            f'from {initial}{currency})',
            f'Metacritic score: {self.app.metacritic}',
        ]

    def format_steam_website_info(self):
        """
        Populate ``self.site_stats`` with review counts from the Steam website.
        """
        self.site_stats = []
        if self.app.overall_count:
            self.site_stats.append(f'{self.app.overall_count} overall reviews '
                                   f'({self.app.overall_percent} positive)')
        else:
            self.site_stats.append("No overall reviews available")
        # FIX(comment): point out missing *recent* reviews only when overall
        # reviews exist (the original comment described the opposite).
        if self.app.overall_count and not self.app.recent_count:
            self.site_stats.append("No recent reviews available")
        if self.app.recent_count:
            self.site_stats.append(f'{self.app.recent_count} recent reviews '
                                   f'({self.app.recent_percent} positive)')

    def format_historical_low(self):
        """
        Populate ``self.itad`` with the application's historical-low price info.
        """
        lowest = (f'{self.app.historical_low:.2f}' if self.app.historical_low
                  else 'N/A')
        currency = f' {self.app.currency}' if self.app.currency else ''
        cut = self.app.historical_cut if self.app.historical_cut else 'N/A'
        shop = self.app.historical_shop if self.app.historical_shop else 'N/A'
        self.itad = [
            f'Historical low: {lowest}{currency} (-{cut}%)',
            f'Shop: {shop}',
        ]

    def format_description(self) -> str:
        """
        Store the application's short description and return it centered.

        :return: the description centered to ``max_chars``.
        """
        if self.app.description:
            self.description = self.app.description
        else:
            self.description = 'Short description unavailable'
        return self._center_text([self.description])

    def print_results(self):
        """Print all formatted sections framed by asterisk rules."""
        print('\n', ''.center(self.max_chars, '*') + '\n')
        print(self._center_text(self.steam))
        if self.site_stats:
            print('\n', self._center_text(self.site_stats))
        if self.itad:
            print('\n', self._center_text(self.itad))
        if self.description:
            print('\n', self.description.center(self.max_chars))
        print('\n', ''.center(self.max_chars, '*') + '\n')

    def _center_text(self, text: list) -> str:
        """
        Center each line of *text* to ``max_chars`` and join with newlines.

        :param text: list of string values to center.
        :return: centered, newline-joined string.
        """
        return '\n'.join(line.center(self.max_chars) for line in text)
class Results:
def __init__(self, app: SteamApp, result: list=None, max_chars: int=79):
self.app = app
if result is None:
self.result = []
else:
self.result = result
self.max_chars = max_chars
self.description = None
self.site_stats = None
self.steam = None
self.itad = None
def format_steam_info(self):
"""
Formats information that was gathered from Steam API.
:return: formatted string.
"""
release = (self.app.release_date if self.app.release_date
else 'no release date')
initial = (round(self.app.initial_price / 100, 2)
if self.app.initial_price else 'N/A')
current = (round(self.app.final_price / 100, 2) if self.app.final_price
else 'N/A')
currency = f' {self.app.currency}' if self.app.currency else ''
self.steam = list()
self.steam.append(f'*** {self.app.title} ({release}) ***')
self.steam.append(f'{current}{currency} ({self.app.discount}% '
f'from {initial}{currency})')
self.steam.append(f'Metacritic score: {self.app.metacritic}')
def format_steam_website_info(self):
"""
Formats information that was gathered from Steam website.
:return: formatted string.
"""
self.site_stats = list()
if self.app.overall_count:
self.site_stats.append(f'{self.app.overall_count} overall reviews '
f'({self.app.overall_percent} positive)')
else:
self.site_stats.append("No overall reviews available")
# It makes sense to show absence of recent reviews only if overall
# reviews are missing as well.
if self.app.overall_count and not self.app.recent_count:
self.site_stats.append("No recent reviews available")
if self.app.recent_count:
self.site_stats.append(f'{self.app.recent_count} recent reviews '
f'({self.app.recent_percent} positive)')
def format_historical_low(self):
"""
Formats information on historical low prices of the given application.
:return: formatted string.
"""
lowest = (f'{self.app.historical_low:.2f}' if self.app.historical_low
else 'N/A')
currency = f' {self.app.currency}' if self.app.currency else ''
cut = self.app.historical_cut if self.app.historical_cut else 'N/A'
shop = self.app.historical_shop if self.app.historical_shop else 'N/A'
self.itad = list()
self.itad.append(f'Historical low: {lowest}{currency} (-{cut}%)')
self.itad.append(f'Shop: {shop}')
def format_description(self) -> str:
"""
Formats given application's description.
:return: formatted string.
"""
if self.app.description:
self.description = self.app.description
else:
self.description = 'Short description unavailable'
return self._center_text([self.description])
def print_results(self):
print('\n', ''.center(self.max_chars, '*') + '\n')
print(self._center_text(self.steam))
if self.site_stats:
print('\n', self._center_text(self.site_stats))
if self.itad:
print('\n', self._center_text(self.itad))
if self.description:
print('\n', self.description.center(self.max_chars))
print('\n', ''.center(self.max_chars, '*') + '\n')
def _center_text(self, text: list) -> str:
"""
Helper method that centers given text.
:param text: list with string values that need to be centered.
:return: centered string.
"""
return '\n'.join(line.center(self.max_chars) for line in text) | 0.625552 | 0.146484 |
from PIL import Image, ImageChops
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str, help='The file to rainbowiefy')
parser.add_argument("--blend-amount", "-b", type=float, default=0.25, help='How vibrant the colours are')
parser.add_argument("--hue-rate", "-r", type=int, default=30, help='How fast the colors change')
parser.add_argument("--duration", "-d", type=int, default=60, help='How long the gif is')
parser.add_argument("--optimize", default=False, action='store_true', help='Tell the gif encoder to "optimize" it. Not sure what that means')
parser.add_argument("--disable-transparency", default=False, action='store_true', help='Make the resulting image not have any transparency (not recommended)')
parser.add_argument("--transparency-sensitivity", "-t", type=int, default=1, help='if alpha < sensitivity, make that pixel transparent')
parser.add_argument("--output-file", default="out/output.gif", type=str, help='The file to save the gif to')
parser.add_argument("--pdb", default=False, action='store_true', help='Trips a PDB tracepoint for debugging')
parser.add_argument("--debug", default=False, action='store_true', help='Print debug messages')
args = parser.parse_args()
print("Starting up")
DEBUG = args.debug
if DEBUG:
print("DEBUG - Debug mode on")
RGBA_MODE = "RGBA"
PALETTE_MODE = "P"
input_file = args.input_file
base_image = Image.open(input_file).convert(RGBA_MODE)
images = []
def get_transparency_palette_loc(img):
# Too lazy to do conversions right now. Just pass in the right mode
if img.mode != RGBA_MODE:
print(f"WARN - img mode was not RGBA_MODE. Actual: {img.mode}")
return None
paletted_data = img.convert(PALETTE_MODE).getdata()
for idx, val in enumerate(img.getdata()):
alpha = val[3]
if alpha == 0:
return paletted_data[idx]
# If none of the pixels are fully transparent, just give up
print(f"INFO - none of the pixels were fully transparent")
return None
def make_all_transparent_into_same_pallete(img, trans_loc, sensitivity=args.transparency_sensitivity):
palette_img = img.convert(PALETTE_MODE)
for idx, val in enumerate(img.getdata()):
alpha = val[3]
width, height = palette_img.size
x,y = divmod(idx, width)
if alpha < sensitivity:
palette_img.putpixel((y,x), trans_loc)
return palette_img.convert(RGBA_MODE)
for hue in range(0, 360, args.hue_rate):
hsv_string = "hsv({hue},100%,100%)".format(hue=hue)
im = Image.new(RGBA_MODE, base_image.size, hsv_string)
blended = ImageChops.blend(base_image, im, args.blend_amount)
composited = ImageChops.composite(blended, base_image, base_image)
images.append(composited)
if args.pdb:
import pdb; pdb.set_trace()
gif_encoder_args = {
"duration": args.duration,
"loop": 0,
"optimize": args.optimize
}
transparency_loc = get_transparency_palette_loc(base_image)
if DEBUG:
print(f"DEBUG - transparency_loc was {transparency_loc}")
if transparency_loc is not None and not args.disable_transparency:
images = [make_all_transparent_into_same_pallete(x, transparency_loc) for x in images]
gif_encoder_args["transparency"] = transparency_loc
print(f"INFO - Printing to {args.output_file}")
images[0].save(args.output_file,
save_all=True,
append_images=images[1:],
**gif_encoder_args)
print("Job's done") | rainbow.py |
from PIL import Image, ImageChops
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str, help='The file to rainbowiefy')
parser.add_argument("--blend-amount", "-b", type=float, default=0.25, help='How vibrant the colours are')
parser.add_argument("--hue-rate", "-r", type=int, default=30, help='How fast the colors change')
parser.add_argument("--duration", "-d", type=int, default=60, help='How long the gif is')
parser.add_argument("--optimize", default=False, action='store_true', help='Tell the gif encoder to "optimize" it. Not sure what that means')
parser.add_argument("--disable-transparency", default=False, action='store_true', help='Make the resulting image not have any transparency (not recommended)')
parser.add_argument("--transparency-sensitivity", "-t", type=int, default=1, help='if alpha < sensitivity, make that pixel transparent')
parser.add_argument("--output-file", default="out/output.gif", type=str, help='The file to save the gif to')
parser.add_argument("--pdb", default=False, action='store_true', help='Trips a PDB tracepoint for debugging')
parser.add_argument("--debug", default=False, action='store_true', help='Print debug messages')
args = parser.parse_args()
print("Starting up")
DEBUG = args.debug
if DEBUG:
print("DEBUG - Debug mode on")
RGBA_MODE = "RGBA"
PALETTE_MODE = "P"
input_file = args.input_file
base_image = Image.open(input_file).convert(RGBA_MODE)
images = []
def get_transparency_palette_loc(img):
# Too lazy to do conversions right now. Just pass in the right mode
if img.mode != RGBA_MODE:
print(f"WARN - img mode was not RGBA_MODE. Actual: {img.mode}")
return None
paletted_data = img.convert(PALETTE_MODE).getdata()
for idx, val in enumerate(img.getdata()):
alpha = val[3]
if alpha == 0:
return paletted_data[idx]
# If none of the pixels are fully transparent, just give up
print(f"INFO - none of the pixels were fully transparent")
return None
def make_all_transparent_into_same_pallete(img, trans_loc, sensitivity=args.transparency_sensitivity):
palette_img = img.convert(PALETTE_MODE)
for idx, val in enumerate(img.getdata()):
alpha = val[3]
width, height = palette_img.size
x,y = divmod(idx, width)
if alpha < sensitivity:
palette_img.putpixel((y,x), trans_loc)
return palette_img.convert(RGBA_MODE)
for hue in range(0, 360, args.hue_rate):
hsv_string = "hsv({hue},100%,100%)".format(hue=hue)
im = Image.new(RGBA_MODE, base_image.size, hsv_string)
blended = ImageChops.blend(base_image, im, args.blend_amount)
composited = ImageChops.composite(blended, base_image, base_image)
images.append(composited)
if args.pdb:
import pdb; pdb.set_trace()
gif_encoder_args = {
"duration": args.duration,
"loop": 0,
"optimize": args.optimize
}
transparency_loc = get_transparency_palette_loc(base_image)
if DEBUG:
print(f"DEBUG - transparency_loc was {transparency_loc}")
if transparency_loc is not None and not args.disable_transparency:
images = [make_all_transparent_into_same_pallete(x, transparency_loc) for x in images]
gif_encoder_args["transparency"] = transparency_loc
print(f"INFO - Printing to {args.output_file}")
images[0].save(args.output_file,
save_all=True,
append_images=images[1:],
**gif_encoder_args)
print("Job's done") | 0.425009 | 0.113162 |
import numpy as np
from functions import *
import sys
import multiprocessing as mp
import datetime
import logging
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default=1, type=int, help='Model number.')
parser.add_argument('-K', '--K', default=3, type=int, help='Model number.')
parser.add_argument('-np','--nproc', default=12, type=int, help='Number of processes to run in parallel.')
parser.add_argument('-r' ,'--reps', default=20, type=int, help='Number of replications per sample size.')
parser.add_argument('-mi','--maxit', default=2000, type=int, help='Maximum EM iterations.')
parser.add_argument('-e', '--eps', default=1e-8, type=float, help='EM stopping criterion.')
args = parser.parse_args()
print(args)
model = args.model # Model number
n_proc = args.nproc # Number of cores to use
max_iter = args.maxit # Maximum EM iterations
eps = args.eps # EM Stopping criterion.
num_init = 5 # Number of EM initializations
reps = args.reps # Number of replications to run per sample size
K = args.K
exec(open("models.py").read())
logging.basicConfig(filename='std_mod' + str(model) + '_K' + str(K) + '.log', filemode='w', format='%(asctime)s %(message)s')
print(ns)
print("Chose Model " + str(model))
print(model)
def sample(n):
""" Sample from the mixture. """
theta, sigma, pi = get_params(n)
return sample_mixture(theta, sigma, pi, n)
def init_params(n, K):
""" Starting values for EM algorithm. """
theta0, sigma0, pi0 = get_params(n)
theta_start = np.empty([K,d])
sigma_start = np.empty([K,d,d])
pi_start = np.empty([K])
inds = range(K0)
# Make a partition of starting values near the true components.
while True:
s_inds = np.random.choice(inds, size=K)
unique,counts = np.unique(s_inds, return_counts=True)
if unique.size==K0:
break
for i in range(K):
if mix_type == "weak":
theta_start[i,:] = theta0[s_inds[i],:] + np.random.normal(0, 0.005*n**(-0.083), size=d).reshape((1,d))
else:
theta_start[i,:] = theta0[s_inds[i],:] + np.random.normal(0, n**(-0.25), size=d).reshape((1,d))
if mix_type == "weak":
sigma_start[i,:,:] = sigma0[s_inds[i],:,:] + np.diag(np.abs(np.random.normal(0, 0.0005*n**(-0.25), size=d)))
else:
sigma_start[i,:,:] = sigma0[s_inds[i],:,:]
pi_start[i] = pi0[s_inds[i]]/counts[s_inds[i]]
return (theta_start, sigma_start, pi_start)
def process_chunk(bound):
""" Run EM on a range of sample sizes. """
ind_low = bound[0]
ind_high= bound[1]
m = ind_high - ind_low
seed_ctr = 2000 * ind_low # Random seed
chunk_theta = np.empty((m, reps, K,d))
chunk_sigma = np.empty((m, reps, K,d,d))
chunk_pi = np.empty((m, reps, K))
chunk_iters = np.empty((m, reps))
run_out = np.empty((num_init, 5))
for i in range(ind_low, ind_high):
n = int(ns[i])
xi = get_xi(ns[i])
for rep in range(reps):
np.random.seed(seed_ctr)
X = sample(n)
np.random.seed(seed_ctr+1)
theta_start, sigma_start, pi_start = init_params(n,K)
out = em(X, theta_start, sigma_start, pi_start, max_iter=max_iter, eps=eps, mix_type=mix_type, xi=xi)
logging.warning('Model ' + str(model) + ', rep:' + str(rep) + ', n:' + str(n) + ", nind:" + str(i) + ", iters:" + str(out[-1]))
chunk_theta[i-ind_low, rep, :, :] = out[0]
chunk_pi[i-ind_low, rep, :] = out[2]
chunk_sigma[i-ind_low, rep, :, :, :] = out[1]
chunk_iters[i-ind_low, rep] = out[3]
seed_ctr += 1
return (chunk_theta, chunk_sigma, chunk_pi, chunk_iters)
proc_chunks = []
Del = n_num // n_proc
for i in range(n_proc):
if i == n_proc-1:
proc_chunks.append(( (n_proc-1) * Del, n_num) )
else:
proc_chunks.append(( (i*Del, (i+1)*Del ) ))
if n_proc == 12:
proc_chunks = [(0, 25), (25, 40), (40, 50), (50, 60), (60, 67), (67, 75), (75, 80), (80, 85), (85, 90), (90, 94), (94, 97), (97, 100)]
else:
proc_chunks = [(0, 12), (12, 20), (20, 25), (25, 30), (30, 35), (35, 39), (39, 42), (42, 45)]
with mp.Pool(processes=n_proc) as pool:
proc_results = [pool.apply_async(process_chunk,
args=(chunk,))
for chunk in proc_chunks]
result_chunks = [r.get() for r in proc_results]
done_theta = np.concatenate([result_chunks[j][0] for j in range(n_proc)], axis=0)
done_sigma = np.concatenate([result_chunks[j][1] for j in range(n_proc)], axis=0)
done_pi = np.concatenate([result_chunks[j][2] for j in range(n_proc)], axis=0)
done_iters = np.concatenate([result_chunks[j][3] for j in range(n_proc)], axis=0)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_theta.npy", done_theta)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_pi.npy", done_pi)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_iters.npy", done_iters)
if mix_type != "si":
np.save("results/result_model" + str(model) +"_K" + str(K) + "_sigma.npy", done_sigma) | experiment.py | import numpy as np
from functions import *
import sys
import multiprocessing as mp
import datetime
import logging
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default=1, type=int, help='Model number.')
parser.add_argument('-K', '--K', default=3, type=int, help='Model number.')
parser.add_argument('-np','--nproc', default=12, type=int, help='Number of processes to run in parallel.')
parser.add_argument('-r' ,'--reps', default=20, type=int, help='Number of replications per sample size.')
parser.add_argument('-mi','--maxit', default=2000, type=int, help='Maximum EM iterations.')
parser.add_argument('-e', '--eps', default=1e-8, type=float, help='EM stopping criterion.')
args = parser.parse_args()
print(args)
model = args.model # Model number
n_proc = args.nproc # Number of cores to use
max_iter = args.maxit # Maximum EM iterations
eps = args.eps # EM Stopping criterion.
num_init = 5 # Number of EM initializations
reps = args.reps # Number of replications to run per sample size
K = args.K
exec(open("models.py").read())
logging.basicConfig(filename='std_mod' + str(model) + '_K' + str(K) + '.log', filemode='w', format='%(asctime)s %(message)s')
print(ns)
print("Chose Model " + str(model))
print(model)
def sample(n):
""" Sample from the mixture. """
theta, sigma, pi = get_params(n)
return sample_mixture(theta, sigma, pi, n)
def init_params(n, K):
""" Starting values for EM algorithm. """
theta0, sigma0, pi0 = get_params(n)
theta_start = np.empty([K,d])
sigma_start = np.empty([K,d,d])
pi_start = np.empty([K])
inds = range(K0)
# Make a partition of starting values near the true components.
while True:
s_inds = np.random.choice(inds, size=K)
unique,counts = np.unique(s_inds, return_counts=True)
if unique.size==K0:
break
for i in range(K):
if mix_type == "weak":
theta_start[i,:] = theta0[s_inds[i],:] + np.random.normal(0, 0.005*n**(-0.083), size=d).reshape((1,d))
else:
theta_start[i,:] = theta0[s_inds[i],:] + np.random.normal(0, n**(-0.25), size=d).reshape((1,d))
if mix_type == "weak":
sigma_start[i,:,:] = sigma0[s_inds[i],:,:] + np.diag(np.abs(np.random.normal(0, 0.0005*n**(-0.25), size=d)))
else:
sigma_start[i,:,:] = sigma0[s_inds[i],:,:]
pi_start[i] = pi0[s_inds[i]]/counts[s_inds[i]]
return (theta_start, sigma_start, pi_start)
def process_chunk(bound):
""" Run EM on a range of sample sizes. """
ind_low = bound[0]
ind_high= bound[1]
m = ind_high - ind_low
seed_ctr = 2000 * ind_low # Random seed
chunk_theta = np.empty((m, reps, K,d))
chunk_sigma = np.empty((m, reps, K,d,d))
chunk_pi = np.empty((m, reps, K))
chunk_iters = np.empty((m, reps))
run_out = np.empty((num_init, 5))
for i in range(ind_low, ind_high):
n = int(ns[i])
xi = get_xi(ns[i])
for rep in range(reps):
np.random.seed(seed_ctr)
X = sample(n)
np.random.seed(seed_ctr+1)
theta_start, sigma_start, pi_start = init_params(n,K)
out = em(X, theta_start, sigma_start, pi_start, max_iter=max_iter, eps=eps, mix_type=mix_type, xi=xi)
logging.warning('Model ' + str(model) + ', rep:' + str(rep) + ', n:' + str(n) + ", nind:" + str(i) + ", iters:" + str(out[-1]))
chunk_theta[i-ind_low, rep, :, :] = out[0]
chunk_pi[i-ind_low, rep, :] = out[2]
chunk_sigma[i-ind_low, rep, :, :, :] = out[1]
chunk_iters[i-ind_low, rep] = out[3]
seed_ctr += 1
return (chunk_theta, chunk_sigma, chunk_pi, chunk_iters)
proc_chunks = []
Del = n_num // n_proc
for i in range(n_proc):
if i == n_proc-1:
proc_chunks.append(( (n_proc-1) * Del, n_num) )
else:
proc_chunks.append(( (i*Del, (i+1)*Del ) ))
if n_proc == 12:
proc_chunks = [(0, 25), (25, 40), (40, 50), (50, 60), (60, 67), (67, 75), (75, 80), (80, 85), (85, 90), (90, 94), (94, 97), (97, 100)]
else:
proc_chunks = [(0, 12), (12, 20), (20, 25), (25, 30), (30, 35), (35, 39), (39, 42), (42, 45)]
with mp.Pool(processes=n_proc) as pool:
proc_results = [pool.apply_async(process_chunk,
args=(chunk,))
for chunk in proc_chunks]
result_chunks = [r.get() for r in proc_results]
done_theta = np.concatenate([result_chunks[j][0] for j in range(n_proc)], axis=0)
done_sigma = np.concatenate([result_chunks[j][1] for j in range(n_proc)], axis=0)
done_pi = np.concatenate([result_chunks[j][2] for j in range(n_proc)], axis=0)
done_iters = np.concatenate([result_chunks[j][3] for j in range(n_proc)], axis=0)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_theta.npy", done_theta)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_pi.npy", done_pi)
np.save("results/result_model" + str(model) +"_K" + str(K) + "_iters.npy", done_iters)
if mix_type != "si":
np.save("results/result_model" + str(model) +"_K" + str(K) + "_sigma.npy", done_sigma) | 0.341583 | 0.151624 |
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rest_framework.schemas import get_schema_view
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from drive.documents.urls import documents_router
from drive.users.urls import users_router
API_PREFIX = 'api/v<version>/'
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path(API_PREFIX, include(users_router.urls)),
path(API_PREFIX, include(documents_router.urls)),
path('openapi', get_schema_view(
title="Drive API",
description="Upload Documents and Metadata",
version="1.0.0"
), name='openapi-schema'),
path('docs/', TemplateView.as_view(
template_name='swagger-ui.html',
extra_context={'schema_url': 'openapi-schema'}
), name='swagger-ui'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns | config/urls.py | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rest_framework.schemas import get_schema_view
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from drive.documents.urls import documents_router
from drive.users.urls import users_router
API_PREFIX = 'api/v<version>/'
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path(API_PREFIX, include(users_router.urls)),
path(API_PREFIX, include(documents_router.urls)),
path('openapi', get_schema_view(
title="Drive API",
description="Upload Documents and Metadata",
version="1.0.0"
), name='openapi-schema'),
path('docs/', TemplateView.as_view(
template_name='swagger-ui.html',
extra_context={'schema_url': 'openapi-schema'}
), name='swagger-ui'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns | 0.392337 | 0.059047 |
import os
import shutil
import math
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import numpy as np
import gc
from typing import Tuple
from sklearn import preprocessing
from sklearn.utils import shuffle
import datetime
import time
from src.data_process.config_paths import DataPathsManager
from src.training.training_config import TrainingSetup, TrainingParams
from src.data_process.spectrogram_augmenter import noise_overlay, mask_spectrogram
from src.training.LrTweaker import LrTweaker
def calculate_eta(curr_step: int, total_steps: int, start_time: float) -> str:
    """
    Estimate the remaining wall-clock time for an iterative process.

    Extrapolates the average duration of the steps completed so far onto
    the steps that are still left.

    :param curr_step: Number of steps completed so far.
    :param total_steps: Total number of steps the process will run.
    :param start_time: Timestamp (``time.time()``) when the process started.
    :return: Remaining time as rendered by ``str(datetime.timedelta)``
        (e.g. ``0:03:25``), or the placeholder ``"--:--:--"`` before the
        first step completes.
    """
    if curr_step == 0:
        # No completed step yet: nothing to extrapolate from (and the
        # formula below would divide by zero). Plain placeholder only --
        # the caller already prefixes the value with "ETA: ".
        return "--:--:--"
    elapsed = time.time() - start_time
    time_left = (total_steps - curr_step) * elapsed / curr_step
    # Round to whole seconds so the rendered string has no ".123456"
    # microseconds tail.
    return str(datetime.timedelta(seconds=int(round(time_left))))
def prepare_output_dirs(
    model_path: str,
    training_log_path: str,
    training_name: str,
    overwrite_previous: bool,
) -> None:
    """
    Prepare the output directories for the training.

    When output of a previous run with the same name exists, it is removed
    if ``overwrite_previous`` is set; otherwise an error is printed and the
    existing output is left untouched.

    :param model_path: Base directory holding the trained models.
    :param training_log_path: Base directory holding the training logs.
    :param training_name: Name of the training (used as sub-directory name).
    :param overwrite_previous: Remove previous output instead of aborting.
    :return:
    """
    model_dir = f"{model_path}{training_name}"
    log_dir = f"{training_log_path}{training_name}"
    if os.path.exists(model_dir):
        if not overwrite_previous:
            print("ERROR: Model with the same name already exists. Skipping...")
            print("INFO: To overwrite the models, use the overwrite_previous flag.")
            return
        print("WARNING: Model with the same name already exists. Overwriting it...")
        shutil.rmtree(model_dir)
    if os.path.exists(log_dir):
        if not overwrite_previous:
            print("ERROR: Logs with the same name already exists. Skipping...")
            print("INFO: To overwrite the logs, use the overwrite_previous flag.")
            return
        print(
            "WARNING: Logs with the same name already exists. Overwriting them..."
        )
        shutil.rmtree(log_dir)
def augment_data(spectrogram: np.ndarray) -> list:
    """
    Produce augmented variants of a single spectrogram.

    The source spectrogram itself is NOT included in the returned list;
    callers keep it separately.

    :param spectrogram: Spectrogram to derive the augmentations from.
    :return: List with one frequency-masked and one noise-overlaid copy.
    """
    masked = mask_spectrogram(spectrogram, n_freq_masks=1, n_time_masks=0)
    # Heavier masking variant, currently disabled:
    # mask_spectrogram(spectrogram, n_freq_masks=3, n_time_masks=0)
    noisy = noise_overlay(spectrogram, noise_pct=0.7, noise_amt=0.05)
    return [masked, noisy]
def load_data(
    training_config: TrainingSetup,
    metadata: pd.DataFrame,
    path: str,
    augment: bool = False,
) -> Tuple[list, list]:
    """
    Load the spectrograms listed in *metadata* from *path*.

    Tracks are skipped silently when their ``.npy`` file is missing or
    unreadable, or when the spectrogram is not wider than the configured
    input window.

    :param training_config: Configuration for the training (input width).
    :param metadata: Metadata of the data; must provide ``track_id`` and
        ``genre_top`` columns.
    :param path: Directory containing the ``<track_id>.npy`` files.
    :param augment: Also generate augmented variants of each spectrogram.
    :return: Tuple ``(spectrograms, labels)`` with entries kept in sync.
    """
    spectrograms: list = []
    genre_labels: list = []
    min_width = training_config.p.input_w
    for _, row in metadata.iterrows():
        # TODO: Move validation of data to separate module
        try:
            spectrogram = np.load(os.path.join(path, f"{row['track_id']}.npy"))
            if spectrogram.shape[1] <= min_width:
                # Too short to cut even a single training window from.
                continue
            spectrograms.append(spectrogram)
            genre_labels.append(row["genre_top"])
            if augment:
                variants = augment_data(spectrogram)
                spectrograms.extend(variants)
                # Every variant inherits the genre of its source track.
                genre_labels.extend([row["genre_top"]] * len(variants))
        except (FileNotFoundError, ValueError):
            # Missing or corrupted file: skip this track silently.
            pass
    return spectrograms, genre_labels
def prepare_data(
    training_config: TrainingSetup, org_data: list, labels: list
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Cut one random fixed-width window out of every spectrogram.

    :param training_config: Configuration for the training (window width).
    :param org_data: Full-length spectrograms.
    :param labels: Labels, one per spectrogram.
    :return: Tuple ``(windows, labels)`` -- the windows stacked into one
        array and the labels converted to a numpy array.
    """
    width = training_config.p.input_w

    def _random_window(spectrogram):
        # Uniformly random horizontal offset; load_data only keeps
        # spectrograms wider than the window, so the bound is positive.
        offset = np.random.randint(0, spectrogram.shape[1] - width)
        return spectrogram[:, offset : offset + width]

    return np.stack([_random_window(s) for s in org_data]), np.array(labels)
def run_training(
    training_name: str,
    training_metadata: pd.DataFrame,
    training_path: str,
    validation_metadata: pd.DataFrame,
    validation_path: str,
    test_metadata: pd.DataFrame,
    test_path: str,
    data_paths: DataPathsManager,
    augment: bool,
    overwrite_previous: bool = False,
) -> None:
    """
    Run the training.

    Loads all training/validation spectrograms into memory once, then for
    every epoch cuts fresh random windows out of them, shuffles, splits the
    windows into memory-sized patches, and fits the model patch by patch.
    The model is checkpointed after every epoch.

    :param training_name: Name of the training
    :param training_metadata: Metadata of the training data
    :param training_path: Path to the training data
    :param validation_metadata: Metadata of the validation data
    :param validation_path: Path to the validation data
    :param test_metadata: Metadata of the test data (only used by the
        commented-out debug evaluation at the bottom)
    :param test_path: Path to the test data (see above)
    :param data_paths: Paths to the data
    :param augment: Augment the data
    :param overwrite_previous: Overwrite previous training
    :return:
    """
    training_config = TrainingSetup(TrainingParams())
    # Setup callbacks
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=f"{data_paths.training_log_path}{training_name}", update_freq="epoch"
    )
    # Learning rate tweaker which decreases the learning rate if loss is not decreasing
    lr_tweaker = LrTweaker(
        training_config,
        patience=training_config.p.learning_rate_patience,
        decrease_multiplier=training_config.p.learning_rate_decrease_multiplier,
        min_lr=training_config.p.learning_rate_min,
    )
    # Dummy ReduceLROnPlateau which is bugged and doesn't work, but is good to display learning rate with verbose
    dummy_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor="loss",
        factor=training_config.p.learning_rate_decrease_multiplier,
        patience=training_config.p.learning_rate_patience,
        min_lr=training_config.p.learning_rate_min,
    )
    # NOTE(review): prepare_output_dirs prints "Skipping..." but returns
    # None either way, so training proceeds even when old output was kept
    # -- confirm this is intended.
    prepare_output_dirs(
        data_paths.training_model_path,
        data_paths.training_log_path,
        training_name,
        overwrite_previous,
    )
    # Save initial checkpoint of training
    training_config.save(training_name, "init", data_paths.training_model_path)
    training_config.model.compile(
        optimizer=training_config.optimizer,
        loss=training_config.loss,
        metrics=["accuracy"],
    )
    # Load all training and validation data into memory
    train_data, train_label = load_data(
        training_config, training_metadata, training_path, augment
    )
    val_data, val_label = load_data(
        training_config, validation_metadata, validation_path
    )
    # Change labels from string to int
    # NOTE(review): fit_transform is called separately on the validation
    # labels; if the validation label set differs from the training one,
    # the integer mappings may disagree -- consider fit on train and
    # transform on validation instead.
    train_label = training_config.label_encoder.fit_transform(train_label)
    val_label = training_config.label_encoder.fit_transform(val_label)
    # Epoch ETA estimator
    training_start_time = time.time()
    # Collect garbage to avoid memory leak
    gc.collect()
    # Every epoch has own data: windows are re-cut randomly each epoch so
    # the model sees different crops of the same spectrograms.
    for epoch_id in range(training_config.p.starting_epoch, training_config.p.epochs):
        eta = calculate_eta(epoch_id, training_config.p.epochs, training_start_time)
        print(f"Epoch: {epoch_id}/{training_config.p.epochs}. ETA: {eta}")
        # Get subarrays for training and validation
        input_data, input_label = prepare_data(training_config, train_data, train_label)
        val_input_data, val_input_label = prepare_data(
            training_config, val_data, val_label
        )
        # Shuffle the data (data and labels are permuted together)
        input_data, input_label = shuffle(input_data, input_label)
        val_input_data, val_input_label = shuffle(val_input_data, val_input_label)
        # Split data to parts of equal size so a single fit() call never
        # has to hold more than patch_size samples at once.
        if input_data.shape[0] > training_config.p.patch_size:
            input_data = np.array_split(
                input_data,
                math.ceil(input_data.shape[0] / training_config.p.patch_size),
            )
            input_label = np.array_split(
                input_label,
                math.ceil(input_label.shape[0] / training_config.p.patch_size),
            )
        else:
            input_data = [input_data]
            input_label = [input_label]
        fits_per_epoch: int = len(input_data)
        # For each part of data, run models training
        for i in range(fits_per_epoch):
            # initial_epoch/epochs are pinned so Keras logs this fit()
            # under the current epoch number.
            epoch_story = training_config.model.fit(
                input_data[i],
                input_label[i],
                initial_epoch=epoch_id,
                epochs=epoch_id + 1,
                batch_size=training_config.p.batch_size,
                validation_data=(val_input_data, val_input_label),
                shuffle=True,
                callbacks=[tensorboard_callback, dummy_lr],
            )
            # Tweak model's learning rate based on the latest training loss
            lr_tweaker.on_epoch_end(epoch_story.history["loss"][0])
        # Clear gpu session
        tf.keras.backend.clear_session()
        # Collect garbage to avoid memory leak
        gc.collect()
        # Save models after each epoch
        training_config.save(
            training_name, f"{epoch_id}", data_paths.training_model_path
        )
        # Test model on training data(debug only)
        # tm.test_model_training(
        #     training_name,
        #     training_config,
        #     data_paths,
        #     test_metadata,
        #     test_path,
        #     epoch_id,
        # )
import shutil
import math
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import numpy as np
import gc
from typing import Tuple
from sklearn import preprocessing
from sklearn.utils import shuffle
import datetime
import time
from src.data_process.config_paths import DataPathsManager
from src.training.training_config import TrainingSetup, TrainingParams
from src.data_process.spectrogram_augmenter import noise_overlay, mask_spectrogram
from src.training.LrTweaker import LrTweaker
def calculate_eta(curr_step: int, total_steps: int, start_time: float) -> str:
    """
    Estimate the remaining wall-clock time for an iterative process.

    Extrapolates the average duration of the steps completed so far onto
    the steps that are still left.

    :param curr_step: Number of steps completed so far.
    :param total_steps: Total number of steps the process will run.
    :param start_time: Timestamp (``time.time()``) when the process started.
    :return: Remaining time as rendered by ``str(datetime.timedelta)``
        (e.g. ``0:03:25``), or the placeholder ``"--:--:--"`` before the
        first step completes.
    """
    if curr_step == 0:
        # No completed step yet: nothing to extrapolate from (and the
        # formula below would divide by zero). Plain placeholder only --
        # the caller already prefixes the value with "ETA: ".
        return "--:--:--"
    elapsed = time.time() - start_time
    time_left = (total_steps - curr_step) * elapsed / curr_step
    # Round to whole seconds so the rendered string has no ".123456"
    # microseconds tail.
    return str(datetime.timedelta(seconds=int(round(time_left))))
def prepare_output_dirs(
    model_path: str,
    training_log_path: str,
    training_name: str,
    overwrite_previous: bool,
) -> None:
    """
    Prepare the output directories for the training.

    When output of a previous run with the same name exists, it is removed
    if ``overwrite_previous`` is set; otherwise an error is printed and the
    existing output is left untouched.

    :param model_path: Base directory holding the trained models.
    :param training_log_path: Base directory holding the training logs.
    :param training_name: Name of the training (used as sub-directory name).
    :param overwrite_previous: Remove previous output instead of aborting.
    :return:
    """
    model_dir = f"{model_path}{training_name}"
    log_dir = f"{training_log_path}{training_name}"
    if os.path.exists(model_dir):
        if not overwrite_previous:
            print("ERROR: Model with the same name already exists. Skipping...")
            print("INFO: To overwrite the models, use the overwrite_previous flag.")
            return
        print("WARNING: Model with the same name already exists. Overwriting it...")
        shutil.rmtree(model_dir)
    if os.path.exists(log_dir):
        if not overwrite_previous:
            print("ERROR: Logs with the same name already exists. Skipping...")
            print("INFO: To overwrite the logs, use the overwrite_previous flag.")
            return
        print(
            "WARNING: Logs with the same name already exists. Overwriting them..."
        )
        shutil.rmtree(log_dir)
def augment_data(spectrogram: np.ndarray) -> list:
    """
    Produce augmented variants of a single spectrogram.

    The source spectrogram itself is NOT included in the returned list;
    callers keep it separately.

    :param spectrogram: Spectrogram to derive the augmentations from.
    :return: List with one frequency-masked and one noise-overlaid copy.
    """
    masked = mask_spectrogram(spectrogram, n_freq_masks=1, n_time_masks=0)
    # Heavier masking variant, currently disabled:
    # mask_spectrogram(spectrogram, n_freq_masks=3, n_time_masks=0)
    noisy = noise_overlay(spectrogram, noise_pct=0.7, noise_amt=0.05)
    return [masked, noisy]
def load_data(
    training_config: TrainingSetup,
    metadata: pd.DataFrame,
    path: str,
    augment: bool = False,
) -> Tuple[list, list]:
    """
    Load the spectrograms listed in *metadata* from *path*.

    Tracks are skipped silently when their ``.npy`` file is missing or
    unreadable, or when the spectrogram is not wider than the configured
    input window.

    :param training_config: Configuration for the training (input width).
    :param metadata: Metadata of the data; must provide ``track_id`` and
        ``genre_top`` columns.
    :param path: Directory containing the ``<track_id>.npy`` files.
    :param augment: Also generate augmented variants of each spectrogram.
    :return: Tuple ``(spectrograms, labels)`` with entries kept in sync.
    """
    spectrograms: list = []
    genre_labels: list = []
    min_width = training_config.p.input_w
    for _, row in metadata.iterrows():
        # TODO: Move validation of data to separate module
        try:
            spectrogram = np.load(os.path.join(path, f"{row['track_id']}.npy"))
            if spectrogram.shape[1] <= min_width:
                # Too short to cut even a single training window from.
                continue
            spectrograms.append(spectrogram)
            genre_labels.append(row["genre_top"])
            if augment:
                variants = augment_data(spectrogram)
                spectrograms.extend(variants)
                # Every variant inherits the genre of its source track.
                genre_labels.extend([row["genre_top"]] * len(variants))
        except (FileNotFoundError, ValueError):
            # Missing or corrupted file: skip this track silently.
            pass
    return spectrograms, genre_labels
def prepare_data(
    training_config: TrainingSetup, org_data: list, labels: list
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Cut one random fixed-width window out of every spectrogram.

    :param training_config: Configuration for the training (window width).
    :param org_data: Full-length spectrograms.
    :param labels: Labels, one per spectrogram.
    :return: Tuple ``(windows, labels)`` -- the windows stacked into one
        array and the labels converted to a numpy array.
    """
    width = training_config.p.input_w

    def _random_window(spectrogram):
        # Uniformly random horizontal offset; load_data only keeps
        # spectrograms wider than the window, so the bound is positive.
        offset = np.random.randint(0, spectrogram.shape[1] - width)
        return spectrogram[:, offset : offset + width]

    return np.stack([_random_window(s) for s in org_data]), np.array(labels)
def run_training(
    training_name: str,
    training_metadata: pd.DataFrame,
    training_path: str,
    validation_metadata: pd.DataFrame,
    validation_path: str,
    test_metadata: pd.DataFrame,
    test_path: str,
    data_paths: DataPathsManager,
    augment: bool,
    overwrite_previous: bool = False,
) -> None:
    """
    Run the training.
    :param training_name: Name of the training
    :param training_metadata: Metadata of the training data
    :param training_path: Path to the training data
    :param validation_metadata: Metadata of the validation data
    :param validation_path: Path to the validation data
    :param test_metadata: Metadata of the test data
    :param test_path: Path to the test data
    :param data_paths: Paths to the data
    :param augment: Augment the data
    :param overwrite_previous: Overwrite previous training
    :return: None; saves a model checkpoint per epoch and writes TensorBoard logs
    """
    training_config = TrainingSetup(TrainingParams())
    # Setup callbacks
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=f"{data_paths.training_log_path}{training_name}", update_freq="epoch"
    )
    # Learning rate tweaker which decreases the learning rate if loss is not decreasing
    lr_tweaker = LrTweaker(
        training_config,
        patience=training_config.p.learning_rate_patience,
        decrease_multiplier=training_config.p.learning_rate_decrease_multiplier,
        min_lr=training_config.p.learning_rate_min,
    )
    # Dummy ReduceLROnPlateau which is bugged and doesn't work, but is good to display learning rate with verbose
    dummy_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor="loss",
        factor=training_config.p.learning_rate_decrease_multiplier,
        patience=training_config.p.learning_rate_patience,
        min_lr=training_config.p.learning_rate_min,
    )
    prepare_output_dirs(
        data_paths.training_model_path,
        data_paths.training_log_path,
        training_name,
        overwrite_previous,
    )
    # Save initial checkpoint of training
    training_config.save(training_name, "init", data_paths.training_model_path)
    training_config.model.compile(
        optimizer=training_config.optimizer,
        loss=training_config.loss,
        metrics=["accuracy"],
    )
    # Load all training and validation data into memory
    train_data, train_label = load_data(
        training_config, training_metadata, training_path, augment
    )
    val_data, val_label = load_data(
        training_config, validation_metadata, validation_path
    )
    # Change labels from string to int
    train_label = training_config.label_encoder.fit_transform(train_label)
    # NOTE(review): fit_transform on validation labels re-fits the encoder;
    # if the validation label set could differ from training, transform()
    # would be safer — confirm both sets always match.
    val_label = training_config.label_encoder.fit_transform(val_label)
    # Epoch ETA estimator
    training_start_time = time.time()
    # Collect garbage to avoid memory leak
    gc.collect()
    # Every epoch has own data (fresh random windows are sampled per epoch)
    for epoch_id in range(training_config.p.starting_epoch, training_config.p.epochs):
        eta = calculate_eta(epoch_id, training_config.p.epochs, training_start_time)
        print(f"Epoch: {epoch_id}/{training_config.p.epochs}. ETA: {eta}")
        # Get subarrays for training and validation
        input_data, input_label = prepare_data(training_config, train_data, train_label)
        val_input_data, val_input_label = prepare_data(
            training_config, val_data, val_label
        )
        # Shuffle the data
        input_data, input_label = shuffle(input_data, input_label)
        val_input_data, val_input_label = shuffle(val_input_data, val_input_label)
        # Split data to parts of equal size (keeps each fit() call within memory budget)
        if input_data.shape[0] > training_config.p.patch_size:
            input_data = np.array_split(
                input_data,
                math.ceil(input_data.shape[0] / training_config.p.patch_size),
            )
            input_label = np.array_split(
                input_label,
                math.ceil(input_label.shape[0] / training_config.p.patch_size),
            )
        else:
            input_data = [input_data]
            input_label = [input_label]
        fits_per_epoch: int = len(input_data)
        # For each part of data, run models training
        for i in range(fits_per_epoch):
            # initial_epoch/epochs pinned to run exactly one Keras epoch per part
            epoch_story = training_config.model.fit(
                input_data[i],
                input_label[i],
                initial_epoch=epoch_id,
                epochs=epoch_id + 1,
                batch_size=training_config.p.batch_size,
                validation_data=(val_input_data, val_input_label),
                shuffle=True,
                callbacks=[tensorboard_callback, dummy_lr],
            )
            # Tweak model's learning rate
            lr_tweaker.on_epoch_end(epoch_story.history["loss"][0])
        # Clear gpu session
        tf.keras.backend.clear_session()
        # Collect garbage to avoid memory leak
        gc.collect()
        # Save models after each epoch
        training_config.save(
            training_name, f"{epoch_id}", data_paths.training_model_path
        )
        # Test model on training data(debug only)
        # tm.test_model_training(
        #     training_name,
        #     training_config,
        #     data_paths,
        #     test_metadata,
        #     test_path,
        #     epoch_id,
        # )
import pandas as pd
import numpy as np
from constants import DESPESAS_COL, ORCAMENTOS_COL
class Importacao:
    """Load expense/budget CSVs, merge them and populate the MySQL star schema.

    ``mysql_obj`` must expose ``execute_query``, ``execute_par_query`` and
    ``execute_read_query``.
    """

    def __init__(self, despesas_caminho_arq, orcamentos_caminho_arq, mysql_obj):
        """
        :param despesas_caminho_arq: iterable of expense CSV paths
        :param orcamentos_caminho_arq: single yearly-budget CSV path
        :param mysql_obj: database helper used for all inserts/reads
        """
        self.mysql_obj = mysql_obj
        despesas = self.criar_despesas_df(despesas_caminho_arq)
        orcamentos = self.criar_orcamentos_df(orcamentos_caminho_arq)
        self.fato_orcamento_df = self.criar_fato_orcamento_df(despesas, orcamentos)

    def importar(self):
        """Run the full load: dimensions, fact table and aggregate tables."""
        self.inserir_tempo()
        self.inserir_programa()
        self.inserir_acao()
        self.inserir_orgao_superior()
        self.inserir_orgao_subordinado()
        self.inserir_unidade_orcamentaria()
        self.inserir_funcao()
        self.inserir_subfuncao()
        self.inserir_fato_orcamento()
        self.inserir_dim_tempo()
        self.inserir_ag_orgao_subordinado_ano()
        self.inserir_ag_orgao_superior_ano()
        self.inserir_ag_orgao_subordinado_programa_ano()
        self.inserir_ag_funcao_ano()
        self.inserir_ag_programa_ano()

    def criar_despesas_df(self, despesas_caminho_arq):
        """Read and concatenate all expense CSVs; NaNs become empty strings."""
        print('Criando DataFrame Despesas')
        despesas_list = [pd.read_csv(
            caminho,
            delimiter=';', decimal=',',
            encoding='ANSI',
            usecols=list(DESPESAS_COL.values())
        ) for caminho in despesas_caminho_arq]
        despesas = pd.concat(despesas_list)
        despesas.columns = list(DESPESAS_COL.keys())
        print(f'Despesas: {len(despesas)}')
        print(f'Quantidade de NaNs:\n{despesas.isna().sum()}')
        print('Trocando NaNs por strings vazias\n')
        despesas.fillna('', inplace=True)
        return despesas

    def criar_orcamentos_df(self, orcamentos_caminho_arq):
        """Read the yearly budget CSV and split it into 12 monthly frames."""
        print('Criando DataFrame Orçamento\n')
        orcamentos = pd.read_csv(
            orcamentos_caminho_arq,
            delimiter=';', decimal=',',
            encoding='ANSI',
            # 'exercicio' must stay textual — it is concatenated into 'ano_mes'.
            # np.object is deprecated (removed in NumPy >= 1.24), so use str.
            dtype={ORCAMENTOS_COL['exercicio']: str},
            usecols=list(ORCAMENTOS_COL.values())
        )
        orcamentos.columns = list(ORCAMENTOS_COL.keys())
        return self.dividir_orcamento_por_mes(orcamentos)

    def dividir_orcamento_por_mes(self, orcamento_anual):
        """Replicate the yearly frame 12 times, splitting 'valor_orcado'
        evenly per month (note: mutates the input frame's 'valor_orcado')."""
        orcamentos_list = []
        orcamento_anual['valor_orcado'] = orcamento_anual['valor_orcado'] / 12
        for mes in range(12):
            orcamento_mensal = orcamento_anual.copy()
            orcamento_mensal['ano_mes'] = orcamento_mensal['exercicio'] + '/' + str(mes + 1).zfill(2)
            orcamentos_list.append(orcamento_mensal)
        return pd.concat(orcamentos_list)

    def criar_fato_orcamento_df(self, despesas, orcamentos):
        """Outer-merge expenses and monthly budget on the full dimension key."""
        print('Fazendo merge dos DataFrames de Despesas e Orçamento\n')
        return despesas.merge(
            orcamentos,
            on=[
                'ano_mes',
                'cod_orgao_superior', 'nome_orgao_superior',
                'cod_orgao_subordinado', 'nome_orgao_subordinado',
                'cod_uni_orc', 'nome_uni_orc',
                'cod_funcao', 'nome_funcao',
                'cod_subfuncao', 'nome_subfuncao',
                'cod_prog_orc', 'nome_prog_orc',
                'cod_acao', 'nome_acao'
            ],
            how='outer'
        )

    def _inserir_dimensao(self, insert, colunas, subset):
        """Insert the unique rows (first occurrence per *subset*) of *colunas*."""
        linhas = self.fato_orcamento_df[colunas].drop_duplicates(subset=subset, keep='first')
        print(f'Quantidade: {len(linhas)}\n')
        for linha in linhas.itertuples(index=None, name=None):
            self.mysql_obj.execute_par_query(insert, linha)

    def _agrupar(self, chaves):
        """Group the fact frame by *chaves*, summing values; NaNs become 0.0."""
        return self.fato_orcamento_df.groupby(chaves).sum().reset_index().fillna(0.0)

    def inserir_tempo(self):
        print('Inserindo TEMPO')
        insert = """INSERT INTO tempo (ano_mes) VALUES (%s)"""
        ano_meses = self.fato_orcamento_df['ano_mes'].unique()
        print(f'Quantidade: {len(ano_meses)}\n')
        for ano_mes in ano_meses:
            self.mysql_obj.execute_par_query(insert, (ano_mes,))

    def inserir_programa(self):
        print('Inserindo PROGRAMA')
        self._inserir_dimensao(
            """INSERT INTO programa (cod, nome) VALUES (%s, %s)""",
            ['cod_prog_orc', 'nome_prog_orc'], 'cod_prog_orc')

    def inserir_acao(self):
        print('Inserindo ACAO')
        self._inserir_dimensao(
            """INSERT INTO acao (cod, PROGRAMA_cod, nome) VALUES (%s, %s, %s)""",
            ['cod_acao', 'cod_prog_orc', 'nome_acao'], 'cod_acao')

    def inserir_orgao_superior(self):
        print('Inserindo ORGAO_SUPERIOR')
        self._inserir_dimensao(
            """INSERT INTO orgao_superior (cod, nome) VALUES (%s, %s)""",
            ['cod_orgao_superior', 'nome_orgao_superior'], 'cod_orgao_superior')

    def inserir_orgao_subordinado(self):
        print('Inserindo ORGAO_SUBORDINADO')
        insert = """
            INSERT INTO
                orgao_subordinado (cod, ORGAO_SUPERIOR_cod, nome)
            VALUES
                (%s, %s, %s)
        """
        self._inserir_dimensao(
            insert,
            ['cod_orgao_subordinado', 'cod_orgao_superior', 'nome_orgao_subordinado'],
            'cod_orgao_subordinado')

    def inserir_unidade_orcamentaria(self):
        print('Inserindo UNIDADE_ORCAMENTARIA')
        insert = """
            INSERT INTO
                unidade_orcamentaria (cod, ORGAO_SUBORDINADO_cod, nome)
            VALUES
                (%s, %s, %s)
        """
        self._inserir_dimensao(
            insert, ['cod_uni_orc', 'cod_orgao_subordinado', 'nome_uni_orc'], 'cod_uni_orc')

    def inserir_funcao(self):
        print('Inserindo FUNCAO')
        self._inserir_dimensao(
            """INSERT INTO funcao (cod, nome) VALUES (%s, %s)""",
            ['cod_funcao', 'nome_funcao'], 'cod_funcao')

    def inserir_subfuncao(self):
        print('Inserindo SUBFUNCAO')
        self._inserir_dimensao(
            """INSERT INTO subfuncao (cod, FUNCAO_cod, nome) VALUES (%s, %s, %s)""",
            ['cod_subfuncao', 'cod_funcao', 'nome_subfuncao'], 'cod_subfuncao')

    def get_tempo_id(self, ano_mes):
        """Return the surrogate id in `tempo` for an 'ano_mes' value.

        NOTE(review): the value is interpolated straight into the SQL string;
        acceptable for trusted CSV input, but a parameterized read would be safer.
        """
        query = """SELECT id FROM tempo where tempo.ano_mes = '%s'""" % ano_mes
        return self.mysql_obj.execute_read_query(query)[0][0]

    def inserir_fato_orcamento(self):
        """Aggregate per (month, action, subfunction, unit) and insert the facts."""
        print('Inserindo FATO_ORCAMENTO')
        insert = """
            INSERT INTO
                fato_orcamento (
                    valor_orcado, valor_liquidado, TEMPO_id, ACAO_cod, SUBFUNCAO_cod, UNIDADE_ORCAMENTARIA_cod
                )
            VALUES
                (%s, %s, %s, %s, %s, %s)
        """
        fato_df = self.fato_orcamento_df
        # Resolve each ano_mes to its surrogate key once, up front.
        ano_meses_dict = {ano_mes: self.get_tempo_id(ano_mes)
                          for ano_mes in fato_df['ano_mes'].unique()}
        fatos_orcamentos = self._agrupar(['ano_mes', 'cod_acao', 'cod_subfuncao', 'cod_uni_orc'])
        print(f'Quantidade: {len(fatos_orcamentos)}\n')
        for fato in fatos_orcamentos.itertuples():
            self.mysql_obj.execute_par_query(insert, (fato.valor_orcado, fato.valor_liquidado,
                                                      ano_meses_dict[fato.ano_mes], fato.cod_acao,
                                                      fato.cod_subfuncao, fato.cod_uni_orc))

    def inserir_dim_tempo(self):
        """Insert the load year into `dim_tempo` (single row)."""
        print('Inserindo DIM_TEMPO\n')
        insert = """INSERT INTO dim_tempo (ano) VALUES (%s)"""
        ano = self.fato_orcamento_df['exercicio'][0]
        self.mysql_obj.execute_query(insert % ano)

    def get_dim_tempo_id(self):
        """Return the surrogate id in `dim_tempo` for the load year."""
        ano = self.fato_orcamento_df['exercicio'][0]
        query = """SELECT id FROM dim_tempo where dim_tempo.ano = '%s'""" % ano
        return self.mysql_obj.execute_read_query(query)[0][0]

    def inserir_ag_orgao_subordinado_ano(self):
        print('Inserindo AG_ORGAO_SUBORDINADO_ANO')
        insert = """
            INSERT INTO
                ag_orgao_subordinado_ano (valor_orcado, valor_liquidado, DIM_TEMPO_id, ORGAO_SUBORDINADO_cod)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_subordinado'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ano_id, ag.cod_orgao_subordinado))

    def inserir_ag_orgao_superior_ano(self):
        print('Inserindo AG_ORGAO_SUPERIOR_ANO')
        insert = """
            INSERT INTO
                ag_orgao_superior_ano (valor_orcado, valor_liquidado, DIM_TEMPO_id, ORGAO_SUPERIOR_cod)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_superior'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ano_id, ag.cod_orgao_superior))

    def inserir_ag_orgao_subordinado_programa_ano(self):
        print('Inserindo AG_ORGAO_SUBORDINADO_PROGRAMA_ANO')
        insert = """
            INSERT INTO
                ag_orgao_subordinado_programa_ano (
                    valor_orcado, valor_liquidado, ORGAO_SUBORDINADO_cod, PROGRAMA_cod, DIM_TEMPO_id
                )
            VALUES
                (%s, %s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_subordinado', 'cod_prog_orc'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_orgao_subordinado, ag.cod_prog_orc,
                                                      ano_id))

    def inserir_ag_funcao_ano(self):
        print('Inserindo AG_FUNCAO_ANO')
        insert = """
            INSERT INTO
                ag_funcao_ano (valor_orcado, valor_liquidado, FUNCAO_cod, DIM_TEMPO_id)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_funcao'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_funcao, ano_id))

    def inserir_ag_programa_ano(self):
        print('Inserindo AG_PROGRAMA_ANO')
        insert = """
            INSERT INTO
                ag_programa_ano (valor_orcado, valor_liquidado, PROGRAMA_cod, DIM_TEMPO_id)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_prog_orc'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_prog_orc, ano_id))
import numpy as np
from constants import DESPESAS_COL, ORCAMENTOS_COL
class Importacao:
    """Load expense/budget CSVs, merge them and populate the MySQL star schema.

    ``mysql_obj`` must expose ``execute_query``, ``execute_par_query`` and
    ``execute_read_query``.
    """

    def __init__(self, despesas_caminho_arq, orcamentos_caminho_arq, mysql_obj):
        """
        :param despesas_caminho_arq: iterable of expense CSV paths
        :param orcamentos_caminho_arq: single yearly-budget CSV path
        :param mysql_obj: database helper used for all inserts/reads
        """
        self.mysql_obj = mysql_obj
        despesas = self.criar_despesas_df(despesas_caminho_arq)
        orcamentos = self.criar_orcamentos_df(orcamentos_caminho_arq)
        self.fato_orcamento_df = self.criar_fato_orcamento_df(despesas, orcamentos)

    def importar(self):
        """Run the full load: dimensions, fact table and aggregate tables."""
        self.inserir_tempo()
        self.inserir_programa()
        self.inserir_acao()
        self.inserir_orgao_superior()
        self.inserir_orgao_subordinado()
        self.inserir_unidade_orcamentaria()
        self.inserir_funcao()
        self.inserir_subfuncao()
        self.inserir_fato_orcamento()
        self.inserir_dim_tempo()
        self.inserir_ag_orgao_subordinado_ano()
        self.inserir_ag_orgao_superior_ano()
        self.inserir_ag_orgao_subordinado_programa_ano()
        self.inserir_ag_funcao_ano()
        self.inserir_ag_programa_ano()

    def criar_despesas_df(self, despesas_caminho_arq):
        """Read and concatenate all expense CSVs; NaNs become empty strings."""
        print('Criando DataFrame Despesas')
        despesas_list = [pd.read_csv(
            caminho,
            delimiter=';', decimal=',',
            encoding='ANSI',
            usecols=list(DESPESAS_COL.values())
        ) for caminho in despesas_caminho_arq]
        despesas = pd.concat(despesas_list)
        despesas.columns = list(DESPESAS_COL.keys())
        print(f'Despesas: {len(despesas)}')
        print(f'Quantidade de NaNs:\n{despesas.isna().sum()}')
        print('Trocando NaNs por strings vazias\n')
        despesas.fillna('', inplace=True)
        return despesas

    def criar_orcamentos_df(self, orcamentos_caminho_arq):
        """Read the yearly budget CSV and split it into 12 monthly frames."""
        print('Criando DataFrame Orçamento\n')
        orcamentos = pd.read_csv(
            orcamentos_caminho_arq,
            delimiter=';', decimal=',',
            encoding='ANSI',
            # 'exercicio' must stay textual — it is concatenated into 'ano_mes'.
            # np.object is deprecated (removed in NumPy >= 1.24), so use str.
            dtype={ORCAMENTOS_COL['exercicio']: str},
            usecols=list(ORCAMENTOS_COL.values())
        )
        orcamentos.columns = list(ORCAMENTOS_COL.keys())
        return self.dividir_orcamento_por_mes(orcamentos)

    def dividir_orcamento_por_mes(self, orcamento_anual):
        """Replicate the yearly frame 12 times, splitting 'valor_orcado'
        evenly per month (note: mutates the input frame's 'valor_orcado')."""
        orcamentos_list = []
        orcamento_anual['valor_orcado'] = orcamento_anual['valor_orcado'] / 12
        for mes in range(12):
            orcamento_mensal = orcamento_anual.copy()
            orcamento_mensal['ano_mes'] = orcamento_mensal['exercicio'] + '/' + str(mes + 1).zfill(2)
            orcamentos_list.append(orcamento_mensal)
        return pd.concat(orcamentos_list)

    def criar_fato_orcamento_df(self, despesas, orcamentos):
        """Outer-merge expenses and monthly budget on the full dimension key."""
        print('Fazendo merge dos DataFrames de Despesas e Orçamento\n')
        return despesas.merge(
            orcamentos,
            on=[
                'ano_mes',
                'cod_orgao_superior', 'nome_orgao_superior',
                'cod_orgao_subordinado', 'nome_orgao_subordinado',
                'cod_uni_orc', 'nome_uni_orc',
                'cod_funcao', 'nome_funcao',
                'cod_subfuncao', 'nome_subfuncao',
                'cod_prog_orc', 'nome_prog_orc',
                'cod_acao', 'nome_acao'
            ],
            how='outer'
        )

    def _inserir_dimensao(self, insert, colunas, subset):
        """Insert the unique rows (first occurrence per *subset*) of *colunas*."""
        linhas = self.fato_orcamento_df[colunas].drop_duplicates(subset=subset, keep='first')
        print(f'Quantidade: {len(linhas)}\n')
        for linha in linhas.itertuples(index=None, name=None):
            self.mysql_obj.execute_par_query(insert, linha)

    def _agrupar(self, chaves):
        """Group the fact frame by *chaves*, summing values; NaNs become 0.0."""
        return self.fato_orcamento_df.groupby(chaves).sum().reset_index().fillna(0.0)

    def inserir_tempo(self):
        print('Inserindo TEMPO')
        insert = """INSERT INTO tempo (ano_mes) VALUES (%s)"""
        ano_meses = self.fato_orcamento_df['ano_mes'].unique()
        print(f'Quantidade: {len(ano_meses)}\n')
        for ano_mes in ano_meses:
            self.mysql_obj.execute_par_query(insert, (ano_mes,))

    def inserir_programa(self):
        print('Inserindo PROGRAMA')
        self._inserir_dimensao(
            """INSERT INTO programa (cod, nome) VALUES (%s, %s)""",
            ['cod_prog_orc', 'nome_prog_orc'], 'cod_prog_orc')

    def inserir_acao(self):
        print('Inserindo ACAO')
        self._inserir_dimensao(
            """INSERT INTO acao (cod, PROGRAMA_cod, nome) VALUES (%s, %s, %s)""",
            ['cod_acao', 'cod_prog_orc', 'nome_acao'], 'cod_acao')

    def inserir_orgao_superior(self):
        print('Inserindo ORGAO_SUPERIOR')
        self._inserir_dimensao(
            """INSERT INTO orgao_superior (cod, nome) VALUES (%s, %s)""",
            ['cod_orgao_superior', 'nome_orgao_superior'], 'cod_orgao_superior')

    def inserir_orgao_subordinado(self):
        print('Inserindo ORGAO_SUBORDINADO')
        insert = """
            INSERT INTO
                orgao_subordinado (cod, ORGAO_SUPERIOR_cod, nome)
            VALUES
                (%s, %s, %s)
        """
        self._inserir_dimensao(
            insert,
            ['cod_orgao_subordinado', 'cod_orgao_superior', 'nome_orgao_subordinado'],
            'cod_orgao_subordinado')

    def inserir_unidade_orcamentaria(self):
        print('Inserindo UNIDADE_ORCAMENTARIA')
        insert = """
            INSERT INTO
                unidade_orcamentaria (cod, ORGAO_SUBORDINADO_cod, nome)
            VALUES
                (%s, %s, %s)
        """
        self._inserir_dimensao(
            insert, ['cod_uni_orc', 'cod_orgao_subordinado', 'nome_uni_orc'], 'cod_uni_orc')

    def inserir_funcao(self):
        print('Inserindo FUNCAO')
        self._inserir_dimensao(
            """INSERT INTO funcao (cod, nome) VALUES (%s, %s)""",
            ['cod_funcao', 'nome_funcao'], 'cod_funcao')

    def inserir_subfuncao(self):
        print('Inserindo SUBFUNCAO')
        self._inserir_dimensao(
            """INSERT INTO subfuncao (cod, FUNCAO_cod, nome) VALUES (%s, %s, %s)""",
            ['cod_subfuncao', 'cod_funcao', 'nome_subfuncao'], 'cod_subfuncao')

    def get_tempo_id(self, ano_mes):
        """Return the surrogate id in `tempo` for an 'ano_mes' value.

        NOTE(review): the value is interpolated straight into the SQL string;
        acceptable for trusted CSV input, but a parameterized read would be safer.
        """
        query = """SELECT id FROM tempo where tempo.ano_mes = '%s'""" % ano_mes
        return self.mysql_obj.execute_read_query(query)[0][0]

    def inserir_fato_orcamento(self):
        """Aggregate per (month, action, subfunction, unit) and insert the facts."""
        print('Inserindo FATO_ORCAMENTO')
        insert = """
            INSERT INTO
                fato_orcamento (
                    valor_orcado, valor_liquidado, TEMPO_id, ACAO_cod, SUBFUNCAO_cod, UNIDADE_ORCAMENTARIA_cod
                )
            VALUES
                (%s, %s, %s, %s, %s, %s)
        """
        fato_df = self.fato_orcamento_df
        # Resolve each ano_mes to its surrogate key once, up front.
        ano_meses_dict = {ano_mes: self.get_tempo_id(ano_mes)
                          for ano_mes in fato_df['ano_mes'].unique()}
        fatos_orcamentos = self._agrupar(['ano_mes', 'cod_acao', 'cod_subfuncao', 'cod_uni_orc'])
        print(f'Quantidade: {len(fatos_orcamentos)}\n')
        for fato in fatos_orcamentos.itertuples():
            self.mysql_obj.execute_par_query(insert, (fato.valor_orcado, fato.valor_liquidado,
                                                      ano_meses_dict[fato.ano_mes], fato.cod_acao,
                                                      fato.cod_subfuncao, fato.cod_uni_orc))

    def inserir_dim_tempo(self):
        """Insert the load year into `dim_tempo` (single row)."""
        print('Inserindo DIM_TEMPO\n')
        insert = """INSERT INTO dim_tempo (ano) VALUES (%s)"""
        ano = self.fato_orcamento_df['exercicio'][0]
        self.mysql_obj.execute_query(insert % ano)

    def get_dim_tempo_id(self):
        """Return the surrogate id in `dim_tempo` for the load year."""
        ano = self.fato_orcamento_df['exercicio'][0]
        query = """SELECT id FROM dim_tempo where dim_tempo.ano = '%s'""" % ano
        return self.mysql_obj.execute_read_query(query)[0][0]

    def inserir_ag_orgao_subordinado_ano(self):
        print('Inserindo AG_ORGAO_SUBORDINADO_ANO')
        insert = """
            INSERT INTO
                ag_orgao_subordinado_ano (valor_orcado, valor_liquidado, DIM_TEMPO_id, ORGAO_SUBORDINADO_cod)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_subordinado'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ano_id, ag.cod_orgao_subordinado))

    def inserir_ag_orgao_superior_ano(self):
        print('Inserindo AG_ORGAO_SUPERIOR_ANO')
        insert = """
            INSERT INTO
                ag_orgao_superior_ano (valor_orcado, valor_liquidado, DIM_TEMPO_id, ORGAO_SUPERIOR_cod)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_superior'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ano_id, ag.cod_orgao_superior))

    def inserir_ag_orgao_subordinado_programa_ano(self):
        print('Inserindo AG_ORGAO_SUBORDINADO_PROGRAMA_ANO')
        insert = """
            INSERT INTO
                ag_orgao_subordinado_programa_ano (
                    valor_orcado, valor_liquidado, ORGAO_SUBORDINADO_cod, PROGRAMA_cod, DIM_TEMPO_id
                )
            VALUES
                (%s, %s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_orgao_subordinado', 'cod_prog_orc'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_orgao_subordinado, ag.cod_prog_orc,
                                                      ano_id))

    def inserir_ag_funcao_ano(self):
        print('Inserindo AG_FUNCAO_ANO')
        insert = """
            INSERT INTO
                ag_funcao_ano (valor_orcado, valor_liquidado, FUNCAO_cod, DIM_TEMPO_id)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_funcao'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_funcao, ano_id))

    def inserir_ag_programa_ano(self):
        print('Inserindo AG_PROGRAMA_ANO')
        insert = """
            INSERT INTO
                ag_programa_ano (valor_orcado, valor_liquidado, PROGRAMA_cod, DIM_TEMPO_id)
            VALUES
                (%s, %s, %s, %s)
        """
        ano_id = self.get_dim_tempo_id()
        ag_df = self._agrupar(['cod_prog_orc'])
        print(f'Quantidade: {len(ag_df)}\n')
        for ag in ag_df.itertuples():
            self.mysql_obj.execute_par_query(insert, (ag.valor_orcado, ag.valor_liquidado,
                                                      ag.cod_prog_orc, ano_id))
import utils.epiweek as utils
def get_window(epiweek, left_window, right_window):
    """
    generate a time period [epiweek-left_window, epiweek+right_window]
    Args:
        epiweek - the "central" epiweek for a period
        left_window - the length of "left side"
        right_window - the length of "right side"
    Returns:
        A generator of epiweeks within the (inclusive) period
    """
    first = utils.add_epiweeks(epiweek, -left_window)
    last = utils.add_epiweeks(epiweek, right_window)
    return utils.range_epiweeks(first, last, inclusive=True)
def get_start_year(epiweek):
    """
    return the starting year of the flu season for an epiweek.
    Weeks <= 20 belong to the season that started the previous year;
    weeks >= 40 start a new season; weeks 21-39 are off-season.
    Args:
        epiweek - the epiweek for season query
    Returns:
        the starting year of the season, or None for off-season weeks (21-39)
    """
    year, week = utils.split_epiweek(epiweek)
    if week <= 20:
        return year - 1
    if week >= 40:
        return year
    # Off-season weeks: make the previously implicit None explicit.
    return None
def get_period(year, start_week, end_week):
    """
    return the corresponding period for a starting year,
    starting week, and ending week.
    Args:
        year - the start year for a season.
        start_week - the starting week within the season.
        end_week - the ending week within the season.
    Returns:
        the starting and ending epiweek of the period.
    """
    if start_week > end_week:
        # Period wraps the new year: starts this year, ends the next.
        return (utils.join_epiweek(year, start_week),
                utils.join_epiweek(year + 1, end_week))
    if end_week <= 30:
        # Both weeks fall after the new year, inside the same season.
        return (utils.join_epiweek(year + 1, start_week),
                utils.join_epiweek(year + 1, end_week))
    return (utils.join_epiweek(year, start_week),
            utils.join_epiweek(year, end_week))
def get_max_window(epiweek):
    """
    obtain the maximum window applicable for an epiweek
    (weeks elapsed since the season start at week 40).
    Args:
        epiweek - the current epiweek.
    Returns:
        max_window - the maximum window.
    """
    season_start = utils.join_epiweek(get_start_year(epiweek), 40)
    return utils.delta_epiweeks(season_start, epiweek)
def unravel(time_period):
    """
    convert a (start, end) epiweek pair to an epiweek generator.
    Args:
        time_period - a (start_epiweek, end_epiweek) pair, inclusive
                      (the old docstring wrongly called this a string)
    Returns:
        A generator of epiweeks within the period
    """
    return utils.range_epiweeks(time_period[0], time_period[1], inclusive=True)
def get_window(epiweek, left_window, right_window):
    """
    generate a time period [epiweek-left_window, epiweek+right_window]
    Args:
        epiweek - the "central" epiweek for a period
        left_window - the length of "left side"
        right_window - the length of "right side"
    Returns:
        A generator of epiweeks within the (inclusive) period
    """
    first = utils.add_epiweeks(epiweek, -left_window)
    last = utils.add_epiweeks(epiweek, right_window)
    return utils.range_epiweeks(first, last, inclusive=True)
def get_start_year(epiweek):
    """
    return the starting year of the flu season for an epiweek.
    Weeks <= 20 belong to the season that started the previous year;
    weeks >= 40 start a new season; weeks 21-39 are off-season.
    Args:
        epiweek - the epiweek for season query
    Returns:
        the starting year of the season, or None for off-season weeks (21-39)
    """
    year, week = utils.split_epiweek(epiweek)
    if week <= 20:
        return year - 1
    if week >= 40:
        return year
    # Off-season weeks: make the previously implicit None explicit.
    return None
def get_period(year, start_week, end_week):
    """
    return the corresponding period for a starting year,
    starting week, and ending week.
    Args:
        year - the start year for a season.
        start_week - the starting week within the season.
        end_week - the ending week within the season.
    Returns:
        the starting and ending epiweek of the period.
    """
    if start_week > end_week:
        # Period wraps the new year: starts this year, ends the next.
        return (utils.join_epiweek(year, start_week),
                utils.join_epiweek(year + 1, end_week))
    if end_week <= 30:
        # Both weeks fall after the new year, inside the same season.
        return (utils.join_epiweek(year + 1, start_week),
                utils.join_epiweek(year + 1, end_week))
    return (utils.join_epiweek(year, start_week),
            utils.join_epiweek(year, end_week))
def get_max_window(epiweek):
    """
    obtain the maximum window applicable for an epiweek
    (weeks elapsed since the season start at week 40).
    Args:
        epiweek - the current epiweek.
    Returns:
        max_window - the maximum window.
    """
    season_start = utils.join_epiweek(get_start_year(epiweek), 40)
    return utils.delta_epiweeks(season_start, epiweek)
def unravel(time_period):
    """
    convert a (start, end) epiweek pair to an epiweek generator.
    Args:
        time_period - a (start_epiweek, end_epiweek) pair, inclusive
                      (the old docstring wrongly called this a string)
    Returns:
        A generator of epiweeks within the period
    """
    return utils.range_epiweeks(time_period[0], time_period[1], inclusive=True)
import math
import numpy as np
import logging
import os
import traceback
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from module.MOptions import MArmTwistOffOptions, MOptionsDataSet
from mmd.PmxData import PmxModel, Bone # noqa
from mmd.VmdData import VmdMotion, VmdBoneFrame, VmdCameraFrame, VmdInfoIk, VmdLightFrame, VmdMorphFrame, VmdShadowFrame, VmdShowIkFrame # noqa
from mmd.VmdWriter import VmdWriter
from module.MParams import BoneLinks # noqa
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MUtils, MServiceUtils, MBezierUtils # noqa
from utils.MLogger import MLogger # noqa
from utils.MException import SizingException, MKilledException
# Module logger for this service.
logger = MLogger(__name__, level=1)
# Pre-computed cosine thresholds: cos(θ) for θ in degrees. Comparing a
# cosine against these is equivalent to comparing the underlying angle
# against the named tolerance.
RADIANS_01 = math.cos(math.radians(0.1))
RADIANS_05 = math.cos(math.radians(0.5))
RADIANS_1 = math.cos(math.radians(1))
RADIANS_2 = math.cos(math.radians(2))
RADIANS_5 = math.cos(math.radians(5))
RADIANS_8 = math.cos(math.radians(8))
RADIANS_12 = math.cos(math.radians(12))
RADIANS_15 = math.cos(math.radians(15))
class ConvertArmTwistOffService():
    def __init__(self, options: MArmTwistOffOptions):
        # Keep the run options (motion, model, output path, flags) for all steps.
        self.options = options
def execute(self):
logging.basicConfig(level=self.options.logging_level, format="%(message)s [%(module_name)s]")
try:
service_data_txt = "捩りOFF変換処理実行\n------------------------\nexeバージョン: {version_name}\n".format(version_name=self.options.version_name) \
service_data_txt = "{service_data_txt} VMD: {vmd}\n".format(service_data_txt=service_data_txt,
vmd=os.path.basename(self.options.motion.path)) # noqa
service_data_txt = "{service_data_txt} モデル: {model}({model_name})\n".format(service_data_txt=service_data_txt,
model=os.path.basename(self.options.motion.path), model_name=self.options.model.name) # noqa
service_data_txt = "{service_data_txt} 不要キー削除: {center_rotation}\n".format(service_data_txt=service_data_txt,
center_rotation=self.options.remove_unnecessary_flg) # noqa
logger.info(service_data_txt, decoration=MLogger.DECORATION_BOX)
# 処理に成功しているか
result = self.convert_twist_off()
# 最後に出力
VmdWriter(MOptionsDataSet(self.options.motion, None, self.options.model, self.options.output_path, False, False, [], None, 0, [])).write()
logger.info("出力終了: %s", os.path.basename(self.options.output_path), decoration=MLogger.DECORATION_BOX, title="成功")
return result
except SizingException as se:
logger.error("捩りOFF変換処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
except Exception:
logger.critical("捩りOFF変換処理が意図せぬエラーで終了しました。\n\n%s", traceback.format_exc(), decoration=MLogger.DECORATION_BOX)
finally:
logging.shutdown()
# 捩りOFF変換処理実行
def convert_twist_off(self):
futures = []
with ThreadPoolExecutor(thread_name_prefix="twist_off", max_workers=self.options.max_workers) as executor:
futures.append(executor.submit(self.convert_target_twist_off, "右"))
futures.append(executor.submit(self.convert_target_twist_off, "左"))
concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)
for f in futures:
if not f.result():
return False
return True
# 不要キー削除
def remove_unnecessary_bf(self, bone_name: str):
try:
self.options.motion.remove_unnecessary_bf(0, bone_name, self.options.model.bones[bone_name].getRotatable(), \
self.options.model.bones[bone_name].getTranslatable())
return True
except MKilledException as ke:
raise ke
except SizingException as se:
logger.error("捩りOFF変換処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
return se
except Exception as e:
import traceback
logger.critical("捩りOFF変換処理が意図せぬエラーで終了しました。\n\n%s", traceback.print_exc(), decoration=MLogger.DECORATION_BOX)
raise e
# 1つのボーンに対する捩りOFF変換処理
def convert_target_twist_off(self, direction: str):
motion = self.options.motion
model = self.options.model
bone_name = f"{direction}腕系"
arm_bone_name = f"{direction}腕"
arm_twist_bone_name = f"{direction}腕捩"
elbow_bone_name = f"{direction}ひじ"
wrist_twist_bone_name = f"{direction}手捩"
wrist_bone_name = f"{direction}手首"
finger_bone_name = f"{direction}人指先実体"
finger2_bone_name = f"{direction}小指先実体"
logger.info(f"-- 捩りOFF変換準備:開始【{bone_name}】")
# モデルの手首までのボーンのリンク
finger_links = model.create_link_2_top_one(finger_bone_name, is_defined=False)
finger2_links = model.create_link_2_top_one(finger2_bone_name, is_defined=False)
arm2wrist_links = finger_links.to_links(arm_bone_name)
# 差異の大きい箇所にFKキーフレ追加
fnos = motion.get_differ_fnos(0, list(arm2wrist_links.all().keys()), limit_degrees=20, limit_length=0.5)
# 先に空のキーを登録しておく
prev_sep_fno = 0
for fno in fnos:
for link_name in list(arm2wrist_links.all().keys()):
if link_name in motion.bones:
bf = motion.calc_bf(link_name, fno)
motion.regist_bf(bf, link_name, fno)
if fno // 500 > prev_sep_fno:
logger.count(f"【キーフレ追加 - {bone_name}】", fno, fnos)
prev_sep_fno = fno // 500
logger.info("-- 捩りOFF変換準備:終了【%s】", bone_name)
# 捩りありの状態で一旦保持
org_motion = motion.copy()
prev_sep_fno = 0
for fno in fnos:
# 腕に腕捩りの結果を加算
arm_bf = motion.calc_bf(arm_bone_name, fno)
arm_twist_bf = motion.calc_bf(arm_twist_bone_name, fno)
arm_bf.rotation = arm_bf.rotation * arm_twist_bf.rotation
arm_twist_bf.rotation = MQuaternion()
motion.regist_bf(arm_bf, arm_bone_name, fno)
motion.regist_bf(arm_twist_bf, arm_twist_bone_name, fno)
# 手首に手首捩りの結果を加算
wrist_bf = motion.calc_bf(wrist_bone_name, fno)
wrist_twist_bf = motion.calc_bf(wrist_twist_bone_name, fno)
# 手捩りの方が根元に近いので、先にかけ算
wrist_bf.rotation = wrist_twist_bf.rotation * wrist_bf.rotation
wrist_twist_bf.rotation = MQuaternion()
motion.regist_bf(wrist_bf, wrist_bone_name, fno)
motion.regist_bf(wrist_twist_bf, wrist_twist_bone_name, fno)
if fno // 500 > prev_sep_fno:
logger.count("【捩りOFF変換 - {0}】".format(bone_name), fno, fnos)
prev_sep_fno = fno // 500
# 腕捩ボーンを削除
if arm_twist_bone_name in motion.bones:
del motion.bones[arm_twist_bone_name]
# 手捩ボーンを削除
if wrist_twist_bone_name in motion.bones:
del motion.bones[wrist_twist_bone_name]
if self.options.remove_unnecessary_flg:
futures = []
with ThreadPoolExecutor(thread_name_prefix="remove", max_workers=self.options.max_workers) as executor:
for bone_name in [f"{direction}腕", f"{direction}ひじ", f"{direction}手首"]:
futures.append(executor.submit(self.remove_unnecessary_bf, bone_name))
concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)
for f in futures:
if not f.result():
return False | src/service/ConvertArmTwistOffService.py | import math
import numpy as np
import logging
import os
import traceback
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from module.MOptions import MArmTwistOffOptions, MOptionsDataSet
from mmd.PmxData import PmxModel, Bone # noqa
from mmd.VmdData import VmdMotion, VmdBoneFrame, VmdCameraFrame, VmdInfoIk, VmdLightFrame, VmdMorphFrame, VmdShadowFrame, VmdShowIkFrame # noqa
from mmd.VmdWriter import VmdWriter
from module.MParams import BoneLinks # noqa
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MUtils, MServiceUtils, MBezierUtils # noqa
from utils.MLogger import MLogger # noqa
from utils.MException import SizingException, MKilledException
logger = MLogger(__name__, level=1)
RADIANS_01 = math.cos(math.radians(0.1))
RADIANS_05 = math.cos(math.radians(0.5))
RADIANS_1 = math.cos(math.radians(1))
RADIANS_2 = math.cos(math.radians(2))
RADIANS_5 = math.cos(math.radians(5))
RADIANS_8 = math.cos(math.radians(8))
RADIANS_12 = math.cos(math.radians(12))
RADIANS_15 = math.cos(math.radians(15))
class ConvertArmTwistOffService():
def __init__(self, options: MArmTwistOffOptions):
self.options = options
def execute(self):
logging.basicConfig(level=self.options.logging_level, format="%(message)s [%(module_name)s]")
try:
service_data_txt = "捩りOFF変換処理実行\n------------------------\nexeバージョン: {version_name}\n".format(version_name=self.options.version_name) \
service_data_txt = "{service_data_txt} VMD: {vmd}\n".format(service_data_txt=service_data_txt,
vmd=os.path.basename(self.options.motion.path)) # noqa
service_data_txt = "{service_data_txt} モデル: {model}({model_name})\n".format(service_data_txt=service_data_txt,
model=os.path.basename(self.options.motion.path), model_name=self.options.model.name) # noqa
service_data_txt = "{service_data_txt} 不要キー削除: {center_rotation}\n".format(service_data_txt=service_data_txt,
center_rotation=self.options.remove_unnecessary_flg) # noqa
logger.info(service_data_txt, decoration=MLogger.DECORATION_BOX)
# 処理に成功しているか
result = self.convert_twist_off()
# 最後に出力
VmdWriter(MOptionsDataSet(self.options.motion, None, self.options.model, self.options.output_path, False, False, [], None, 0, [])).write()
logger.info("出力終了: %s", os.path.basename(self.options.output_path), decoration=MLogger.DECORATION_BOX, title="成功")
return result
except SizingException as se:
logger.error("捩りOFF変換処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
except Exception:
logger.critical("捩りOFF変換処理が意図せぬエラーで終了しました。\n\n%s", traceback.format_exc(), decoration=MLogger.DECORATION_BOX)
finally:
logging.shutdown()
# 捩りOFF変換処理実行
def convert_twist_off(self):
futures = []
with ThreadPoolExecutor(thread_name_prefix="twist_off", max_workers=self.options.max_workers) as executor:
futures.append(executor.submit(self.convert_target_twist_off, "右"))
futures.append(executor.submit(self.convert_target_twist_off, "左"))
concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)
for f in futures:
if not f.result():
return False
return True
# 不要キー削除
def remove_unnecessary_bf(self, bone_name: str):
try:
self.options.motion.remove_unnecessary_bf(0, bone_name, self.options.model.bones[bone_name].getRotatable(), \
self.options.model.bones[bone_name].getTranslatable())
return True
except MKilledException as ke:
raise ke
except SizingException as se:
logger.error("捩りOFF変換処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
return se
except Exception as e:
import traceback
logger.critical("捩りOFF変換処理が意図せぬエラーで終了しました。\n\n%s", traceback.print_exc(), decoration=MLogger.DECORATION_BOX)
raise e
# 1つのボーンに対する捩りOFF変換処理
def convert_target_twist_off(self, direction: str):
motion = self.options.motion
model = self.options.model
bone_name = f"{direction}腕系"
arm_bone_name = f"{direction}腕"
arm_twist_bone_name = f"{direction}腕捩"
elbow_bone_name = f"{direction}ひじ"
wrist_twist_bone_name = f"{direction}手捩"
wrist_bone_name = f"{direction}手首"
finger_bone_name = f"{direction}人指先実体"
finger2_bone_name = f"{direction}小指先実体"
logger.info(f"-- 捩りOFF変換準備:開始【{bone_name}】")
# モデルの手首までのボーンのリンク
finger_links = model.create_link_2_top_one(finger_bone_name, is_defined=False)
finger2_links = model.create_link_2_top_one(finger2_bone_name, is_defined=False)
arm2wrist_links = finger_links.to_links(arm_bone_name)
# 差異の大きい箇所にFKキーフレ追加
fnos = motion.get_differ_fnos(0, list(arm2wrist_links.all().keys()), limit_degrees=20, limit_length=0.5)
# 先に空のキーを登録しておく
prev_sep_fno = 0
for fno in fnos:
for link_name in list(arm2wrist_links.all().keys()):
if link_name in motion.bones:
bf = motion.calc_bf(link_name, fno)
motion.regist_bf(bf, link_name, fno)
if fno // 500 > prev_sep_fno:
logger.count(f"【キーフレ追加 - {bone_name}】", fno, fnos)
prev_sep_fno = fno // 500
logger.info("-- 捩りOFF変換準備:終了【%s】", bone_name)
# 捩りありの状態で一旦保持
org_motion = motion.copy()
prev_sep_fno = 0
for fno in fnos:
# 腕に腕捩りの結果を加算
arm_bf = motion.calc_bf(arm_bone_name, fno)
arm_twist_bf = motion.calc_bf(arm_twist_bone_name, fno)
arm_bf.rotation = arm_bf.rotation * arm_twist_bf.rotation
arm_twist_bf.rotation = MQuaternion()
motion.regist_bf(arm_bf, arm_bone_name, fno)
motion.regist_bf(arm_twist_bf, arm_twist_bone_name, fno)
# 手首に手首捩りの結果を加算
wrist_bf = motion.calc_bf(wrist_bone_name, fno)
wrist_twist_bf = motion.calc_bf(wrist_twist_bone_name, fno)
# 手捩りの方が根元に近いので、先にかけ算
wrist_bf.rotation = wrist_twist_bf.rotation * wrist_bf.rotation
wrist_twist_bf.rotation = MQuaternion()
motion.regist_bf(wrist_bf, wrist_bone_name, fno)
motion.regist_bf(wrist_twist_bf, wrist_twist_bone_name, fno)
if fno // 500 > prev_sep_fno:
logger.count("【捩りOFF変換 - {0}】".format(bone_name), fno, fnos)
prev_sep_fno = fno // 500
# 腕捩ボーンを削除
if arm_twist_bone_name in motion.bones:
del motion.bones[arm_twist_bone_name]
# 手捩ボーンを削除
if wrist_twist_bone_name in motion.bones:
del motion.bones[wrist_twist_bone_name]
if self.options.remove_unnecessary_flg:
futures = []
with ThreadPoolExecutor(thread_name_prefix="remove", max_workers=self.options.max_workers) as executor:
for bone_name in [f"{direction}腕", f"{direction}ひじ", f"{direction}手首"]:
futures.append(executor.submit(self.remove_unnecessary_bf, bone_name))
concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)
for f in futures:
if not f.result():
return False | 0.34621 | 0.168788 |
class NetworkDevice():
def __init__(self, name, ip, user='cisco', pw='cisco'):
self.name = name
self.ip_address = ip
self.username = user
self.password = pw
self.os_type = 'unknown'
#---- Class to hold information about an IOS-XE network device --------
class NetworkDeviceIOS(NetworkDevice):
def __init__(self, name, ip, user='cisco', pw='cisco'):
NetworkDevice.__init__(self, name, ip, user, pw)
self.os_type = 'ios'
#---- Class to hold information about an IOS-XR network device --------
class NetworkDeviceXR(NetworkDevice):
def __init__(self, name, ip, user='cisco', pw='cisco'):
NetworkDevice.__init__(self, name, ip, user, pw)
self.os_type = 'ios-xr'
#---- Function to read device information from file -------------------
def read_device_info(devices_file):
devices_list = []
# Read in the devices from the file
file = open(devices_file,'r')
for line in file:
device_info = line.strip().split(',') # Get device info into list
# Create a device object with this data
if device_info[1] == 'ios':
device = NetworkDeviceIOS(device_info[0],device_info[2],
device_info[3],device_info[4])
elif device_info[1] == 'ios-xr':
device = NetworkDeviceXR(device_info[0],device_info[2],
device_info[3],device_info[4])
else:
continue # go to the next device in the file
devices_list.append(device) # add this device object to list
file.close() # Close the file since we are done with it
return devices_list
#---- Function to go through devices printing them to table -----------
def print_device_info(devices_list):
print ''
print 'Name OS-type IP address Username Password'
print '------ ------- -------------- -------- --------'
# Go through the list of devices, printing out values in nice format
for device in devices_list:
print '{0:8} {1:8} {2:16} {3:8} {4:8}'.format(device.name,
device.os_type,
device.ip_address,
device.username,
device.password)
print ''
#---- Main: read device info, then print ------------------------------
devices = read_device_info('devices')
print_device_info(devices) | Cisco_PRNE_Exercises/getDevicesTypes_Class_inh.py | class NetworkDevice():
def __init__(self, name, ip, user='cisco', pw='cisco'):
self.name = name
self.ip_address = ip
self.username = user
self.password = pw
self.os_type = 'unknown'
#---- Class to hold information about an IOS-XE network device --------
class NetworkDeviceIOS(NetworkDevice):
def __init__(self, name, ip, user='cisco', pw='cisco'):
NetworkDevice.__init__(self, name, ip, user, pw)
self.os_type = 'ios'
#---- Class to hold information about an IOS-XR network device --------
class NetworkDeviceXR(NetworkDevice):
def __init__(self, name, ip, user='cisco', pw='cisco'):
NetworkDevice.__init__(self, name, ip, user, pw)
self.os_type = 'ios-xr'
#---- Function to read device information from file -------------------
def read_device_info(devices_file):
devices_list = []
# Read in the devices from the file
file = open(devices_file,'r')
for line in file:
device_info = line.strip().split(',') # Get device info into list
# Create a device object with this data
if device_info[1] == 'ios':
device = NetworkDeviceIOS(device_info[0],device_info[2],
device_info[3],device_info[4])
elif device_info[1] == 'ios-xr':
device = NetworkDeviceXR(device_info[0],device_info[2],
device_info[3],device_info[4])
else:
continue # go to the next device in the file
devices_list.append(device) # add this device object to list
file.close() # Close the file since we are done with it
return devices_list
#---- Function to go through devices printing them to table -----------
def print_device_info(devices_list):
print ''
print 'Name OS-type IP address Username Password'
print '------ ------- -------------- -------- --------'
# Go through the list of devices, printing out values in nice format
for device in devices_list:
print '{0:8} {1:8} {2:16} {3:8} {4:8}'.format(device.name,
device.os_type,
device.ip_address,
device.username,
device.password)
print ''
#---- Main: read device info, then print ------------------------------
devices = read_device_info('devices')
print_device_info(devices) | 0.420243 | 0.161916 |
import math
import statistics
import sys
import gzip
import itertools
import csv
def kmers(k, init=0, alph='ACGT'):
kmers = {}
for tup in itertools.product(alph, repeat=k):
kmer = ''.join(tup)
kmers[kmer] = init
return kmers
def count2freq(count):
freq = {}
total = 0
for k in count: total += count[k]
for k in count: freq[k] = count[k] / total
return freq
def train_imeter1(filename, k=5, d=5, a=10, t=400):
# deal with gzip or std files
if filename.endswith('.gz'): fp = gzip.open(filename, 'rt')
else: fp = open(filename)
# key parameters, as defaults for function
#k = 5 # kmer size
#d = 5 # length of donor site
#a = 10 # length of acceptor site
#t = 400 # proximal-distal threshold
# counts
prox = kmers(k)
dist = kmers(k)
for line in fp.readlines():
f = line.split()
beg = int(f[1])
seq = f[-1]
for i in range(d, len(seq) -k + 1 + a):
kmer = seq[i:i+k]
if kmer not in prox: continue
if beg <= t: prox[kmer] += 1
else: dist[kmer] += 1
# freqs
pfreq = count2freq(prox)
dfreq = count2freq(dist)
imeter = {}
for kmer in pfreq:
imeter[kmer] = math.log2(pfreq[kmer] / dfreq[kmer])
# done
return imeter, pfreq, dfreq
def generatekmers(k): #generates a list of legal kmers. in the future, user specified exceptions could be handled
filter = {}
kmers = list(itertools.product('ACTG', repeat=5))
for i in range(0, len(kmers)):
kmers[i] = ''.join(kmers[i])
filter[kmers[i]] = 1
return(filter)
def readinfile(filename): #Reads the entire file into memory.
records = []
with gzip.open(filename, 'rt') as fp:
for line in fp.readlines():
f = line.split()
entry = {"Name":f[0], "beg":int(f[1]), "end":int(f[2]), "polarity":f[3], \
"Aerial":int(f[4]), "Carpel":int(f[5]), "DG Seed":int(f[6]), \
"LG Seed":int(f[7]), "Leaf":int(f[8]), "Pollen":int(f[9]), \
"Receptacle":int(f[10]), "RAM":int(f[11]), "Root":int(f[12]), \
"SAM":int(f[13]), "S12Flower":int(f[14]), "Gene":f[15]} #this is an unholy abomination. abandon hope, all ye who enter here.
records.append(entry)
fp.close()
return(records)
def cutoffsplit(records, cutoff): #splits sequences into proximal and distal based on start
prox = []
dist = []
for i in range(0, len(records)):
if records[i]['polarity'] == "+":
if records[i]['beg'] <= cutoff:
prox.append((i, records[i]['Gene']))
else:
dist.append((i, records[i]['Gene']))
return(prox, dist) #returns sequences split into two
def kmercount(seqs, k): #determines the total count of kmers in the sequences
don = 5 #donor seqeuence
acc = 10 #acceptor sequence. we're...hardcoding these, I guess. Not a fan. Will these ever need to change?
total = 0
decay = 1
count = {}
filter = kmers(k) #a dictionary of all the possible legal kmers. reformatting to dictionary drastically cuts down processing time
for s in seqs:
for i in range(don, len(s[1])-k+1-acc):
kmer = s[1][i:i+k]
if kmer in filter: #this is a CPU sink, really big O
if kmer not in count:
count[kmer] = 0
count[kmer] += decay #to factor in geom decay, to account for intron significance dropping off as it lengthens
total += decay #pretty sure we want to make the total with the decay as well
#some sort of decay function goes here...
return(count, total) #returns a tuple, a dictionary keyed with unique kmers and their counts, and the total number)
def kmerfreqs(counts): #determines frequencies of kmers across an introns
#print(counts)
freqs= {}
for kmer in counts[0]:
freqs[kmer] = counts[0][kmer] / counts[1]
return(freqs)
def training(proxfreqs, distalfreqs, xfold): #calculates the log odd probability for our kmerfreqs
trained = {}
for kmer in sorted(proxfreqs):
trained[kmer] = math.log2(proxfreqs[kmer] / distalfreqs[kmer])
#write out training set: kmer, proxfreq, distalfreq, score
with open("trainedimeter.txt", "w", newline='') as outcsv:
trwriter = csv.writer(outcsv, delimiter=',', quotechar = '"', \
quoting = csv.QUOTE_NONE, escapechar='|')
trwriter.writerow(['kmer','proximal frequency','distal frequency',\
'log-odds score'])
for kmer in sorted(proxfreqs):
trwriter.writerow([kmer,proxfreqs[kmer],distalfreqs[kmer],trained[kmer]])
outcsv.close()
return(trained)
def scoring(prox, distal, records, trained, k): #calculates the score of a query. Possible support for dynamic queries
proxscores = []
distalscores = []
don = 5
acc = 10
for seq in prox:
score = 0
for i in range(don, len(seq[1]) -k +1 -acc):
kmer = seq[1][i:i+k]
if kmer in trained:
score += trained[kmer]
proxscores.append((seq, score))
for seq in distal:
score = 0
for i in range(don, len(seq[1]) -k +1 -acc):
kmer = seq[1][i:i+k]
if kmer in trained:
score += trained[kmer]
distalscores.append((seq, score))
return(proxscores, distalscores)
def printscores(records, proxscores, distalscores, outfile):
if outfile is None:
print("Proximal Scores")
print(f'Name \tBeg\tEnd\tScore')
for score in proxscores:
i = score[0][0]
print(f'{records[i]["Name"]}\t{records[i]["beg"]}\t{records[i]["end"]}\t{score[1]:.4f}')
print("Distal Scores")
print(f'Name \tBeg\tEnd\tScore')
for score in distalscores:
i = score[0][0]
print(f'{records[i]["Name"]}\t{records[i]["beg"]}\t{records[i]["end"]}\t{score[1]:.4f}')
else:
with open(outfile, "w", newline='') as csvfile:
scorewriter = csv.writer(csvfile, delimiter=',', quotechar='"', \
quoting = csv.QUOTE_NONE, escapechar='|')
scorewriter.writerow(['Name','Beg','End','Location','Score'])
for score in proxscores:
i = score[0][0]
scorewriter.writerow([records[i]["Name"],records[i]["beg"],records[i]["end"],'proximal',score[1]])
for score in distalscores:
i = score[0][0]
scorewriter.writerow([records[i]["Name"],records[i]["beg"],records[i]["end"],'distal',score[1]])
csvfile.close() | ime_drafting/v1/imelib.py | import math
import statistics
import sys
import gzip
import itertools
import csv
def kmers(k, init=0, alph='ACGT'):
kmers = {}
for tup in itertools.product(alph, repeat=k):
kmer = ''.join(tup)
kmers[kmer] = init
return kmers
def count2freq(count):
freq = {}
total = 0
for k in count: total += count[k]
for k in count: freq[k] = count[k] / total
return freq
def train_imeter1(filename, k=5, d=5, a=10, t=400):
# deal with gzip or std files
if filename.endswith('.gz'): fp = gzip.open(filename, 'rt')
else: fp = open(filename)
# key parameters, as defaults for function
#k = 5 # kmer size
#d = 5 # length of donor site
#a = 10 # length of acceptor site
#t = 400 # proximal-distal threshold
# counts
prox = kmers(k)
dist = kmers(k)
for line in fp.readlines():
f = line.split()
beg = int(f[1])
seq = f[-1]
for i in range(d, len(seq) -k + 1 + a):
kmer = seq[i:i+k]
if kmer not in prox: continue
if beg <= t: prox[kmer] += 1
else: dist[kmer] += 1
# freqs
pfreq = count2freq(prox)
dfreq = count2freq(dist)
imeter = {}
for kmer in pfreq:
imeter[kmer] = math.log2(pfreq[kmer] / dfreq[kmer])
# done
return imeter, pfreq, dfreq
def generatekmers(k): #generates a list of legal kmers. in the future, user specified exceptions could be handled
filter = {}
kmers = list(itertools.product('ACTG', repeat=5))
for i in range(0, len(kmers)):
kmers[i] = ''.join(kmers[i])
filter[kmers[i]] = 1
return(filter)
def readinfile(filename): #Reads the entire file into memory.
records = []
with gzip.open(filename, 'rt') as fp:
for line in fp.readlines():
f = line.split()
entry = {"Name":f[0], "beg":int(f[1]), "end":int(f[2]), "polarity":f[3], \
"Aerial":int(f[4]), "Carpel":int(f[5]), "DG Seed":int(f[6]), \
"LG Seed":int(f[7]), "Leaf":int(f[8]), "Pollen":int(f[9]), \
"Receptacle":int(f[10]), "RAM":int(f[11]), "Root":int(f[12]), \
"SAM":int(f[13]), "S12Flower":int(f[14]), "Gene":f[15]} #this is an unholy abomination. abandon hope, all ye who enter here.
records.append(entry)
fp.close()
return(records)
def cutoffsplit(records, cutoff): #splits sequences into proximal and distal based on start
prox = []
dist = []
for i in range(0, len(records)):
if records[i]['polarity'] == "+":
if records[i]['beg'] <= cutoff:
prox.append((i, records[i]['Gene']))
else:
dist.append((i, records[i]['Gene']))
return(prox, dist) #returns sequences split into two
def kmercount(seqs, k): #determines the total count of kmers in the sequences
don = 5 #donor seqeuence
acc = 10 #acceptor sequence. we're...hardcoding these, I guess. Not a fan. Will these ever need to change?
total = 0
decay = 1
count = {}
filter = kmers(k) #a dictionary of all the possible legal kmers. reformatting to dictionary drastically cuts down processing time
for s in seqs:
for i in range(don, len(s[1])-k+1-acc):
kmer = s[1][i:i+k]
if kmer in filter: #this is a CPU sink, really big O
if kmer not in count:
count[kmer] = 0
count[kmer] += decay #to factor in geom decay, to account for intron significance dropping off as it lengthens
total += decay #pretty sure we want to make the total with the decay as well
#some sort of decay function goes here...
return(count, total) #returns a tuple, a dictionary keyed with unique kmers and their counts, and the total number)
def kmerfreqs(counts): #determines frequencies of kmers across an introns
#print(counts)
freqs= {}
for kmer in counts[0]:
freqs[kmer] = counts[0][kmer] / counts[1]
return(freqs)
def training(proxfreqs, distalfreqs, xfold): #calculates the log odd probability for our kmerfreqs
trained = {}
for kmer in sorted(proxfreqs):
trained[kmer] = math.log2(proxfreqs[kmer] / distalfreqs[kmer])
#write out training set: kmer, proxfreq, distalfreq, score
with open("trainedimeter.txt", "w", newline='') as outcsv:
trwriter = csv.writer(outcsv, delimiter=',', quotechar = '"', \
quoting = csv.QUOTE_NONE, escapechar='|')
trwriter.writerow(['kmer','proximal frequency','distal frequency',\
'log-odds score'])
for kmer in sorted(proxfreqs):
trwriter.writerow([kmer,proxfreqs[kmer],distalfreqs[kmer],trained[kmer]])
outcsv.close()
return(trained)
def scoring(prox, distal, records, trained, k): #calculates the score of a query. Possible support for dynamic queries
proxscores = []
distalscores = []
don = 5
acc = 10
for seq in prox:
score = 0
for i in range(don, len(seq[1]) -k +1 -acc):
kmer = seq[1][i:i+k]
if kmer in trained:
score += trained[kmer]
proxscores.append((seq, score))
for seq in distal:
score = 0
for i in range(don, len(seq[1]) -k +1 -acc):
kmer = seq[1][i:i+k]
if kmer in trained:
score += trained[kmer]
distalscores.append((seq, score))
return(proxscores, distalscores)
def printscores(records, proxscores, distalscores, outfile):
if outfile is None:
print("Proximal Scores")
print(f'Name \tBeg\tEnd\tScore')
for score in proxscores:
i = score[0][0]
print(f'{records[i]["Name"]}\t{records[i]["beg"]}\t{records[i]["end"]}\t{score[1]:.4f}')
print("Distal Scores")
print(f'Name \tBeg\tEnd\tScore')
for score in distalscores:
i = score[0][0]
print(f'{records[i]["Name"]}\t{records[i]["beg"]}\t{records[i]["end"]}\t{score[1]:.4f}')
else:
with open(outfile, "w", newline='') as csvfile:
scorewriter = csv.writer(csvfile, delimiter=',', quotechar='"', \
quoting = csv.QUOTE_NONE, escapechar='|')
scorewriter.writerow(['Name','Beg','End','Location','Score'])
for score in proxscores:
i = score[0][0]
scorewriter.writerow([records[i]["Name"],records[i]["beg"],records[i]["end"],'proximal',score[1]])
for score in distalscores:
i = score[0][0]
scorewriter.writerow([records[i]["Name"],records[i]["beg"],records[i]["end"],'distal',score[1]])
csvfile.close() | 0.153644 | 0.244448 |
from django.urls import reverse_lazy
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from starry_students.manager.forms import (
StudentAddUpdateForm,
TeacherAddUpdateForm,
)
from starry_students.manager.models import Student, Teacher, TeacherStudent
class StudentListView(ListView):
"""View to list all in/active students"""
model = Student
template_name = 'manager/student/list.html'
class StudentDetailView(DetailView):
"""View to list all in/active students"""
model = Student
pk_url_kwarg = 'id'
template_name = 'manager/student/detail.html'
class StudentUpdateView(UpdateView):
"""View to update a student."""
pk_url_kwarg = 'id'
model = Student
form_class = StudentAddUpdateForm
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/update.html'
class StudentAddView(CreateView):
"""View to list all in/active students."""
form_class = StudentAddUpdateForm
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/add.html'
class StudentDeleteView(DeleteView):
"""View to delete student."""
pk_url_kwarg = 'id'
queryset = Student.objects.all()
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/delete.html'
# ------------------------ Teacher Views --------------------------------------
class TeacherListView(ListView):
"""View to list all in/active teachers."""
model = Teacher
queryset = Teacher.objects.all().prefetch_related('students')
template_name = 'manager/teacher/list.html'
class TeacherDetailView(DetailView):
"""View to list all in/active teachers"""
model = Teacher
pk_url_kwarg = 'id'
queryset = Teacher.objects.all().prefetch_related('students')
template_name = 'manager/teacher/detail.html'
class TeacherUpdateView(UpdateView):
"""View to update a teacher."""
pk_url_kwarg = 'id'
model = Teacher
form_class = TeacherAddUpdateForm
queryset = Teacher.objects.all().prefetch_related('students')
success_url = reverse_lazy("manager:teacher_list")
template_name = 'manager/teacher/update.html'
class TeacherAddView(CreateView):
"""View to list all in/active Teachers."""
form_class = TeacherAddUpdateForm
success_url = reverse_lazy("manager:teacher_list")
template_name = 'manager/teacher/add.html'
class TeacherDeleteView(DeleteView):
"""View to delete student."""
pk_url_kwarg = 'id'
queryset = Teacher.objects.all()
success_url = reverse_lazy("manager:teacher_list")
template_name = 'manager/teacher/delete.html'
class StudentStarView(UpdateView):
"""View to delete student."""
model = TeacherStudent
queryset = TeacherStudent.objects.all()
success_url = reverse_lazy("manager:teacher_list")
def get_object(self, queryset=None):
"""Exact teacher student match."""
return TeacherStudent.objects.filter(
teacher_id=self.kwargs.get('id'),
student_id=self.kwargs.get('student_id'),
).first() | starry_students/manager/views.py | from django.urls import reverse_lazy
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from starry_students.manager.forms import (
StudentAddUpdateForm,
TeacherAddUpdateForm,
)
from starry_students.manager.models import Student, Teacher, TeacherStudent
class StudentListView(ListView):
"""View to list all in/active students"""
model = Student
template_name = 'manager/student/list.html'
class StudentDetailView(DetailView):
"""View to list all in/active students"""
model = Student
pk_url_kwarg = 'id'
template_name = 'manager/student/detail.html'
class StudentUpdateView(UpdateView):
"""View to update a student."""
pk_url_kwarg = 'id'
model = Student
form_class = StudentAddUpdateForm
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/update.html'
class StudentAddView(CreateView):
"""View to list all in/active students."""
form_class = StudentAddUpdateForm
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/add.html'
class StudentDeleteView(DeleteView):
"""View to delete student."""
pk_url_kwarg = 'id'
queryset = Student.objects.all()
success_url = reverse_lazy("manager:student_list")
template_name = 'manager/student/delete.html'
# ------------------------ Teacher Views --------------------------------------
class TeacherListView(ListView):
"""View to list all in/active teachers."""
model = Teacher
queryset = Teacher.objects.all().prefetch_related('students')
template_name = 'manager/teacher/list.html'
class TeacherDetailView(DetailView):
"""View to list all in/active teachers"""
model = Teacher
pk_url_kwarg = 'id'
queryset = Teacher.objects.all().prefetch_related('students')
template_name = 'manager/teacher/detail.html'
class TeacherUpdateView(UpdateView):
"""View to update a teacher."""
pk_url_kwarg = 'id'
model = Teacher
form_class = TeacherAddUpdateForm
queryset = Teacher.objects.all().prefetch_related('students')
success_url = reverse_lazy("manager:teacher_list")
template_name = 'manager/teacher/update.html'
class TeacherAddView(CreateView):
"""View to list all in/active Teachers."""
form_class = TeacherAddUpdateForm
success_url = reverse_lazy("manager:teacher_list")
template_name = 'manager/teacher/add.html'
class TeacherDeleteView(DeleteView):
    """Delete a teacher (the original docstring wrongly said 'student')."""
    pk_url_kwarg = 'id'
    queryset = Teacher.objects.all()
    success_url = reverse_lazy("manager:teacher_list")
    template_name = 'manager/teacher/delete.html'
class StudentStarView(UpdateView):
    """Update a TeacherStudent link row (presumably toggling a 'star' flag,
    judging by the view name -- confirm against the form/template).

    The row is located by the 'id' (teacher) and 'student_id' URL kwargs
    rather than a single pk.
    """
    model = TeacherStudent
    queryset = TeacherStudent.objects.all()
    success_url = reverse_lazy("manager:teacher_list")
    def get_object(self, queryset=None):
        """Exact teacher/student match; None when no such link exists."""
        return TeacherStudent.objects.filter(
            teacher_id=self.kwargs.get('id'),
            student_id=self.kwargs.get('student_id'),
        ).first()
import os
import argparse
import json
import time
from datetime import datetime
import ipc
ipcSocketPath = os.path.split(os.path.realpath(__file__))[0] + os.sep + 'ipcSocket'
################################################################################
# Logging
################################################################################
def appendFile(path, fileText):
    """Append fileText to the file at path, creating the file if needed.

    I/O failures are swallowed on purpose: this backs the logger itself,
    so there is nowhere useful to report a write error.
    """
    try:
        # Context manager guarantees the handle is closed even on a partial
        # write; the original leaked the handle when write() raised.
        with open(path, 'a') as fileId:
            fileId.write(fileText)
    except OSError:
        # Best-effort: keep ignoring disk problems, but no longer hide
        # programming errors (e.g. TypeError) behind a bare `except:`.
        pass
def logToFile(printMsg, unimportantMsg = False):
    """Append printMsg, prefixed with a full timestamp, to '<module>.log'
    placed next to this script.

    unimportantMsg is kept for interface compatibility but is currently
    unused -- every message is written.
    """
    logNewLine = '\n'
    # The original also built a short "HH:MM mm/dd" timestamp that was never
    # used anywhere; that dead computation has been removed.
    longTimeStr = str(datetime.now())
    logMsg = '[' + longTimeStr + "] " + printMsg
    # '<script name>.log' in the same directory as this module.
    logPath = os.path.splitext(os.path.realpath(__file__))[0] + '.log'
    appendFile(logPath, logMsg + logNewLine)
################################################################################
# IPC Client For New Json Settings
################################################################################
class Event(ipc.Message):
    """Client-side mirror of the daemon's Event IPC message.

    Carries an event-type string plus arbitrary keyword properties; the
    ipc.Message machinery uses _get_args to (de)serialize instances.
    """
    def __init__(self, event_type, **properties):
        self.type = event_type
        self.properties = properties
    def _get_args(self):
        # (positional, keyword) constructor arguments for serialization;
        # must stay in sync with __init__'s signature.
        return [self.type], self.properties
class Response(ipc.Message):
    """Client-side mirror of the daemon's Response IPC message.

    Wraps the raw response text (JSON settings/status from the daemon).
    """
    def __init__(self, text):
        self.text = text
    def _get_args(self):
        # (positional, keyword) constructor arguments for serialization.
        return [self.text], {}
def setGet(newSettings = None): # Returns dictionary
    """Send newSettings to the daemon over the IPC socket and return its
    reply parsed as a dictionary, or None on any failure.

    When newSettings is None this is a pure query: the daemon answers with
    its current settings/status without changing anything.
    """
    global ipcSocketPath
    if newSettings is None:
        kwargs = {'query':''} # This can really be anything other than 'json'
    else:
        kwargs = {'json':json.dumps(newSettings)}
    user_input = [{'class': 'Event', 'args': ['newSettings'], 'kwargs': kwargs}]
    objects = ipc.Message.deserialize(user_input)
    try:
        with ipc.Client(ipcSocketPath) as client:
            response = client.send(objects)
    except Exception as e:
        logToFile("failed to send: " + str(e))
        # BUG FIX: the original fell through here with `response` unbound,
        # so json.loads raised NameError and was mislogged as a JSON error.
        return None
    try:
        responseDict = json.loads(response[0].text)
    except Exception as e:
        logToFile("failed to load json: " + str(e))
        responseDict = None
    return responseDict
# Json Keywords
# Settings the daemon accepts; each name doubles as a --flag below and as a
# key in the JSON settings payload.
SettingsKeyWords = [
    "TimeOfDayToStart",
    "TimeOfDayToStop",
    "SwitchTemperature",
    "SwitchComfortRange",
    "SwitchHeatCool",
    "SmartPlugIpAddr",
    "MinTimeBetweenChangingSwitchState",
    "MinTimeBetweenRetryingSwitchChange",
    "TimeBetweenTempCheck",
    "InvalidTempLow",
    "InvalidTempHigh",
    "SwitchStateAfterTimeOfDayStop",
    "DeviceName",
    "DeviceColor"
]
# Read-only status fields reported back by the daemon.
StatusKeyWords = [
    "Temp",
    "SwitchState"
]
################################################################################
# Program Start
################################################################################
#logToFile("start")
#logToFile(os.getcwd())
# Config argparse
# One optional string flag per settings keyword, e.g. --SwitchTemperature.
parser = argparse.ArgumentParser()
for keyword in SettingsKeyWords:
    cmd = '--' + keyword
    parser.add_argument(cmd, type=str, action="store", dest=keyword, help=keyword)
args = parser.parse_args()
# Update Dict with values from command line.
settingsDict = dict()
needToChange = False
for arg in vars(args):
    argVal = getattr(args, arg)
    if argVal != None:
        needToChange = True
        settingsDict[arg] = str(argVal)
# Apply the changes.
# Only push settings when at least one flag was supplied; otherwise query.
if needToChange:
    responseDict = setGet(settingsDict)
else:
    responseDict = setGet()
# Print string with all the new values.
# Emit one pipe-delimited line: settings first, then status; a blank field
# marks a value the daemon did not return (or a failed IPC call).
printStr = ''
for keyword in SettingsKeyWords:
    try:
        printStr += (responseDict["settings"][keyword] + "|")
    except:
        printStr += " |"
for keyword in StatusKeyWords:
    try:
        printStr += (responseDict["status"][keyword] + "|")
    except:
        printStr += " |"
print(printStr)
import argparse
import json
import time
from datetime import datetime
import ipc
ipcSocketPath = os.path.split(os.path.realpath(__file__))[0] + os.sep + 'ipcSocket'
################################################################################
# Logging
################################################################################
def appendFile(path, fileText):
    """Append fileText to the file at path, creating the file if needed.

    I/O failures are swallowed on purpose: this backs the logger itself,
    so there is nowhere useful to report a write error.
    """
    try:
        # Context manager guarantees the handle is closed even on a partial
        # write; the original leaked the handle when write() raised.
        with open(path, 'a') as fileId:
            fileId.write(fileText)
    except OSError:
        # Best-effort: keep ignoring disk problems, but no longer hide
        # programming errors (e.g. TypeError) behind a bare `except:`.
        pass
def logToFile(printMsg, unimportantMsg = False):
    """Append printMsg, prefixed with a full timestamp, to '<module>.log'
    placed next to this script.

    unimportantMsg is kept for interface compatibility but is currently
    unused -- every message is written.
    """
    logNewLine = '\n'
    # The original also built a short "HH:MM mm/dd" timestamp that was never
    # used anywhere; that dead computation has been removed.
    longTimeStr = str(datetime.now())
    logMsg = '[' + longTimeStr + "] " + printMsg
    # '<script name>.log' in the same directory as this module.
    logPath = os.path.splitext(os.path.realpath(__file__))[0] + '.log'
    appendFile(logPath, logMsg + logNewLine)
################################################################################
# IPC Client For New Json Settings
################################################################################
class Event(ipc.Message):
    """Client-side mirror of the daemon's Event IPC message.

    Carries an event-type string plus arbitrary keyword properties; the
    ipc.Message machinery uses _get_args to (de)serialize instances.
    """
    def __init__(self, event_type, **properties):
        self.type = event_type
        self.properties = properties
    def _get_args(self):
        # (positional, keyword) constructor arguments for serialization;
        # must stay in sync with __init__'s signature.
        return [self.type], self.properties
class Response(ipc.Message):
    """Client-side mirror of the daemon's Response IPC message.

    Wraps the raw response text (JSON settings/status from the daemon).
    """
    def __init__(self, text):
        self.text = text
    def _get_args(self):
        # (positional, keyword) constructor arguments for serialization.
        return [self.text], {}
def setGet(newSettings = None): # Returns dictionary
    """Send newSettings to the daemon over the IPC socket and return its
    reply parsed as a dictionary, or None on any failure.

    When newSettings is None this is a pure query: the daemon answers with
    its current settings/status without changing anything.
    """
    global ipcSocketPath
    if newSettings is None:
        kwargs = {'query':''} # This can really be anything other than 'json'
    else:
        kwargs = {'json':json.dumps(newSettings)}
    user_input = [{'class': 'Event', 'args': ['newSettings'], 'kwargs': kwargs}]
    objects = ipc.Message.deserialize(user_input)
    try:
        with ipc.Client(ipcSocketPath) as client:
            response = client.send(objects)
    except Exception as e:
        logToFile("failed to send: " + str(e))
        # BUG FIX: the original fell through here with `response` unbound,
        # so json.loads raised NameError and was mislogged as a JSON error.
        return None
    try:
        responseDict = json.loads(response[0].text)
    except Exception as e:
        logToFile("failed to load json: " + str(e))
        responseDict = None
    return responseDict
# Json Keywords
# Settings the daemon accepts; each name doubles as a --flag below and as a
# key in the JSON settings payload.
SettingsKeyWords = [
    "TimeOfDayToStart",
    "TimeOfDayToStop",
    "SwitchTemperature",
    "SwitchComfortRange",
    "SwitchHeatCool",
    "SmartPlugIpAddr",
    "MinTimeBetweenChangingSwitchState",
    "MinTimeBetweenRetryingSwitchChange",
    "TimeBetweenTempCheck",
    "InvalidTempLow",
    "InvalidTempHigh",
    "SwitchStateAfterTimeOfDayStop",
    "DeviceName",
    "DeviceColor"
]
# Read-only status fields reported back by the daemon.
StatusKeyWords = [
    "Temp",
    "SwitchState"
]
################################################################################
# Program Start
################################################################################
#logToFile("start")
#logToFile(os.getcwd())
# Config argparse
# One optional string flag per settings keyword, e.g. --SwitchTemperature.
parser = argparse.ArgumentParser()
for keyword in SettingsKeyWords:
    cmd = '--' + keyword
    parser.add_argument(cmd, type=str, action="store", dest=keyword, help=keyword)
args = parser.parse_args()
# Update Dict with values from command line.
settingsDict = dict()
needToChange = False
for arg in vars(args):
    argVal = getattr(args, arg)
    if argVal != None:
        needToChange = True
        settingsDict[arg] = str(argVal)
# Apply the changes.
# Only push settings when at least one flag was supplied; otherwise query.
if needToChange:
    responseDict = setGet(settingsDict)
else:
    responseDict = setGet()
# Print string with all the new values.
# Emit one pipe-delimited line: settings first, then status; a blank field
# marks a value the daemon did not return (or a failed IPC call).
printStr = ''
for keyword in SettingsKeyWords:
    try:
        printStr += (responseDict["settings"][keyword] + "|")
    except:
        printStr += " |"
for keyword in StatusKeyWords:
    try:
        printStr += (responseDict["status"][keyword] + "|")
    except:
        printStr += " |"
print(printStr)
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from warnings import warn
class CategoricalColumnTransformer(BaseEstimator, TransformerMixin):
    """
    This transformer is useful for describing an object as a bag of the
    categorical values that have been used to represent it within a pandas
    DataFrame.
    It takes a categorical column name to groupby, object_column_name, and one
    or more categorical columns to be used to describe these objects,
    descriptor_column_name. Then it returns a Series with an index being the
    unique entries of your object_column_name and the values being a list of
    the appropriate categorical values from your descriptor_column_name.
    It can be thought of as a PivotTableTransformer if you'd like.
    Parameters
    ----------
    object_column_name: string
        The column name from the DataFrame where our object values can be
        found. This will be the thing we are grouping by.
    descriptor_column_name: string or list
        The name or names of the categorical column(s) whose values will be
        used for describing our objects. If you are using multiple names it's
        recommended that you set include_column_name=True.
    include_column_name: bool (default = False)
        Should the column name be prepended to each value?  Useful if you
        intend to combine values from multiple categorical columns afterwards.
    unique_values: bool (default = False)
        Should we deduplicate the values within each group before building
        our list representation?
    """
    def __init__(
        self,
        object_column_name,
        descriptor_column_name,
        include_column_name=False,
        unique_values=False,
    ):
        self.object_column_name = object_column_name
        self.descriptor_column_name = descriptor_column_name
        # Normalise to a list so downstream code handles exactly one case.
        # (isinstance instead of `type(...) == str` also accepts str
        # subclasses.)
        if isinstance(self.descriptor_column_name, str):
            self.descriptor_column_name_ = [self.descriptor_column_name]
        else:
            self.descriptor_column_name_ = self.descriptor_column_name
        self.include_column_name = include_column_name
        self.unique_values = unique_values
        if (
            (self.include_column_name is False)
            and isinstance(self.descriptor_column_name, list)
            and (len(self.descriptor_column_name) > 1)
        ):
            warn(
                "It is recommended that if you are aggregating "
                "multiple columns that you set include_column_name=True"
            )

    def _aggregate_column(self, grouped_frame, column):
        """Collect one descriptor column's non-null values into a per-object
        list, honoring the unique_values / include_column_name options.

        This replaces four near-identical copy-pasted branches in the
        original implementation.
        """
        if self.include_column_name:
            label = lambda value: column + ":" + value
        else:
            # Identity: leave the raw value untouched (it need not be a str).
            label = lambda value: value

        def collect(values):
            if self.unique_values:
                values = values.unique()
            return [label(value) for value in values if pd.notna(value)]

        return grouped_frame[column].agg(collect)

    def fit_transform(self, X, y=None, **fit_params):
        """
        Group X by object_column_name and represent each object as the list
        of categorical values it took in the descriptor column(s).

        Parameters
        ----------
        X: pd.DataFrame
            A pandas dataframe containing the object_column_name and
            descriptor_column_name columns given to the constructor.

        Returns
        -------
        pandas Series
            Indexed by the unique entries of object_column_name; each value
            is the (concatenated) list of descriptor values for that object.

        Raises
        ------
        ValueError
            If any required column is missing from X.
        """
        # Check that the dataframe has the appropriate columns
        required_columns = set([self.object_column_name] + self.descriptor_column_name_)
        if not required_columns.issubset(X.columns):
            raise ValueError(
                f"Sorry the required column(s) {set(required_columns).difference(set(X.columns))} are not "
                f"present in your data frame. \n"
                f"Please either specify a new instance or apply to a different data frame. "
            )
        # Compute a single groupby ahead of time to save on compute
        grouped_frame = X.groupby(self.object_column_name)
        aggregated_columns = [
            self._aggregate_column(grouped_frame, column)
            for column in self.descriptor_column_name_
        ]
        # Summing object (list) columns row-wise concatenates the per-column
        # lists into one bag per object.
        reduced = pd.concat(aggregated_columns, axis="columns").sum(axis=1)
        return reduced

    def fit(self, X, y=None, **fit_params):
        """Fit simply runs the transform for validation; returns self."""
        self.fit_transform(X, y, **fit_params)
        return self
from sklearn.base import BaseEstimator, TransformerMixin
from warnings import warn
class CategoricalColumnTransformer(BaseEstimator, TransformerMixin):
    """
    This transformer is useful for describing an object as a bag of the
    categorical values that have been used to represent it within a pandas
    DataFrame.
    It takes a categorical column name to groupby, object_column_name, and one
    or more categorical columns to be used to describe these objects,
    descriptor_column_name. Then it returns a Series with an index being the
    unique entries of your object_column_name and the values being a list of
    the appropriate categorical values from your descriptor_column_name.
    It can be thought of as a PivotTableTransformer if you'd like.
    Parameters
    ----------
    object_column_name: string
        The column name from the DataFrame where our object values can be
        found. This will be the thing we are grouping by.
    descriptor_column_name: string or list
        The name or names of the categorical column(s) whose values will be
        used for describing our objects. If you are using multiple names it's
        recommended that you set include_column_name=True.
    include_column_name: bool (default = False)
        Should the column name be prepended to each value?  Useful if you
        intend to combine values from multiple categorical columns afterwards.
    unique_values: bool (default = False)
        Should we deduplicate the values within each group before building
        our list representation?
    """
    def __init__(
        self,
        object_column_name,
        descriptor_column_name,
        include_column_name=False,
        unique_values=False,
    ):
        self.object_column_name = object_column_name
        self.descriptor_column_name = descriptor_column_name
        # Normalise to a list so downstream code handles exactly one case.
        # (isinstance instead of `type(...) == str` also accepts str
        # subclasses.)
        if isinstance(self.descriptor_column_name, str):
            self.descriptor_column_name_ = [self.descriptor_column_name]
        else:
            self.descriptor_column_name_ = self.descriptor_column_name
        self.include_column_name = include_column_name
        self.unique_values = unique_values
        if (
            (self.include_column_name is False)
            and isinstance(self.descriptor_column_name, list)
            and (len(self.descriptor_column_name) > 1)
        ):
            warn(
                "It is recommended that if you are aggregating "
                "multiple columns that you set include_column_name=True"
            )

    def _aggregate_column(self, grouped_frame, column):
        """Collect one descriptor column's non-null values into a per-object
        list, honoring the unique_values / include_column_name options.

        This replaces four near-identical copy-pasted branches in the
        original implementation.
        """
        if self.include_column_name:
            label = lambda value: column + ":" + value
        else:
            # Identity: leave the raw value untouched (it need not be a str).
            label = lambda value: value

        def collect(values):
            if self.unique_values:
                values = values.unique()
            return [label(value) for value in values if pd.notna(value)]

        return grouped_frame[column].agg(collect)

    def fit_transform(self, X, y=None, **fit_params):
        """
        Group X by object_column_name and represent each object as the list
        of categorical values it took in the descriptor column(s).

        Parameters
        ----------
        X: pd.DataFrame
            A pandas dataframe containing the object_column_name and
            descriptor_column_name columns given to the constructor.

        Returns
        -------
        pandas Series
            Indexed by the unique entries of object_column_name; each value
            is the (concatenated) list of descriptor values for that object.

        Raises
        ------
        ValueError
            If any required column is missing from X.
        """
        # Check that the dataframe has the appropriate columns
        required_columns = set([self.object_column_name] + self.descriptor_column_name_)
        if not required_columns.issubset(X.columns):
            raise ValueError(
                f"Sorry the required column(s) {set(required_columns).difference(set(X.columns))} are not "
                f"present in your data frame. \n"
                f"Please either specify a new instance or apply to a different data frame. "
            )
        # Compute a single groupby ahead of time to save on compute
        grouped_frame = X.groupby(self.object_column_name)
        aggregated_columns = [
            self._aggregate_column(grouped_frame, column)
            for column in self.descriptor_column_name_
        ]
        # Summing object (list) columns row-wise concatenates the per-column
        # lists into one bag per object.
        reduced = pd.concat(aggregated_columns, axis="columns").sum(axis=1)
        return reduced

    def fit(self, X, y=None, **fit_params):
        """Fit simply runs the transform for validation; returns self."""
        self.fit_transform(X, y, **fit_params)
        return self
from datetime import datetime, date, timedelta
import isodate
import re
def convert_to_interval_and_resolution(values):
    """Collapse a list of ISO timestamp strings into one or more
    'start/end/resolution' interval strings.

    The gap between the first two timestamps defines the expected cadence;
    any larger gap splits the sequence into a new range.

    Raises
    ------
    ValueError
        If the cadence is not one of 1s / 1min / 1h / 24h.  (The original
        used `assert False`, which disappears under `python -O`.)
    """
    # Drop fractional seconds if any, then parse.
    times = [datetime.strptime(v.split('.')[0], "%Y-%m-%dT%H:%M:%S")
             for v in values]
    expected_timedelta = times[1] - times[0]
    ranges = []
    start = times[0]
    # (range(len(times) - 1) avoids the needless times[:-1] list copy.)
    for i in range(len(times) - 1):
        if (times[i + 1] - times[i]) > expected_timedelta:
            ranges.append((start, times[i]))
            start = times[i + 1]
    ranges.append((start, times[-1]))
    # Map the cadence onto the ISO-8601 duration strings used downstream.
    # (local renamed from the original's misspelled 'inteval_string')
    if expected_timedelta == timedelta(seconds=1):
        interval_string = 'PT1S'
    elif expected_timedelta == timedelta(seconds=60):
        interval_string = 'PT1M'
    elif expected_timedelta == timedelta(hours=1):
        interval_string = 'PT60M'
    elif expected_timedelta == timedelta(hours=24):
        interval_string = 'P1D'
    else:
        raise ValueError("Unsupported time interval.")
    return ['{}Z/{}Z/{}'.format(range_start, range_end, interval_string)
            for range_start, range_end in ranges]
def to_list(val):
    """Wrap val in a single-element list unless it already behaves like one.

    Duck-typed: anything exposing a .reverse method (i.e. a list) passes
    through untouched; strings, tuples, etc. are boxed.
    """
    if hasattr(val, 'reverse'):
        return val
    return [val]
# Add duration to end date using
# ISO 8601 duration keys
def determine_end_date(key, date):
    """Return `date` advanced by the ISO-8601 duration string `key`
    (e.g. 'P5D', 'P1M'), parsed via the isodate package."""
    return date + isodate.parse_duration(key)
# This method takes a layer and a temporal
# value and translates it to start and end dates
def process_temporal(wv_layer, value):
    """Populate wv_layer's "period", "startDate", "endDate" and "dateRanges"
    entries from one or more 'start/end/duration' temporal strings.

    value may be a single interval string or a list; a list of bare
    timestamps (no '/') is first collapsed into interval strings.

    Raises
    ------
    ValueError
        Propagated unchanged when a date/time component fails to parse.
    """
    try:
        if isinstance(value, list) and '/' not in value[0]:
            value = convert_to_interval_and_resolution(value)
        ranges = to_list(value)
        # A time component ('T') marks a sub-daily layer; otherwise the
        # duration suffix of the first range picks the period.
        if "T" in ranges[0]:
            wv_layer["period"] = "subdaily"
        elif ranges[0].endswith("Y"):
            wv_layer["period"] = "yearly"
        elif ranges[0].endswith("M"):
            wv_layer["period"] = "monthly"
        else:
            wv_layer["period"] = "daily"
        start_date = datetime.max
        end_date = datetime.min
        date_range_start, date_range_end, range_interval = [], [], []
        # Loop variable renamed: the original shadowed the builtin `range`.
        for time_range in ranges:
            times = time_range.split('/')
            if wv_layer["period"] in ("daily", "monthly", "yearly"):
                range_start = datetime.strptime(times[0], "%Y-%m-%d")
                range_end = datetime.strptime(times[1], "%Y-%m-%d")
                start_date = min(start_date, range_start)
                end_date = max(end_date, range_end)
                date_range_start.append(range_start.strftime("%Y-%m-%dT%H:%M:%SZ"))
                date_range_end.append(range_end.strftime("%Y-%m-%dT%H:%M:%SZ"))
                if times[2] != "P1D":
                    # Pad the overall end date by the declared duration.
                    end_date = determine_end_date(times[2], end_date)
                    range_interval.append(re.search(r'\d+', times[2]).group())
            else:
                start_str = times[0].replace('T', ' ').replace('Z', '')
                end_str = times[1].replace('T', ' ').replace('Z', '')
                range_start = datetime.strptime(start_str, "%Y-%m-%d %H:%M:%S")
                range_end = datetime.strptime(end_str, "%Y-%m-%d %H:%M:%S")
                start_date = min(start_date, range_start)
                end_date = max(end_date, range_end)
                date_range_start.append(range_start.strftime("%Y-%m-%dT%H:%M:%SZ"))
                date_range_end.append(range_end.strftime("%Y-%m-%dT%H:%M:%SZ"))
                range_interval.append(re.search(r'\d+', times[2]).group())
        wv_layer["startDate"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
        if end_date != datetime.min:
            wv_layer["endDate"] = end_date.strftime("%Y-%m-%dT%H:%M:%SZ")
        if date_range_start and date_range_end:
            # NOTE(review): plain P1D daily ranges never append an interval,
            # so zip() truncates and dateRanges comes out empty.  Preserved
            # as-is since downstream config may depend on it -- confirm.
            wv_layer["dateRanges"] = [
                {"startDate": s, "endDate": e, "dateInterval": i}
                for s, e, i in zip(date_range_start, date_range_end, range_interval)
            ]
    except ValueError:
        # Re-raise parse failures unchanged.  (An unreachable
        # `raise Exception(...)` that followed this bare raise was removed.)
        raise
    return wv_layer
import isodate
import re
def convert_to_interval_and_resolution(values):
    """Collapse a list of ISO timestamp strings into one or more
    'start/end/resolution' interval strings.

    The gap between the first two timestamps defines the expected cadence;
    any larger gap splits the sequence into a new range.

    Raises
    ------
    ValueError
        If the cadence is not one of 1s / 1min / 1h / 24h.  (The original
        used `assert False`, which disappears under `python -O`.)
    """
    # Drop fractional seconds if any, then parse.
    times = [datetime.strptime(v.split('.')[0], "%Y-%m-%dT%H:%M:%S")
             for v in values]
    expected_timedelta = times[1] - times[0]
    ranges = []
    start = times[0]
    # (range(len(times) - 1) avoids the needless times[:-1] list copy.)
    for i in range(len(times) - 1):
        if (times[i + 1] - times[i]) > expected_timedelta:
            ranges.append((start, times[i]))
            start = times[i + 1]
    ranges.append((start, times[-1]))
    # Map the cadence onto the ISO-8601 duration strings used downstream.
    # (local renamed from the original's misspelled 'inteval_string')
    if expected_timedelta == timedelta(seconds=1):
        interval_string = 'PT1S'
    elif expected_timedelta == timedelta(seconds=60):
        interval_string = 'PT1M'
    elif expected_timedelta == timedelta(hours=1):
        interval_string = 'PT60M'
    elif expected_timedelta == timedelta(hours=24):
        interval_string = 'P1D'
    else:
        raise ValueError("Unsupported time interval.")
    return ['{}Z/{}Z/{}'.format(range_start, range_end, interval_string)
            for range_start, range_end in ranges]
def to_list(val):
    """Wrap val in a single-element list unless it already behaves like one.

    Duck-typed: anything exposing a .reverse method (i.e. a list) passes
    through untouched; strings, tuples, etc. are boxed.
    """
    if hasattr(val, 'reverse'):
        return val
    return [val]
# Add duration to end date using
# ISO 8601 duration keys
def determine_end_date(key, date):
    """Return `date` advanced by the ISO-8601 duration string `key`
    (e.g. 'P5D', 'P1M'), parsed via the isodate package."""
    return date + isodate.parse_duration(key)
# This method takes a layer and a temporal
# value and translates it to start and end dates
def process_temporal(wv_layer, value):
    """Populate wv_layer's "period", "startDate", "endDate" and "dateRanges"
    entries from one or more 'start/end/duration' temporal strings.

    value may be a single interval string or a list; a list of bare
    timestamps (no '/') is first collapsed into interval strings.

    Raises
    ------
    ValueError
        Propagated unchanged when a date/time component fails to parse.
    """
    try:
        if isinstance(value, list) and '/' not in value[0]:
            value = convert_to_interval_and_resolution(value)
        ranges = to_list(value)
        # A time component ('T') marks a sub-daily layer; otherwise the
        # duration suffix of the first range picks the period.
        if "T" in ranges[0]:
            wv_layer["period"] = "subdaily"
        elif ranges[0].endswith("Y"):
            wv_layer["period"] = "yearly"
        elif ranges[0].endswith("M"):
            wv_layer["period"] = "monthly"
        else:
            wv_layer["period"] = "daily"
        start_date = datetime.max
        end_date = datetime.min
        date_range_start, date_range_end, range_interval = [], [], []
        # Loop variable renamed: the original shadowed the builtin `range`.
        for time_range in ranges:
            times = time_range.split('/')
            if wv_layer["period"] in ("daily", "monthly", "yearly"):
                range_start = datetime.strptime(times[0], "%Y-%m-%d")
                range_end = datetime.strptime(times[1], "%Y-%m-%d")
                start_date = min(start_date, range_start)
                end_date = max(end_date, range_end)
                date_range_start.append(range_start.strftime("%Y-%m-%dT%H:%M:%SZ"))
                date_range_end.append(range_end.strftime("%Y-%m-%dT%H:%M:%SZ"))
                if times[2] != "P1D":
                    # Pad the overall end date by the declared duration.
                    end_date = determine_end_date(times[2], end_date)
                    range_interval.append(re.search(r'\d+', times[2]).group())
            else:
                start_str = times[0].replace('T', ' ').replace('Z', '')
                end_str = times[1].replace('T', ' ').replace('Z', '')
                range_start = datetime.strptime(start_str, "%Y-%m-%d %H:%M:%S")
                range_end = datetime.strptime(end_str, "%Y-%m-%d %H:%M:%S")
                start_date = min(start_date, range_start)
                end_date = max(end_date, range_end)
                date_range_start.append(range_start.strftime("%Y-%m-%dT%H:%M:%SZ"))
                date_range_end.append(range_end.strftime("%Y-%m-%dT%H:%M:%SZ"))
                range_interval.append(re.search(r'\d+', times[2]).group())
        wv_layer["startDate"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
        if end_date != datetime.min:
            wv_layer["endDate"] = end_date.strftime("%Y-%m-%dT%H:%M:%SZ")
        if date_range_start and date_range_end:
            # NOTE(review): plain P1D daily ranges never append an interval,
            # so zip() truncates and dateRanges comes out empty.  Preserved
            # as-is since downstream config may depend on it -- confirm.
            wv_layer["dateRanges"] = [
                {"startDate": s, "endDate": e, "dateInterval": i}
                for s, e, i in zip(date_range_start, date_range_end, range_interval)
            ]
    except ValueError:
        # Re-raise parse failures unchanged.  (An unreachable
        # `raise Exception(...)` that followed this bare raise was removed.)
        raise
    return wv_layer
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .prior import Prior
from src.modules.nn_layers import *
from src.modules.distributions import *
from src.utils import args
class VampPrior(Prior):
    """
    VAE with a VampPrior
    https://arxiv.org/abs/1705.07120
    Parameters
    ----------
    • output_shape: shape of one pseudo-input (excluding the batch dim).
    • n_components: number of components. Default: 512
    • p_mean: Initialization of pseudo inputs mean. Default: 0.5
    • p_std: Initialization of pseudo inputs std. Default: 0.5
      (The original docstring claimed -0.05/0.01; those are normal_init's
      own defaults, but the constructor passes 0.5/0.5 -- confirm which
      initialization is actually intended.)
    """
    def __init__(self, output_shape, n_components=512, p_mean=0.5, p_std=0.5):
        super().__init__()
        self.output_shape = output_shape
        self.n_components = n_components
        # init pseudo-inputs: a linear map from one-hot component ids to
        # flattened pseudo-input values, squashed into [-1, 1].
        self.means = nn.Sequential(
            nn.Linear(self.n_components, np.prod(self.output_shape)),
            nn.Hardtanh(min_val=-1., max_val=1.)
        )
        self.normal_init(self.means[0], p_mean, p_std)
        # create an idle input for calling pseudo-inputs: an identity
        # matrix, so feeding row k selects pseudo-input k.
        self.idle_input = Variable(torch.eye(self.n_components, self.n_components),
                                   requires_grad=False).to(args.device)
    def normal_init(self, m, mean=-0.05, std=0.01):
        # In-place N(mean, std) init of the layer's weights (bias untouched).
        m.weight.data.normal_(mean, std)
    def sample(self, n_samples, encoder):
        """Draw n_samples latents by encoding the first n_samples
        pseudo-inputs and reparameterizing (assumes n_samples <= n_components
        -- confirm callers respect this)."""
        means = self.means(self.idle_input)[0:n_samples]
        means = means.view(means.shape[0], *self.output_shape)
        z_sample_gen_mean, z_sample_gen_logvar = encoder.forward(means)
        z_sample_rand = reparameterize(z_sample_gen_mean, z_sample_gen_logvar)
        return z_sample_rand
    def forward(self, x, encoder, dim=1):
        """Return log p(x) under the VampPrior: a uniform mixture over the
        encoded pseudo-input posteriors, reduced via log-sum-exp."""
        y = self.means(self.idle_input)
        y = y.view(y.shape[0], *self.output_shape)
        u_q_mean, u_q_logvar = encoder.forward(y)
        # expand z so x broadcasts against every mixture component
        u_expand = x.unsqueeze(1)
        means = u_q_mean.unsqueeze(0)
        logvars = u_q_logvar.unsqueeze(0)
        # log N(x | mu_k, sigma_k) - log K  (uniform mixture weights)
        a = log_normal_diag(u_expand, means, logvars, dim=2) \
            - math.log(self.n_components)
        # numerically stable log-sum-exp over components
        a_max, _ = torch.max(a, 1)
        log_prior = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), dim=dim))
        return log_prior
    def __str__(self):
        return "VampPrior"
# Import-only module: nothing runs when executed directly.
if __name__ == "__main__":
    pass
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .prior import Prior
from src.modules.nn_layers import *
from src.modules.distributions import *
from src.utils import args
class VampPrior(Prior):
    """
    VAE with a VampPrior
    https://arxiv.org/abs/1705.07120
    Parameters
    ----------
    • output_shape: shape of one pseudo-input (excluding the batch dim).
    • n_components: number of components. Default: 512
    • p_mean: Initialization of pseudo inputs mean. Default: 0.5
    • p_std: Initialization of pseudo inputs std. Default: 0.5
      (The original docstring claimed -0.05/0.01; those are normal_init's
      own defaults, but the constructor passes 0.5/0.5 -- confirm which
      initialization is actually intended.)
    """
    def __init__(self, output_shape, n_components=512, p_mean=0.5, p_std=0.5):
        super().__init__()
        self.output_shape = output_shape
        self.n_components = n_components
        # init pseudo-inputs: a linear map from one-hot component ids to
        # flattened pseudo-input values, squashed into [-1, 1].
        self.means = nn.Sequential(
            nn.Linear(self.n_components, np.prod(self.output_shape)),
            nn.Hardtanh(min_val=-1., max_val=1.)
        )
        self.normal_init(self.means[0], p_mean, p_std)
        # create an idle input for calling pseudo-inputs: an identity
        # matrix, so feeding row k selects pseudo-input k.
        self.idle_input = Variable(torch.eye(self.n_components, self.n_components),
                                   requires_grad=False).to(args.device)
    def normal_init(self, m, mean=-0.05, std=0.01):
        # In-place N(mean, std) init of the layer's weights (bias untouched).
        m.weight.data.normal_(mean, std)
    def sample(self, n_samples, encoder):
        """Draw n_samples latents by encoding the first n_samples
        pseudo-inputs and reparameterizing (assumes n_samples <= n_components
        -- confirm callers respect this)."""
        means = self.means(self.idle_input)[0:n_samples]
        means = means.view(means.shape[0], *self.output_shape)
        z_sample_gen_mean, z_sample_gen_logvar = encoder.forward(means)
        z_sample_rand = reparameterize(z_sample_gen_mean, z_sample_gen_logvar)
        return z_sample_rand
    def forward(self, x, encoder, dim=1):
        """Return log p(x) under the VampPrior: a uniform mixture over the
        encoded pseudo-input posteriors, reduced via log-sum-exp."""
        y = self.means(self.idle_input)
        y = y.view(y.shape[0], *self.output_shape)
        u_q_mean, u_q_logvar = encoder.forward(y)
        # expand z so x broadcasts against every mixture component
        u_expand = x.unsqueeze(1)
        means = u_q_mean.unsqueeze(0)
        logvars = u_q_logvar.unsqueeze(0)
        # log N(x | mu_k, sigma_k) - log K  (uniform mixture weights)
        a = log_normal_diag(u_expand, means, logvars, dim=2) \
            - math.log(self.n_components)
        # numerically stable log-sum-exp over components
        a_max, _ = torch.max(a, 1)
        log_prior = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), dim=dim))
        return log_prior
    def __str__(self):
        return "VampPrior"
# Import-only module: nothing runs when executed directly.
if __name__ == "__main__":
    pass
from rest_framework.decorators import api_view, permission_classes
from rest_framework import status
from rest_framework.response import Response
import datetime
from .models import User
from .serializer import AccountRegistrationSerializer, UserSigninSerializer
@api_view(['POST'])
@permission_classes([])
def signup(request):
    """Register a new user and clone the default profile picture for them.

    Returns 201 with the new user's id/email/username on success; on any
    serializer validation failure falls through to a generic error message.
    """
    serializer = AccountRegistrationSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        user = User.objects.get(email=request.data['email'])
        user.active = True
        user.last_login = datetime.datetime.now()
        data = {}
        data['id'] = user.id
        data['email'] = user.email
        data['username'] = user.username
        # Copy the default avatar into a per-user file; context managers
        # replace the original's manual open/close pairs.
        with open('media/profile/image/default.txt', 'r') as src:
            default = src.read()
        with open('media/profile/image/' + str(user.id) + '.txt', 'a') as newsrc:
            newsrc.write(default)
        user.profile_picture = 'media/profile/image/' + str(user.id) + '.txt'
        # BUG FIX: active/last_login/profile_picture were assigned but never
        # persisted -- the original returned without calling save().
        user.save()
        return Response({'message': 'New user created' , 'user' : data}, status=status.HTTP_201_CREATED)
    return Response({'message':'user with this email address already exists.'})
@api_view(['POST'])
@permission_classes([])
def signin(request):
    """Authenticate by email/password and return the user payload (202)."""
    if request.method == 'POST':
        post_data = dict(request.POST)
        check_user = User.objects.filter(email=post_data['email'][0])
        if len(check_user) == 0:
            return Response({"message":"this email does not exist!"} )
        # BUG FIX: every `check_user[0]` re-evaluates the queryset and can
        # hand back a fresh instance, so the original's attribute writes
        # landed on throwaway objects.  Fetch once and save the mutations.
        user = check_user[0]
        if user.check_password(post_data['password'][0]):
            user.active = True
            user.last_login = datetime.datetime.now()
            user.save()
            serializer = UserSigninSerializer(user)
            data = serializer.data
            data['username'] = user.username
            return Response({"message":"wellcome" , 'user': data} , status=status.HTTP_202_ACCEPTED)
        return Response({"message": "password or email is not correct"} )
from rest_framework import status
from rest_framework.response import Response
import datetime
from .models import User
from .serializer import AccountRegistrationSerializer, UserSigninSerializer
@api_view(['POST'])
@permission_classes([])
def signup(request):
    """Register a new user and clone the default profile picture for them.

    Returns 201 with the new user's id/email/username on success; on any
    serializer validation failure falls through to a generic error message.
    """
    serializer = AccountRegistrationSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        user = User.objects.get(email=request.data['email'])
        user.active = True
        user.last_login = datetime.datetime.now()
        data = {}
        data['id'] = user.id
        data['email'] = user.email
        data['username'] = user.username
        # Copy the default avatar into a per-user file; context managers
        # replace the original's manual open/close pairs.
        with open('media/profile/image/default.txt', 'r') as src:
            default = src.read()
        with open('media/profile/image/' + str(user.id) + '.txt', 'a') as newsrc:
            newsrc.write(default)
        user.profile_picture = 'media/profile/image/' + str(user.id) + '.txt'
        # BUG FIX: active/last_login/profile_picture were assigned but never
        # persisted -- the original returned without calling save().
        user.save()
        return Response({'message': 'New user created' , 'user' : data}, status=status.HTTP_201_CREATED)
    return Response({'message':'user with this email address already exists.'})
@api_view(['POST'])
@permission_classes([])
def signin(request):
    """Authenticate by email/password and return the user payload (202)."""
    if request.method == 'POST':
        post_data = dict(request.POST)
        check_user = User.objects.filter(email=post_data['email'][0])
        if len(check_user) == 0:
            return Response({"message":"this email does not exist!"} )
        # BUG FIX: every `check_user[0]` re-evaluates the queryset and can
        # hand back a fresh instance, so the original's attribute writes
        # landed on throwaway objects.  Fetch once and save the mutations.
        user = check_user[0]
        if user.check_password(post_data['password'][0]):
            user.active = True
            user.last_login = datetime.datetime.now()
            user.save()
            serializer = UserSigninSerializer(user)
            data = serializer.data
            data['username'] = user.username
            return Response({"message":"wellcome" , 'user': data} , status=status.HTTP_202_ACCEPTED)
        return Response({"message": "password or email is not correct"} )
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import json
import os
from mock import patch
from citest.gcp_testing import GcpAgent
def _method_spec(parameter_order, optional=None):
parameters = {key: {'required': True} for key in parameter_order}
parameters.update({key: {} for key in optional or []})
return {'parameters': parameters, 'parameterOrder': parameter_order}
class TestGcpAgent(GcpAgent):
    """GcpAgent specialization used by the test suite.

    Supplies a synthetic discovery document and a factory that wires the
    agent to fakes instead of the real Google discovery service.
    """
    @staticmethod
    @patch('apiclient.discovery.build')
    def make_test_agent(mock_discovery, service=None, default_variables=None):
        """Build a TestGcpAgent without touching the network.

        If *service* is given, construct the agent directly around it;
        otherwise route discovery through a FakeGcpDiscovery so that
        make_agent() consumes the synthetic document.
        """
        doc = TestGcpAgent.generate_discovery_document()
        if service is not None:
            return TestGcpAgent(service, doc, default_variables=default_variables)
        fake_discovery = FakeGcpDiscovery(doc)
        mock_discovery.return_value = fake_discovery
        return TestGcpAgent.make_agent(default_variables)
    @classmethod
    def default_discovery_name_and_version(cls):
        """Return the fake API name/version pair this agent advertises."""
        return 'TEST_API', 'TEST_VERSION'
    @staticmethod
    def load_discovery_document(filename):
        """Load a discovery document from a JSON file next to this module."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(base_dir, filename)) as f:
            doc = f.read()
        return json.JSONDecoder().decode(doc)
    @staticmethod
    def generate_discovery_document(version='TEST_VERSION'):
        """Return a minimal in-memory discovery document.

        Only 'TEST_VERSION' is supported; any other version raises
        ValueError.
        """
        if version != 'TEST_VERSION':
            raise ValueError()
        return {
            'title': 'MockCompute',
            'name': 'mock-compute',
            'resources' : {
                'projects': {
                    'methods': {'get': _method_spec(['project'])}},
                'regions': {
                    'methods': {'get': _method_spec(['project', 'region']),
                                'list': _method_spec(['project'])}},
                'my_test': {
                    'methods': {'get': _method_spec(['r'], ['o']),
                                'list': _method_spec([], ['o'])}}
            }}
class FakeGcpDiscovery(object):
    """Minimal fake of the discovery API client chain.

    Each chained method appends its name to ``calls``; ``execute`` returns
    the discovery document supplied at construction.
    """

    @property
    def calls(self):
        """Ordered list of method names invoked on this fake."""
        return self.__recorded

    def __init__(self, doc):
        self.__document = doc
        self.__recorded = []

    def apis(self):
        """Mimic discovery.apis(); chainable."""
        self.__recorded.append('apis')
        return self

    def getRest(self, api, version):
        """Mimic .getRest(...); the arguments are ignored."""
        # pylint: disable=unused-argument
        self.__recorded.append('getRest')
        return self

    def execute(self):
        """Return the canned discovery document."""
        self.__recorded.append('execute')
        return self.__document
class FakeGcpService(object):
    """Fake GCP resource service that replays canned execute() responses.

    Responses are consumed in the order given; an Exception instance in the
    list is raised instead of returned. Every call is logged to ``calls``.
    """

    @property
    def calls(self):
        """Ordered log of the methods invoked on this fake."""
        return self.__log

    def __init__(self, execute_response_list):
        self.__log = []
        # Stored reversed so pop() yields responses in the original order.
        self.__pending = list(execute_response_list)
        self.__pending.reverse()
        self.my_test = self._my_test  # needs to be a variable
        self.last_list_args = None
        self.last_get_args = None

    def _my_test(self):
        self.__log.append('my_test')
        return self

    def get(self, **kwargs):
        self.__log.append('get({0})'.format(kwargs))
        self.last_get_args = dict(**kwargs)
        return self

    def list(self, **kwargs):
        self.__log.append('list({0})'.format(kwargs))
        self.last_list_args = dict(**kwargs)
        return self

    def execute(self):
        """Pop and return (or raise) the next canned response."""
        self.__log.append('execute')
        outcome = self.__pending.pop()
        if isinstance(outcome, Exception):
            raise outcome
        return outcome

    def list_next(self, request, response):
        # pylint: disable=unused-argument
        self.__log.append('list_next')
        return request if self.__pending else None
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import json
import os
from mock import patch
from citest.gcp_testing import GcpAgent
def _method_spec(parameter_order, optional=None):
parameters = {key: {'required': True} for key in parameter_order}
parameters.update({key: {} for key in optional or []})
return {'parameters': parameters, 'parameterOrder': parameter_order}
class TestGcpAgent(GcpAgent):
@staticmethod
@patch('apiclient.discovery.build')
def make_test_agent(mock_discovery, service=None, default_variables=None):
doc = TestGcpAgent.generate_discovery_document()
if service is not None:
return TestGcpAgent(service, doc, default_variables=default_variables)
fake_discovery = FakeGcpDiscovery(doc)
mock_discovery.return_value = fake_discovery
return TestGcpAgent.make_agent(default_variables)
@classmethod
def default_discovery_name_and_version(cls):
return 'TEST_API', 'TEST_VERSION'
@staticmethod
def load_discovery_document(filename):
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, filename)) as f:
doc = f.read()
return json.JSONDecoder().decode(doc)
@staticmethod
def generate_discovery_document(version='TEST_VERSION'):
if version != 'TEST_VERSION':
raise ValueError()
return {
'title': 'MockCompute',
'name': 'mock-compute',
'resources' : {
'projects': {
'methods': {'get': _method_spec(['project'])}},
'regions': {
'methods': {'get': _method_spec(['project', 'region']),
'list': _method_spec(['project'])}},
'my_test': {
'methods': {'get': _method_spec(['r'], ['o']),
'list': _method_spec([], ['o'])}}
}}
class FakeGcpDiscovery(object):
@property
def calls(self):
return self.__calls
def __init__(self, doc):
self.__doc = doc
self.__calls = []
def apis(self):
self.__calls.append('apis')
return self
def getRest(self, api, version):
# pylint: disable=unused-argument
self.__calls.append('getRest')
return self
def execute(self):
self.__calls.append('execute')
return self.__doc
class FakeGcpService(object):
@property
def calls(self):
return self.__calls
def __init__(self, execute_response_list):
self.__calls = []
self.__execute_response_list = list(execute_response_list)
self.__execute_response_list.reverse()
self.my_test = self._my_test # needs to be a variable
self.last_list_args = None
self.last_get_args = None
def _my_test(self):
self.__calls.append('my_test')
return self
def get(self, **kwargs):
self.__calls.append('get({0})'.format(kwargs))
self.last_get_args = dict(**kwargs)
return self
def list(self, **kwargs):
self.__calls.append('list({0})'.format(kwargs))
self.last_list_args = dict(**kwargs)
return self
def execute(self):
self.__calls.append('execute')
result = self.__execute_response_list.pop()
if isinstance(result, Exception):
raise result
return result
def list_next(self, request, response):
# pylint: disable=unused-argument
self.__calls.append('list_next')
return request if self.__execute_response_list else None | 0.493164 | 0.127544 |
from tinydb import Query # TinyDB is a lightweight document oriented database
from t_system.db_fetching import DBFetcher
from t_system.motion.action import Position
from t_system.administration import is_admin
from t_system import dot_t_system_dir, T_SYSTEM_PATH
from t_system import mission_manager, emotion_manager
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
def create_position(admin_id, root, db_name, data):
    """Create a new position record.

    Args:
        admin_id (str): Admin privileges flag.
        root (str): Root privileges activation flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.
        data (dict): Position data structure.

    Returns:
        tuple: (success flag, id of the created position or None).
    """
    try:
        # Root access only counts when the caller is an authenticated admin.
        root = is_admin(admin_id) and root in ["true", "True"]
        position = Position(
            name=data['name'],
            cartesian_coords=data['cartesian_coords'],
            polar_params=data['polar_params'],
            root=root,
            db_name=db_name)
        position_id = position.id
        deterfresh_manager(root, db_name)
        return True, position_id
    except Exception:
        return False, None
def get_positions(admin_id, root, db_name):
    """Return every stored position.

    Args:
        admin_id (str): Root privileges flag.
        root (str): Root privileges activation flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.

    Returns:
        list: All position records, or [] on error (the error is logged).
    """
    try:
        root = is_admin(admin_id) and root in ["true", "True"]
        return get_db_table(root, db_name).all()
    except Exception as e:
        logger.error(e)
        return []
def get_position(admin_id, root, db_name, position_id):
    """Return the position with the given id.

    Args:
        admin_id (str): Root privileges flag.
        root (str): Root privileges activation flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.
        position_id (str): The id of the position.

    Returns:
        list: A one-element list with the matching record, or [] when the id
        is unknown or an error occurred (errors are logged).
    """
    try:
        root = is_admin(admin_id) and root in ["true", "True"]
        matches = get_db_table(root, db_name).search(Query().id == position_id)
        return [matches[0]] if matches else []
    except Exception as e:
        logger.error(e)
        return []
def update_position(admin_id, root, db_name, position_id, data):
    """Update the stored position that matches the given id.

    Args:
        admin_id (str): Root privileges flag.
        root (str): Root privileges activation flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.
        position_id (str): The id of the position.
        data (dict): Position data structure.

    Returns:
        bool: True when the record existed and was updated.
    """
    root = is_admin(admin_id) and root in ["true", "True"]
    table = get_db_table(root, db_name)
    if not table.search(Query().id == position_id):
        return False
    try:
        table.update(
            {'name': data['name'],
             'cartesian_coords': data['cartesian_coords'],
             'polar_coords': data['polar_coords']},
            Query().id == position_id)
        deterfresh_manager(root, db_name)
        return True
    except Exception:
        return False
def delete_position(admin_id, root, db_name, position_id):
    """Remove the position with the given id.

    Args:
        admin_id (str): Root privileges flag.
        root (str): Root privileges activation flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.
        position_id (str): The id of the position.

    Returns:
        bool: True when the record existed and was removed.
    """
    root = is_admin(admin_id) and root in ["true", "True"]
    table = get_db_table(root, db_name)
    if not table.search(Query().id == position_id):
        return False
    table.remove(Query().id == position_id)
    deterfresh_manager(root, db_name)
    return True
def get_db_table(root, db_name):
    """Fetch the 'positions' table from the appropriate database.

    Args:
        root (bool): Root privileges flag.
        db_name (str): Name of the registered Database. It uses if administration privileges activated.
    """
    table = "positions"
    if root:
        return DBFetcher(f'{T_SYSTEM_PATH}/motion/action', db_name, table).fetch()
    # Non-root callers always read the local user's 'missions' database.
    return DBFetcher(dot_t_system_dir, 'missions', table).fetch()
def deterfresh_manager(root, db_name):
"""Method to determine the manager that is mission or emotion manager and refresh it with using given database name and administration flag.
Args:
root (bool): Root privileges flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
"""
if root:
if db_name in ["predicted_missions", "missions"]:
mission_manager.refresh_memebers()
elif db_name == "emotions":
emotion_manager.refresh_members()
else:
mission_manager.refresh_members() | t_system/remote_ui/modules/position.py | from tinydb import Query # TinyDB is a lightweight document oriented database
from t_system.db_fetching import DBFetcher
from t_system.motion.action import Position
from t_system.administration import is_admin
from t_system import dot_t_system_dir, T_SYSTEM_PATH
from t_system import mission_manager, emotion_manager
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
def create_position(admin_id, root, db_name, data):
"""Method to create new position.
Args:
admin_id (str): Admin privileges flag.
root (str): Root privileges activation flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
data (dict): Position data structure.
"""
try:
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
position = Position(name=data['name'], cartesian_coords=data['cartesian_coords'], polar_params=data['polar_params'], root=root, db_name=db_name)
position_id = position.id
deterfresh_manager(root, db_name)
result = True
except Exception:
result = False
position_id = None
return result, position_id
def get_positions(admin_id, root, db_name):
"""Method to return existing positions.
Args:
admin_id (str): Root privileges flag.
root (str): Root privileges activation flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
"""
try:
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
table = get_db_table(root, db_name)
result = table.all() # result = positions
except Exception as e:
logger.error(e)
result = []
return result
def get_position(admin_id, root, db_name, position_id):
"""Method to return existing position with given id.
Args:
admin_id (str): Root privileges flag.
root (str): Root privileges activation flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
position_id (str): The id of the position.
"""
try:
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
table = get_db_table(root, db_name)
position = table.search((Query().id == position_id))
if not position:
result = []
else:
result = [position[0]]
except Exception as e:
logger.error(e)
result = []
return result
def update_position(admin_id, root, db_name, position_id, data):
"""Method to update the position that is recorded in database with given parameters.
Args:
admin_id (str): Root privileges flag.
root (str): Root privileges activation flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
position_id (str): The id of the position.
data (dict): Position data structure.
"""
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
table = get_db_table(root, db_name)
position = table.search((Query().id == position_id))
if not position:
result = False
else:
try:
table.update({'name': data['name'], 'cartesian_coords': data['cartesian_coords'], 'polar_coords': data['polar_coords']}, Query().id == position_id)
deterfresh_manager(root, db_name)
result = True
except Exception:
result = False
return result
def delete_position(admin_id, root, db_name, position_id):
"""Method to remove existing position with given id.
Args:
admin_id (str): Root privileges flag.
root (str): Root privileges activation flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
position_id (str): The id of the position.
"""
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
table = get_db_table(root, db_name)
if table.search((Query().id == position_id)):
table.remove((Query().id == position_id))
deterfresh_manager(root, db_name)
result = True
else:
result = False
return result
def get_db_table(root, db_name):
"""Method to set work database by root.
Args:
root (bool): Root privileges flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
"""
table = "positions"
if root:
db_folder = f'{T_SYSTEM_PATH}/motion/action'
return DBFetcher(db_folder, db_name, table).fetch()
else:
db_folder = dot_t_system_dir
db_name = 'missions'
return DBFetcher(db_folder, db_name, table).fetch()
def deterfresh_manager(root, db_name):
"""Method to determine the manager that is mission or emotion manager and refresh it with using given database name and administration flag.
Args:
root (bool): Root privileges flag.
db_name (str): Name of the registered Database. It uses if administration privileges activated.
"""
if root:
if db_name in ["predicted_missions", "missions"]:
mission_manager.refresh_memebers()
elif db_name == "emotions":
emotion_manager.refresh_members()
else:
mission_manager.refresh_members() | 0.676299 | 0.157169 |
import os
from collections import defaultdict
import numpy as np
from anago.models import SeqLabeling
from anago.data.metrics import get_entities
class Tagger(object):
    """Runs a trained SeqLabeling model to tag sentences with named entities."""

    def __init__(self,
                 config,
                 weights,
                 save_path='',
                 preprocessor=None,
                 tokenizer=str.split):
        # NOTE(review): preprocessor defaults to None but is dereferenced
        # immediately below, so it is effectively required — confirm callers.
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        # Build the model and restore the trained weights from save_path.
        self.model = SeqLabeling(config, ntags=len(self.preprocessor.vocab_tag))
        self.model.load(filepath=os.path.join(save_path, weights))

    def predict(self, words):
        """Return the predicted tag sequence for a tokenized sentence."""
        lengths = [len(words)]
        features = self.preprocessor.transform([words])
        scores = self.model.predict(features, lengths)
        best = np.argmax(scores, -1)
        return self.preprocessor.inverse_transform(best[0])

    def tag(self, sent):
        """Tag a sentence's tokens with entity labels.

        Args:
            sent: a sentence string.

        Returns:
            List of (word, tag) pairs, with the B-/I- prefix stripped from
            each tag (e.g. 'B-Person' -> 'Person').
        """
        assert isinstance(sent, str)
        tokens = self.tokenizer(sent)
        stripped = [t.split('-')[-1] for t in self.predict(tokens)]
        return list(zip(tokens, stripped))

    def get_entities(self, sent):
        """Return a dict mapping entity type to entity strings found in sent.

        Example result: {'Person': ['Obama'], 'LOCATION': ['White House']}
        """
        assert isinstance(sent, str)
        tokens = self.tokenizer(sent)
        return self._get_chunks(tokens, self.predict(tokens))

    def _get_chunks(self, words, tags):
        """Group tagged words into {chunk_type: [chunk text, ...]}."""
        res = defaultdict(list)
        for chunk_type, chunk_start, chunk_end in get_entities(tags):
            res[chunk_type].append(' '.join(words[chunk_start: chunk_end]))  # todo delimiter changeable
return res | anago/tagger.py | import os
from collections import defaultdict
import numpy as np
from anago.models import SeqLabeling
from anago.data.metrics import get_entities
class Tagger(object):
def __init__(self,
config,
weights,
save_path='',
preprocessor=None,
tokenizer=str.split):
self.preprocessor = preprocessor
self.tokenizer = tokenizer
# Build the model
self.model = SeqLabeling(config, ntags=len(self.preprocessor.vocab_tag))
self.model.load(filepath=os.path.join(save_path, weights))
def predict(self, words):
sequence_lengths = [len(words)]
X = self.preprocessor.transform([words])
pred = self.model.predict(X, sequence_lengths)
pred = np.argmax(pred, -1)
pred = self.preprocessor.inverse_transform(pred[0])
return pred
def tag(self, sent):
"""Tags a sentence named entities.
Args:
sent: a sentence
Return:
labels_pred: list of (word, tag) for a sentence
Example:
>>> sent = '<NAME> is speaking at the White House.'
>>> print(self.tag(sent))
[('President', 'O'), ('Obama', 'PERSON'), ('is', 'O'),
('speaking', 'O'), ('at', 'O'), ('the', 'O'),
('White', 'LOCATION'), ('House', 'LOCATION'), ('.', 'O')]
"""
assert isinstance(sent, str)
words = self.tokenizer(sent)
pred = self.predict(words)
pred = [t.split('-')[-1] for t in pred] # remove prefix: e.g. B-Person -> Person
return list(zip(words, pred))
def get_entities(self, sent):
"""Gets entities from a sentence.
Args:
sent: a sentence
Return:
labels_pred: dict of entities for a sentence
Example:
sent = '<NAME> is speaking at the White House.'
result = {'Person': ['Obama'], 'LOCATION': ['White House']}
"""
assert isinstance(sent, str)
words = self.tokenizer(sent)
pred = self.predict(words)
entities = self._get_chunks(words, pred)
return entities
def _get_chunks(self, words, tags):
"""
Args:
words: sequence of word
tags: sequence of labels
Returns:
dict of entities for a sequence
Example:
words = ['President', 'Obama', 'is', 'speaking', 'at', 'the', 'White', 'House', '.']
tags = ['O', 'B-Person', 'O', 'O', 'O', 'O', 'B-Location', 'I-Location', 'O']
result = {'Person': ['Obama'], 'LOCATION': ['White House']}
"""
chunks = get_entities(tags)
res = defaultdict(list)
for chunk_type, chunk_start, chunk_end in chunks:
res[chunk_type].append(' '.join(words[chunk_start: chunk_end])) # todo delimiter changeable
return res | 0.682891 | 0.313564 |
from utils_plus.router import url
from utils_plus.views import return_path_view as view
str_iter = lambda x: list(map(str, x))
def test_nesting_levels():
urls = list(
url("home")[
url("p1", view, "report1"),
url("p2", view, "report2"),
url("level1", view, "sub1")[
url("p1", view, "level1"),
url("p2", view, "level2"),
url("level2", view, "sub2")[url("p1", view, "lp1"), url("p2", view, "lp2")],
],
url("p3", view, "report3"),
]
)
assert str_iter(urls) == [
"<URLPattern 'home/p1/' [name='report1']>",
"<URLPattern 'home/p2/' [name='report2']>",
"<URLPattern 'home/level1/' [name='sub1']>",
"<URLPattern 'home/level1/p1/' [name='level1']>",
"<URLPattern 'home/level1/p2/' [name='level2']>",
"<URLPattern 'home/level1/level2/' [name='sub2']>",
"<URLPattern 'home/level1/level2/p1/' [name='lp1']>",
"<URLPattern 'home/level1/level2/p2/' [name='lp2']>",
"<URLPattern 'home/p3/' [name='report3']>",
]
assert len(urls) == 9
def test_variable_regex():
    """url.pk/int/var/re should emit the matching path-converter patterns."""
    g = url("home")[
        url.pk(view, "pk"),
        url.int("int_var", view, "int"),
        url.var("str_var", view, "str"),
        url.re("reg_x", r"[x]+", view, "regex"),
    ]
    assert str_iter(g) == [
        "<URLPattern 'home/<int:pk>/' [name='pk']>",
        "<URLPattern 'home/<int:int_var>/' [name='int']>",
        "<URLPattern 'home/<str_var>/' [name='str']>",
        "<URLPattern 'home/(?P<reg_x>[x]+)/' [name='regex']>",
    ]
def test_same_level_urls():
g = url("home", view)[
url.pk(view)
] + url("about", view)[
url.pk(view)[
url("pdf", view)
]
] + url("contact", view)[
url.pk(view)
]
assert str_iter(g) == ["<URLPattern 'home/'>",
"<URLPattern 'home/<int:pk>/'>",
"<URLPattern 'about/'>",
"<URLPattern 'about/<int:pk>/'>",
"<URLPattern 'about/<int:pk>/pdf/'>",
"<URLPattern 'contact/'>",
"<URLPattern 'contact/<int:pk>/'>"]
def test_paths_without_views():
g = url("home")[
url.pk()[url("edit", view)],
url.int("integer")[url("edit", view)],
url.var("variable")[url("edit", view)],
url.re("regex", r"\.+")[url("edit", view)],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home/<variable>/edit/'>",
"<URLPattern 'home/(?P<regex>\\.+)/edit/'>",
"<URLPattern 'home/home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home/home3/'>",
]
def test_include_patterns():
    """Nested url trees compose like Django include()d URL confs."""
    # app2 urls
    app2 = url("app2/")[url("post")[url.pk(view)[url("edit", view)]]]
    # app1 urls
    app1 = url("app1/")[url("post")[url.pk(view)[url("edit", view)]]]
    # root url definition
    app = url("app/")[app1, app2]
    assert str_iter(app) == [
        "<URLPattern 'app/app1/post/<int:pk>/'>",
        "<URLPattern 'app/app1/post/<int:pk>/edit/'>",
        "<URLPattern 'app/app2/post/<int:pk>/'>",
        "<URLPattern 'app/app2/post/<int:pk>/edit/'>",
    ]
def test_paths_that_starts_with_a_blank_root():
g = url("", view)[
url("home")[url.pk()[url("edit", view)], url.int("integer")[url("edit", view)]],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern ''>",
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home3/'>"
]
def test_multi_part_single_entry():
g = url("nest1/nest2", view)[
url("home/coming")[
url.pk()[
url("edit", view)
],
url.int("integer")[
url("edit", view),
],
],
url("first/nest3")[
url.pk(view, "pk")
],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'nest1/nest2/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:pk>/edit/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:integer>/edit/'>",
"<URLPattern 'nest1/nest2/first/nest3/<int:pk>/' [name='pk']>",
"<URLPattern 'nest1/nest2/home3/'>"
] | tests/test_router.py | from utils_plus.router import url
from utils_plus.views import return_path_view as view
str_iter = lambda x: list(map(str, x))
def test_nesting_levels():
urls = list(
url("home")[
url("p1", view, "report1"),
url("p2", view, "report2"),
url("level1", view, "sub1")[
url("p1", view, "level1"),
url("p2", view, "level2"),
url("level2", view, "sub2")[url("p1", view, "lp1"), url("p2", view, "lp2")],
],
url("p3", view, "report3"),
]
)
assert str_iter(urls) == [
"<URLPattern 'home/p1/' [name='report1']>",
"<URLPattern 'home/p2/' [name='report2']>",
"<URLPattern 'home/level1/' [name='sub1']>",
"<URLPattern 'home/level1/p1/' [name='level1']>",
"<URLPattern 'home/level1/p2/' [name='level2']>",
"<URLPattern 'home/level1/level2/' [name='sub2']>",
"<URLPattern 'home/level1/level2/p1/' [name='lp1']>",
"<URLPattern 'home/level1/level2/p2/' [name='lp2']>",
"<URLPattern 'home/p3/' [name='report3']>",
]
assert len(urls) == 9
def test_variable_regex():
g = url("home")[
url.pk(view, "pk"),
url.int("int_var", view, "int"),
url.var("str_var", view, "str"),
url.re("reg_x", r"[x]+", view, "regex"),
]
assert str_iter(g) == [
"<URLPattern 'home/<int:pk>/' [name='pk']>",
"<URLPattern 'home/<int:int_var>/' [name='int']>",
"<URLPattern 'home/<str_var>/' [name='str']>",
"<URLPattern 'home/(?P<reg_x>[x]+)/' [name='regex']>",
]
def test_same_level_urls():
g = url("home", view)[
url.pk(view)
] + url("about", view)[
url.pk(view)[
url("pdf", view)
]
] + url("contact", view)[
url.pk(view)
]
assert str_iter(g) == ["<URLPattern 'home/'>",
"<URLPattern 'home/<int:pk>/'>",
"<URLPattern 'about/'>",
"<URLPattern 'about/<int:pk>/'>",
"<URLPattern 'about/<int:pk>/pdf/'>",
"<URLPattern 'contact/'>",
"<URLPattern 'contact/<int:pk>/'>"]
def test_paths_without_views():
g = url("home")[
url.pk()[url("edit", view)],
url.int("integer")[url("edit", view)],
url.var("variable")[url("edit", view)],
url.re("regex", r"\.+")[url("edit", view)],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home/<variable>/edit/'>",
"<URLPattern 'home/(?P<regex>\\.+)/edit/'>",
"<URLPattern 'home/home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home/home3/'>",
]
def test_include_patterns():
# app1 urls
app2 = url("app2/")[url("post")[url.pk(view)[url("edit", view)]]]
# app1 urls
app1 = url("app1/")[url("post")[url.pk(view)[url("edit", view)]]]
# root url definition
app = url("app/")[app1, app2]
assert str_iter(app) == [
"<URLPattern 'app/app1/post/<int:pk>/'>",
"<URLPattern 'app/app1/post/<int:pk>/edit/'>",
"<URLPattern 'app/app2/post/<int:pk>/'>",
"<URLPattern 'app/app2/post/<int:pk>/edit/'>",
]
def test_paths_that_starts_with_a_blank_root():
g = url("", view)[
url("home")[url.pk()[url("edit", view)], url.int("integer")[url("edit", view)]],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern ''>",
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home3/'>"
]
def test_multi_part_single_entry():
g = url("nest1/nest2", view)[
url("home/coming")[
url.pk()[
url("edit", view)
],
url.int("integer")[
url("edit", view),
],
],
url("first/nest3")[
url.pk(view, "pk")
],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'nest1/nest2/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:pk>/edit/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:integer>/edit/'>",
"<URLPattern 'nest1/nest2/first/nest3/<int:pk>/' [name='pk']>",
"<URLPattern 'nest1/nest2/home3/'>"
] | 0.377655 | 0.246477 |
import mintapi.api
import mintapi.cli
import mintapi.signIn
import json
import unittest
import requests
import tempfile
from mintapi import constants
from unittest.mock import patch, DEFAULT
accounts_example = {
"Account": [
{
"type": "CreditAccount",
"userCardType": "UNKNOWN",
"creditAccountType": "CREDIT_CARD",
"creditLimit": 2222.0,
"availableCredit": 1111.0,
"interestRate": 0.444,
"minPayment": 111.0,
"absoluteMinPayment": 111.0,
"statementMinPayment": 22.0,
"statementDueDate": "2022-04-19T07:00:00Z",
"statementDueAmount": 0.0,
"metaData": {
"createdDate": "2017-01-05T17:12:15Z",
"lastUpdatedDate": "2022-03-27T16:46:41Z",
"link": [
{
"otherAttributes": {},
"href": "/v1/accounts/id",
"rel": "self",
}
],
},
"id": "id",
"name": "name",
"value": -555.55,
"isVisible": True,
"isDeleted": False,
"planningTrendsVisible": True,
"accountStatus": "ACTIVE",
"systemStatus": "ACTIVE",
"currency": "USD",
"fiLoginId": "fiLoginId",
"fiLoginStatus": "OK",
"currentBalance": 555.55,
"cpId": "cpId",
"cpAccountName": "cpAccountName",
"cpAccountNumberLast4": "cpAccountNumberLast4",
"hostAccount": False,
"fiName": "fiName",
"accountTypeInt": 0,
"isAccountClosedByMint": False,
"isAccountNotFound": False,
"isActive": True,
"isClosed": False,
"isError": False,
"isHiddenFromPlanningTrends": True,
"isTerminal": True,
"credentialSetId": "credentialSetId",
"ccAggrStatus": "0",
}
]
}
category_example = [
{
"type": "Category",
"name": "Entertainment",
"depth": 1,
"categoryType": "EXPENSE",
"isBusiness": "false",
"isCustom": "false",
"isUnassignable": "false",
"isUnbudgetable": "false",
"isUntrendable": "false",
"isIgnored": "false",
"isEditable": "false",
"isDeleted": "false",
"discretionaryType": "DISCRETIONARY",
"metaData": {
"lastUpdatedDate": "2020-11-18T07:31:47Z",
"link": [
{
"otherAttributes": {},
"href": "/v1/categories/10740790_1",
"rel": "self",
}
],
},
"id": "10740790_14",
},
{
"type": "Category",
"name": "Auto Insurance",
"depth": 2,
"categoryType": "EXPENSE",
"parentId": "10740790_14",
"isBusiness": False,
"isCustom": False,
"isUnassignable": False,
"isUnbudgetable": False,
"isUntrendable": False,
"isIgnored": False,
"isEditable": False,
"isDeleted": False,
"discretionaryType": "NON_DISCRETIONARY",
"metaData": {
"lastUpdatedDate": "2020-11-18T07:31:47Z",
"link": [
{
"otherAttributes": {},
"href": "/v1/categories/10740790_1405",
"rel": "self",
}
],
},
"id": "10740790_1405",
},
]
transactions_example = {
"Transaction": [
{
"type": "CashAndCreditTransaction",
"metaData": {
"lastUpdatedDate": "2022-03-25T00:11:08Z",
"link": [
{
"otherAttributes": {},
"href": "/v1/transactions/id",
"rel": "self",
}
],
},
"id": "id",
"accountId": "accountId",
"accountRef": {
"id": "id",
"name": "name",
"type": "BankAccount",
"hiddenFromPlanningAndTrends": False,
},
"date": "2022-03-24",
"description": "description",
"category": {
"id": "id",
"name": "Income",
"categoryType": "INCOME",
"parentId": "parentId",
"parentName": "Root",
},
"amount": 420.0,
"status": "MANUAL",
"matchState": "NOT_MATCHED",
"fiData": {
"id": "id",
"date": "2022-03-24",
"amount": 420.0,
"description": "description",
"inferredDescription": "inferredDescription",
"inferredCategory": {"id": "id", "name": "name"},
},
"etag": "etag",
"isExpense": False,
"isPending": False,
"discretionaryType": "NONE",
"isLinkedToRule": False,
"transactionReviewState": "NOT_APPLICABLE",
},
]
}
investments_example = {
"Investment": [
{
"accountId": "1",
"cpSrcElementId": "2",
"description": "TEST",
"cpAssetClass": "UNKNOWN",
"holdingType": "UNKNOWN",
"initialTotalCost": 0.0,
"inceptionDate": "2011-01-03T07:00:00Z",
"initialQuantity": 0.0,
"currentQuantity": 0.0,
"currentPrice": 10.0,
"currentValue": 1414.12,
"averagePricePaid": 0.0,
"id": "3",
"metaData": {
"lastUpdatedDate": "2011-11-03T07:00:00Z",
"link": [{"id": "4", "description": "METADATA TEST"}],
},
}
]
}
budgets_example = {
"Budget": [
{
"type": "MonthlyBudget",
"budgetAdjustmentAmount": -75.00,
"rollover": "true",
"reset": "false",
"rolloverResetAmount": 0.0,
"metaData": {
"createdDate": "2022-03-01T08:00:00Z",
"lastUpdatedDate": "2022-02-28T08:32:50Z",
"link": [
{
"otherAttributes": {},
"href": "/v1/budgets/10740790_2123123684",
"rel": "self",
}
],
},
"id": "10740790_2123123684",
"budgetDate": "2022-03-01",
"amount": 75.00,
"budgetAmount": 50.0,
"category": {
"id": "10740790_11235",
"name": "Auto Insurance",
"categoryType": "EXPENSE",
"parentId": "14",
"parentName": "Auto & Transport",
},
"subsumed": "false",
"performanceStatus": "OVERBUDGET",
},
]
}
class Attribute:
    # Fake `requests` response object: .text carries the canned Mint accounts
    # payload wrapped in the response/"42"/response envelope the client unwraps.
    text = json.dumps({"response": {"42": {"response": accounts_example}}})
class Element:
    """Fake selenium WebElement whose attributes always hold an API token."""

    @staticmethod
    def get_attribute(test):
        """Return a JSON payload with the canned token; *test* is ignored."""
        return json.dumps({"token": "123"})
class TestMock:
    """Fake selenium driver / requests module used by the driver-based tests."""

    @staticmethod
    def find_element_by_name(test):
        """Return a fake element regardless of the name searched for."""
        return Element()

    @staticmethod
    def request(a, b, **c):
        """Stand-in for requests.request; returns a canned accounts response."""
        return Attribute()
class MintApiTests(unittest.TestCase):
    """Tests for mintapi sign-in handling, endpoint post-processing, and the CLI."""

    def test_chrome_driver_links(self):
        """Every supported platform's chromedriver download URL should resolve."""
        latest_version = mintapi.signIn.get_latest_chrome_driver_version()
        for platform in mintapi.signIn.CHROME_ZIP_TYPES:
            request = requests.get(
                mintapi.signIn.get_chrome_driver_url(latest_version, platform)
            )
            self.assertEqual(request.status_code, 200)

    @patch.object(mintapi.api, "_create_web_driver_at_mint_com")
    @patch.object(mintapi.api, "logger")
    @patch.object(mintapi.api, "sign_in")
    def test_when_sign_in_fails_then_logs_exception(
        self, mock_sign_in, mock_logger, *_
    ):
        """A sign-in failure must be logged and re-raised with a clear message."""
        test_exception = Exception()
        mock_sign_in.side_effect = test_exception
        with self.assertRaises(Exception) as context:
            mintapi.Mint("test", "test")
        mock_logger.exception.assert_called_with(test_exception)
        # assertIn gives a useful diff on failure, unlike assertTrue(... in ...).
        self.assertIn("Could not sign in to Mint", str(context.exception))

    @patch.multiple(
        mintapi.Mint,
        _get_api_key_header=DEFAULT,
        _load_mint_credit_url=DEFAULT,
        _get_credit_reports=DEFAULT,
        get_credit_accounts=DEFAULT,
        get_credit_inquiries=DEFAULT,
        get_credit_utilization=DEFAULT,
    )
    def test_exclude_credit_details(self, **_):
        """Each exclude_* flag should drop exactly its section of the credit report."""
        mint = mintapi.Mint()
        for section, flag in (
            ("inquiries", "exclude_inquiries"),
            ("accounts", "exclude_accounts"),
            ("utilization", "exclude_utilization"),
        ):
            with self.subTest(section=section):
                credit_report = mint.get_credit_report_data(
                    limit=2, details=True, **{flag: True}
                )
                self.assertNotIn(section, credit_report)
                credit_report = mint.get_credit_report_data(
                    limit=2, details=True, **{flag: False}
                )
                self.assertIn(section, credit_report)

    def test_config_file(self):
        """Options can be read from a config file instead of the command line."""
        config_file = write_transactions_file()
        try:
            arguments = parse_arguments_file(config_file)
            self.assertEqual(arguments.transactions, True)
        finally:
            # Closing a NamedTemporaryFile also deletes it.
            config_file.close()

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_account_data(self, mock_call_accounts_endpoint):
        """Account data should flatten metaData into top-level date fields."""
        mock_call_accounts_endpoint.return_value = accounts_example
        account_data = mintapi.Mint().get_account_data()[0]
        self.assertNotIn("metaData", account_data)
        self.assertIn("createdDate", account_data)
        self.assertIn("lastUpdatedDate", account_data)

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_transaction_data(self, mock_call_transactions_endpoint):
        """Transactions keep lastUpdatedDate plus parent category details."""
        mock_call_transactions_endpoint.return_value = transactions_example
        transaction_data = mintapi.Mint().get_transaction_data()[0]
        self.assertNotIn("metaData", transaction_data)
        self.assertNotIn("createdDate", transaction_data)
        self.assertIn("lastUpdatedDate", transaction_data)
        self.assertIn("parentId", transaction_data["category"])
        self.assertIn("parentName", transaction_data["category"])

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_investment_data(self, mock_call_investments_endpoint):
        """Investments drop metaData but surface lastUpdatedDate."""
        mock_call_investments_endpoint.return_value = investments_example
        investment_data = mintapi.Mint().get_investment_data()[0]
        self.assertNotIn("metaData", investment_data)
        self.assertNotIn("createdDate", investment_data)
        self.assertIn("lastUpdatedDate", investment_data)

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_budgets(self, mock_call_budgets_endpoint):
        """Budgets surface both createdDate and lastUpdatedDate from metaData."""
        mock_call_budgets_endpoint.return_value = budgets_example
        budgets = mintapi.Mint().get_budget_data()[0]
        self.assertNotIn("metaData", budgets)
        self.assertIn("createdDate", budgets)
        self.assertIn("lastUpdatedDate", budgets)

    def test_format_filename(self):
        """Output filenames combine the configured stem, data type, and format."""
        config_file = write_transactions_file()
        try:
            arguments = parse_arguments_file(config_file)
            # Renamed from `type` to avoid shadowing the builtin.
            data_type = constants.TRANSACTION_KEY.lower()
            filename = mintapi.cli.format_filename(arguments, data_type)
            self.assertEqual(filename, "current_{}.csv".format(data_type))
        finally:
            config_file.close()
        config_file = write_accounts_file()
        try:
            arguments = parse_arguments_file(config_file)
            data_type = constants.ACCOUNT_KEY.lower()
            filename = mintapi.cli.format_filename(arguments, data_type)
            self.assertEqual(filename, "current_{}.json".format(data_type))
        finally:
            config_file.close()
        config_file = write_investments_file()
        try:
            arguments = parse_arguments_file(config_file)
            # No data type configured -> no filename should be produced.
            filename = mintapi.cli.format_filename(arguments, None)
            self.assertEqual(filename, None)
        finally:
            config_file.close()
def write_transactions_file():
    """Create a temp config file requesting transactions as CSV named 'current'."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("transactions\nformat=csv\nfilename=current")
    return tmp
def write_accounts_file():
    """Create a temp config file requesting accounts as JSON named 'current'."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("accounts\nformat=json\nfilename=current")
    return tmp
def write_investments_file():
    """Create a temp config file requesting investments with no format/filename."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("investments")
    return tmp
def parse_arguments_file(config_file):
    """Flush *config_file* to disk, then parse CLI arguments from it via ``-c``."""
    # flush() is required: the writers above return without flushing, and the
    # CLI reads the file from disk by name.
    config_file.flush()
    return mintapi.cli.parse_arguments(["-c", config_file.name])
if __name__ == "__main__":
unittest.main() | tests/test_driver.py | import mintapi.api
import mintapi.cli
import mintapi.signIn
import json
import unittest
import requests
import tempfile
from mintapi import constants
from unittest.mock import patch, DEFAULT
# Canned /accounts endpoint payload (a single credit-card account) used by
# test_get_account_data() and by the Attribute fake below.
accounts_example = {
    "Account": [
        {
            "type": "CreditAccount",
            "userCardType": "UNKNOWN",
            "creditAccountType": "CREDIT_CARD",
            "creditLimit": 2222.0,
            "availableCredit": 1111.0,
            "interestRate": 0.444,
            "minPayment": 111.0,
            "absoluteMinPayment": 111.0,
            "statementMinPayment": 22.0,
            "statementDueDate": "2022-04-19T07:00:00Z",
            "statementDueAmount": 0.0,
            "metaData": {
                "createdDate": "2017-01-05T17:12:15Z",
                "lastUpdatedDate": "2022-03-27T16:46:41Z",
                "link": [
                    {
                        "otherAttributes": {},
                        "href": "/v1/accounts/id",
                        "rel": "self",
                    }
                ],
            },
            "id": "id",
            "name": "name",
            "value": -555.55,
            "isVisible": True,
            "isDeleted": False,
            "planningTrendsVisible": True,
            "accountStatus": "ACTIVE",
            "systemStatus": "ACTIVE",
            "currency": "USD",
            "fiLoginId": "fiLoginId",
            "fiLoginStatus": "OK",
            "currentBalance": 555.55,
            "cpId": "cpId",
            "cpAccountName": "cpAccountName",
            "cpAccountNumberLast4": "cpAccountNumberLast4",
            "hostAccount": False,
            "fiName": "fiName",
            "accountTypeInt": 0,
            "isAccountClosedByMint": False,
            "isAccountNotFound": False,
            "isActive": True,
            "isClosed": False,
            "isError": False,
            "isHiddenFromPlanningTrends": True,
            "isTerminal": True,
            "credentialSetId": "credentialSetId",
            "ccAggrStatus": "0",
        }
    ]
}
# Canned category payload: a parent category followed by one child (depth 2,
# parentId points at the parent).  NOTE(review): not referenced by any test in
# this module; the first entry uses string "false" where the second uses the
# boolean False — presumably mirroring raw API output; verify before reuse.
category_example = [
    {
        "type": "Category",
        "name": "Entertainment",
        "depth": 1,
        "categoryType": "EXPENSE",
        "isBusiness": "false",
        "isCustom": "false",
        "isUnassignable": "false",
        "isUnbudgetable": "false",
        "isUntrendable": "false",
        "isIgnored": "false",
        "isEditable": "false",
        "isDeleted": "false",
        "discretionaryType": "DISCRETIONARY",
        "metaData": {
            "lastUpdatedDate": "2020-11-18T07:31:47Z",
            "link": [
                {
                    "otherAttributes": {},
                    "href": "/v1/categories/10740790_1",
                    "rel": "self",
                }
            ],
        },
        "id": "10740790_14",
    },
    {
        "type": "Category",
        "name": "Auto Insurance",
        "depth": 2,
        "categoryType": "EXPENSE",
        "parentId": "10740790_14",
        "isBusiness": False,
        "isCustom": False,
        "isUnassignable": False,
        "isUnbudgetable": False,
        "isUntrendable": False,
        "isIgnored": False,
        "isEditable": False,
        "isDeleted": False,
        "discretionaryType": "NON_DISCRETIONARY",
        "metaData": {
            "lastUpdatedDate": "2020-11-18T07:31:47Z",
            "link": [
                {
                    "otherAttributes": {},
                    "href": "/v1/categories/10740790_1405",
                    "rel": "self",
                }
            ],
        },
        "id": "10740790_1405",
    },
]
# Canned /transactions endpoint payload (single transaction) used by
# test_get_transaction_data(); metaData has lastUpdatedDate but no createdDate.
transactions_example = {
    "Transaction": [
        {
            "type": "CashAndCreditTransaction",
            "metaData": {
                "lastUpdatedDate": "2022-03-25T00:11:08Z",
                "link": [
                    {
                        "otherAttributes": {},
                        "href": "/v1/transactions/id",
                        "rel": "self",
                    }
                ],
            },
            "id": "id",
            "accountId": "accountId",
            "accountRef": {
                "id": "id",
                "name": "name",
                "type": "BankAccount",
                "hiddenFromPlanningAndTrends": False,
            },
            "date": "2022-03-24",
            "description": "description",
            "category": {
                "id": "id",
                "name": "Income",
                "categoryType": "INCOME",
                "parentId": "parentId",
                "parentName": "Root",
            },
            "amount": 420.0,
            "status": "MANUAL",
            "matchState": "NOT_MATCHED",
            "fiData": {
                "id": "id",
                "date": "2022-03-24",
                "amount": 420.0,
                "description": "description",
                "inferredDescription": "inferredDescription",
                "inferredCategory": {"id": "id", "name": "name"},
            },
            "etag": "etag",
            "isExpense": False,
            "isPending": False,
            "discretionaryType": "NONE",
            "isLinkedToRule": False,
            "transactionReviewState": "NOT_APPLICABLE",
        },
    ]
}
# Canned /investments endpoint payload (single holding) used by
# test_get_investment_data(); note metaData has no createdDate.
investments_example = {
    "Investment": [
        {
            "accountId": "1",
            "cpSrcElementId": "2",
            "description": "TEST",
            "cpAssetClass": "UNKNOWN",
            "holdingType": "UNKNOWN",
            "initialTotalCost": 0.0,
            "inceptionDate": "2011-01-03T07:00:00Z",
            "initialQuantity": 0.0,
            "currentQuantity": 0.0,
            "currentPrice": 10.0,
            "currentValue": 1414.12,
            "averagePricePaid": 0.0,
            "id": "3",
            "metaData": {
                "lastUpdatedDate": "2011-11-03T07:00:00Z",
                "link": [{"id": "4", "description": "METADATA TEST"}],
            },
        }
    ]
}
# Canned /budgets endpoint payload (single monthly budget) used by
# test_get_budgets(); metaData carries both createdDate and lastUpdatedDate.
budgets_example = {
    "Budget": [
        {
            "type": "MonthlyBudget",
            "budgetAdjustmentAmount": -75.00,
            "rollover": "true",
            "reset": "false",
            "rolloverResetAmount": 0.0,
            "metaData": {
                "createdDate": "2022-03-01T08:00:00Z",
                "lastUpdatedDate": "2022-02-28T08:32:50Z",
                "link": [
                    {
                        "otherAttributes": {},
                        "href": "/v1/budgets/10740790_2123123684",
                        "rel": "self",
                    }
                ],
            },
            "id": "10740790_2123123684",
            "budgetDate": "2022-03-01",
            "amount": 75.00,
            "budgetAmount": 50.0,
            "category": {
                "id": "10740790_11235",
                "name": "Auto Insurance",
                "categoryType": "EXPENSE",
                "parentId": "14",
                "parentName": "Auto & Transport",
            },
            "subsumed": "false",
            "performanceStatus": "OVERBUDGET",
        },
    ]
}
class Attribute:
    # Fakes a requests.Response: `.text` carries the JSON body that the Mint
    # endpoints return, with the accounts fixture nested under
    # response -> "42" -> response.
    text = json.dumps({"response": {"42": {"response": accounts_example}}})
class Element:
    """Stand-in for a Selenium WebElement whose attributes hold a JSON token."""

    @staticmethod
    def get_attribute(test):
        # Whatever attribute name is asked for, answer with the canned token.
        payload = {"token": "123"}
        return json.dumps(payload)
class TestMock:
    """Minimal stand-in for a Selenium driver / HTTP session used by the tests."""
    @staticmethod
    def find_element_by_name(test):
        # Any element lookup resolves to the canned Element above.
        return Element()
    @staticmethod
    def request(a, b, **c):
        # Any HTTP call (method, url, **kwargs) yields the canned Attribute response.
        return Attribute()
class MintApiTests(unittest.TestCase):
    """Tests for mintapi sign-in handling, endpoint post-processing, and the CLI."""

    def test_chrome_driver_links(self):
        """Every supported platform's chromedriver download URL should resolve."""
        latest_version = mintapi.signIn.get_latest_chrome_driver_version()
        for platform in mintapi.signIn.CHROME_ZIP_TYPES:
            request = requests.get(
                mintapi.signIn.get_chrome_driver_url(latest_version, platform)
            )
            self.assertEqual(request.status_code, 200)

    @patch.object(mintapi.api, "_create_web_driver_at_mint_com")
    @patch.object(mintapi.api, "logger")
    @patch.object(mintapi.api, "sign_in")
    def test_when_sign_in_fails_then_logs_exception(
        self, mock_sign_in, mock_logger, *_
    ):
        """A sign-in failure must be logged and re-raised with a clear message."""
        test_exception = Exception()
        mock_sign_in.side_effect = test_exception
        with self.assertRaises(Exception) as context:
            mintapi.Mint("test", "test")
        mock_logger.exception.assert_called_with(test_exception)
        # assertIn gives a useful diff on failure, unlike assertTrue(... in ...).
        self.assertIn("Could not sign in to Mint", str(context.exception))

    @patch.multiple(
        mintapi.Mint,
        _get_api_key_header=DEFAULT,
        _load_mint_credit_url=DEFAULT,
        _get_credit_reports=DEFAULT,
        get_credit_accounts=DEFAULT,
        get_credit_inquiries=DEFAULT,
        get_credit_utilization=DEFAULT,
    )
    def test_exclude_credit_details(self, **_):
        """Each exclude_* flag should drop exactly its section of the credit report."""
        mint = mintapi.Mint()
        for section, flag in (
            ("inquiries", "exclude_inquiries"),
            ("accounts", "exclude_accounts"),
            ("utilization", "exclude_utilization"),
        ):
            with self.subTest(section=section):
                credit_report = mint.get_credit_report_data(
                    limit=2, details=True, **{flag: True}
                )
                self.assertNotIn(section, credit_report)
                credit_report = mint.get_credit_report_data(
                    limit=2, details=True, **{flag: False}
                )
                self.assertIn(section, credit_report)

    def test_config_file(self):
        """Options can be read from a config file instead of the command line."""
        config_file = write_transactions_file()
        try:
            arguments = parse_arguments_file(config_file)
            self.assertEqual(arguments.transactions, True)
        finally:
            # Closing a NamedTemporaryFile also deletes it.
            config_file.close()

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_account_data(self, mock_call_accounts_endpoint):
        """Account data should flatten metaData into top-level date fields."""
        mock_call_accounts_endpoint.return_value = accounts_example
        account_data = mintapi.Mint().get_account_data()[0]
        self.assertNotIn("metaData", account_data)
        self.assertIn("createdDate", account_data)
        self.assertIn("lastUpdatedDate", account_data)

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_transaction_data(self, mock_call_transactions_endpoint):
        """Transactions keep lastUpdatedDate plus parent category details."""
        mock_call_transactions_endpoint.return_value = transactions_example
        transaction_data = mintapi.Mint().get_transaction_data()[0]
        self.assertNotIn("metaData", transaction_data)
        self.assertNotIn("createdDate", transaction_data)
        self.assertIn("lastUpdatedDate", transaction_data)
        self.assertIn("parentId", transaction_data["category"])
        self.assertIn("parentName", transaction_data["category"])

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_investment_data(self, mock_call_investments_endpoint):
        """Investments drop metaData but surface lastUpdatedDate."""
        mock_call_investments_endpoint.return_value = investments_example
        investment_data = mintapi.Mint().get_investment_data()[0]
        self.assertNotIn("metaData", investment_data)
        self.assertNotIn("createdDate", investment_data)
        self.assertIn("lastUpdatedDate", investment_data)

    @patch.object(mintapi.Mint, "_Mint__call_mint_endpoint")
    def test_get_budgets(self, mock_call_budgets_endpoint):
        """Budgets surface both createdDate and lastUpdatedDate from metaData."""
        mock_call_budgets_endpoint.return_value = budgets_example
        budgets = mintapi.Mint().get_budget_data()[0]
        self.assertNotIn("metaData", budgets)
        self.assertIn("createdDate", budgets)
        self.assertIn("lastUpdatedDate", budgets)

    def test_format_filename(self):
        """Output filenames combine the configured stem, data type, and format."""
        config_file = write_transactions_file()
        try:
            arguments = parse_arguments_file(config_file)
            # Renamed from `type` to avoid shadowing the builtin.
            data_type = constants.TRANSACTION_KEY.lower()
            filename = mintapi.cli.format_filename(arguments, data_type)
            self.assertEqual(filename, "current_{}.csv".format(data_type))
        finally:
            config_file.close()
        config_file = write_accounts_file()
        try:
            arguments = parse_arguments_file(config_file)
            data_type = constants.ACCOUNT_KEY.lower()
            filename = mintapi.cli.format_filename(arguments, data_type)
            self.assertEqual(filename, "current_{}.json".format(data_type))
        finally:
            config_file.close()
        config_file = write_investments_file()
        try:
            arguments = parse_arguments_file(config_file)
            # No data type configured -> no filename should be produced.
            filename = mintapi.cli.format_filename(arguments, None)
            self.assertEqual(filename, None)
        finally:
            config_file.close()
def write_transactions_file():
    """Create a temp config file requesting transactions as CSV named 'current'."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("transactions\nformat=csv\nfilename=current")
    return tmp
def write_accounts_file():
    """Create a temp config file requesting accounts as JSON named 'current'."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("accounts\nformat=json\nfilename=current")
    return tmp
def write_investments_file():
    """Create a temp config file requesting investments with no format/filename."""
    tmp = tempfile.NamedTemporaryFile(mode="wt")
    tmp.write("investments")
    return tmp
def parse_arguments_file(config_file):
    """Flush *config_file* to disk, then parse CLI arguments from it via ``-c``."""
    # flush() is required: the writers above return without flushing, and the
    # CLI reads the file from disk by name.
    config_file.flush()
    return mintapi.cli.parse_arguments(["-c", config_file.name])
if __name__ == "__main__":
unittest.main() | 0.343452 | 0.387285 |
from collections import namedtuple
from . import ast
# A model pairs the domain of individuals with an assignment mapping variable
# names to their denotations.
WorldModel = namedtuple("WorldModel", ["individuals", "assignments"])
def interpret_formula(formula, model):
    """Given a logical formula and a model of the world, return the formula's denotation
    in the model.
    """
    if isinstance(formula, ast.Var):
        # A variable denotes whatever the assignment function maps it to.
        return model.assignments[formula.value]
    elif isinstance(formula, ast.And):
        # Conjunction; Python's `and` short-circuits, so the right operand may
        # go uninterpreted.
        return interpret_formula(formula.left, model) and interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.Or):
        # Inclusive disjunction, also short-circuiting.
        return interpret_formula(formula.left, model) or interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.IfThen):
        # Material implication: p -> q  ==  (not p) or q.
        return not interpret_formula(formula.left, model) or interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.Call):
        # Predicate application: the caller denotes a collection of
        # individuals, so application reduces to membership.
        caller = interpret_formula(formula.caller, model)
        arg = interpret_formula(formula.arg, model)
        return arg in caller
    elif isinstance(formula, ast.ForAll):
        # Universal quantification: every individual must satisfy the body.
        return len(satisfiers(formula.body, model, formula.symbol)) == len(
            model.individuals
        )
    elif isinstance(formula, ast.Exists):
        # Existential quantification: at least one satisfier suffices.
        return len(satisfiers(formula.body, model, formula.symbol)) > 0
    elif isinstance(formula, ast.Not):
        return not interpret_formula(formula.operand, model)
    elif isinstance(formula, ast.Iota):
        # Definite description: denotes the unique satisfier, or None when the
        # description is improper (zero or multiple satisfiers).
        sset = satisfiers(formula.body, model, formula.symbol)
        if len(sset) == 1:
            return sset.pop()
        else:
            return None
    else:
        # TODO: Handle LambdaNodes differently (they can't be interpreted, but they
        # should give a better error message).
        raise NotImplementedError(formula.__class__)
def satisfiers(formula, model, variable):
individuals = set()
old_value = model.assignments.get(variable)
for individual in model.individuals:
model.assignments[variable] = individual
if interpret_formula(formula, model):
individuals.add(individual)
if old_value is None:
del model.assignments[variable]
else:
model.assignments[variable] = old_value
return individuals | montague/interpreter.py | from collections import namedtuple
from . import ast
# A model pairs the domain of individuals with an assignment mapping variable
# names to their denotations.
WorldModel = namedtuple("WorldModel", ["individuals", "assignments"])
def interpret_formula(formula, model):
    """Given a logical formula and a model of the world, return the formula's denotation
    in the model.
    """
    if isinstance(formula, ast.Var):
        # A variable denotes whatever the assignment function maps it to.
        return model.assignments[formula.value]
    elif isinstance(formula, ast.And):
        # Conjunction; Python's `and` short-circuits, so the right operand may
        # go uninterpreted.
        return interpret_formula(formula.left, model) and interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.Or):
        # Inclusive disjunction, also short-circuiting.
        return interpret_formula(formula.left, model) or interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.IfThen):
        # Material implication: p -> q  ==  (not p) or q.
        return not interpret_formula(formula.left, model) or interpret_formula(
            formula.right, model
        )
    elif isinstance(formula, ast.Call):
        # Predicate application: the caller denotes a collection of
        # individuals, so application reduces to membership.
        caller = interpret_formula(formula.caller, model)
        arg = interpret_formula(formula.arg, model)
        return arg in caller
    elif isinstance(formula, ast.ForAll):
        # Universal quantification: every individual must satisfy the body.
        return len(satisfiers(formula.body, model, formula.symbol)) == len(
            model.individuals
        )
    elif isinstance(formula, ast.Exists):
        # Existential quantification: at least one satisfier suffices.
        return len(satisfiers(formula.body, model, formula.symbol)) > 0
    elif isinstance(formula, ast.Not):
        return not interpret_formula(formula.operand, model)
    elif isinstance(formula, ast.Iota):
        # Definite description: denotes the unique satisfier, or None when the
        # description is improper (zero or multiple satisfiers).
        sset = satisfiers(formula.body, model, formula.symbol)
        if len(sset) == 1:
            return sset.pop()
        else:
            return None
    else:
        # TODO: Handle LambdaNodes differently (they can't be interpreted, but they
        # should give a better error message).
        raise NotImplementedError(formula.__class__)
# Sentinel distinguishing "variable was unbound" from "variable was bound to None".
_UNBOUND = object()


def satisfiers(formula, model, variable):
    """Return the set of individuals in *model* that satisfy *formula* when they
    are bound to *variable*.

    The model's assignment for *variable* is temporarily overwritten for each
    candidate individual and restored (or removed, if it was unbound) afterwards.
    """
    individuals = set()
    old_value = model.assignments.get(variable, _UNBOUND)
    for individual in model.individuals:
        model.assignments[variable] = individual
        if interpret_formula(formula, model):
            individuals.add(individual)
    # Restore the caller's binding.  A sentinel (rather than None) means a
    # legitimate None binding is restored instead of deleted, and pop() avoids
    # a KeyError when `model.individuals` is empty so the loop never bound
    # the variable at all.
    if old_value is _UNBOUND:
        model.assignments.pop(variable, None)
    else:
        model.assignments[variable] = old_value
    return individuals
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._experiments_operations import build_create_request, build_delete_request_initial, build_delete_tags_request, build_get_by_id_request, build_get_by_query_request, build_get_request, build_update_request
T = TypeVar('T')
# Signature of the optional `cls` callback each operation accepts:
# (pipeline_response, deserialized_result, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExperimentsOperations:
"""ExperimentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Wire up the pipeline client, configuration, and (de)serializers."""
        self._client = client          # pipeline client that sends the requests
        self._serialize = serializer   # request-body serializer
        self._deserialize = deserializer  # response-body deserializer
        self._config = config          # service client configuration
    @distributed_trace_async
    async def get(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        experiment_name: str,
        **kwargs: Any
    ) -> "_models.Experiment":
        """Get details of an Experiment.

        Get details of an Experiment with specific Experiment name.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_name: The experiment name.
        :type experiment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Experiment, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.Experiment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied transform applied to the deserialized response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Experiment"]
        # Map well-known HTTP failures to specific azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_name=experiment_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the error body for a richer exception.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Experiment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}'}  # type: ignore
    @distributed_trace_async
    async def create(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        experiment_name: str,
        **kwargs: Any
    ) -> "_models.Experiment":
        """Create an Experiment.

        Create a new Experiment.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_name: The experiment name.
        :type experiment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Experiment, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.Experiment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied transform applied to the deserialized response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Experiment"]
        # Map well-known HTTP failures to specific azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_create_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_name=experiment_name,
            template_url=self.create.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the error body for a richer exception.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Experiment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}'}  # type: ignore
    @distributed_trace_async
    async def get_by_id(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        experiment_id: str,
        **kwargs: Any
    ) -> "_models.Experiment":
        """Get details of an Experiment.

        Get details of an Experiment with specific Experiment Id.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_id: The identifier of the experiment.
        :type experiment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Experiment, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.Experiment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied transform applied to the deserialized response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Experiment"]
        # Map well-known HTTP failures to specific azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_by_id_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            template_url=self.get_by_id.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the error body for a richer exception.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Experiment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_by_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'}  # type: ignore
    @distributed_trace_async
    async def update(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        experiment_id: str,
        body: Optional["_models.ModifyExperiment"] = None,
        **kwargs: Any
    ) -> "_models.Experiment":
        """Update details of an Experiment.

        Update details of an Experiment with specific Experiment Id.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_id: The identifier of the experiment.
        :type experiment_id: str
        :param body: Experiment details which needs to be updated.
        :type body: ~azure.mgmt.machinelearningservices.models.ModifyExperiment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Experiment, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.Experiment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied transform applied to the deserialized response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Experiment"]
        # Map well-known HTTP failures to specific azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the optional request body; the request is sent with no body
        # when the caller passes None.
        if body is not None:
            _json = self._serialize.body(body, 'ModifyExperiment')
        else:
            _json = None

        request = build_update_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the error body for a richer exception.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Experiment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'}  # type: ignore
    async def _delete_initial(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        experiment_id: str,
        **kwargs: Any
    ) -> Any:
        # Internal helper that issues the initial DELETE call; begin_delete()
        # wraps it in an AsyncLROPoller and polls until completion.
        cls = kwargs.pop('cls', None)  # type: ClsType[Any]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # The initial delete call returns an untyped JSON body.
        deserialized = self._deserialize('object', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _delete_initial.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'}  # type: ignore
@distributed_trace_async
async def begin_delete(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
**kwargs: Any
) -> AsyncLROPoller[Any]:
"""Delete an Experiment.
Delete an existing Empty Experiment.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either any or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Any]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'} # type: ignore
@distributed_trace
def get_by_query(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
url_safe_experiment_names_only: Optional[bool] = True,
body: Optional["_models.ExperimentQueryParams"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedExperimentList"]:
"""Get all Experiments in a specific workspace.
Get all experiments in a specific workspace with the specified query filters.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param url_safe_experiment_names_only:
:type url_safe_experiment_names_only: bool
:param body: Query parameters for data sorting and filtering.
:type body: ~azure.mgmt.machinelearningservices.models.ExperimentQueryParams
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedExperimentList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedExperimentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedExperimentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
if body is not None:
_json = self._serialize.body(body, 'ExperimentQueryParams')
else:
_json = None
request = build_get_by_query_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
url_safe_experiment_names_only=url_safe_experiment_names_only,
template_url=self.get_by_query.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
if body is not None:
_json = self._serialize.body(body, 'ExperimentQueryParams')
else:
_json = None
request = build_get_by_query_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
url_safe_experiment_names_only=url_safe_experiment_names_only,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedExperimentList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_by_query.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments:query'} # type: ignore
@distributed_trace_async
async def delete_tags(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.DeleteTagsCommand"] = None,
**kwargs: Any
) -> "_models.DeleteExperimentTagsResult":
"""Delete list of Tags in an Experiment.
Delete list of Tags from a specific Experiment Id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:param body: The requested tags list to be deleted.
:type body: ~azure.mgmt.machinelearningservices.models.DeleteTagsCommand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeleteExperimentTagsResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeleteExperimentTagsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeleteExperimentTagsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteTagsCommand')
else:
_json = None
request = build_delete_tags_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.delete_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeleteExperimentTagsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_tags.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/tags:delete'} # type: ignore | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/runhistory/aio/operations/_experiments_operations.py | import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._experiments_operations import build_create_request, build_delete_request_initial, build_delete_tags_request, build_get_by_id_request, build_get_by_query_request, build_get_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExperimentsOperations:
"""ExperimentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
**kwargs: Any
) -> "_models.Experiment":
"""Get details of an Experiment.
Get details of an Experiment with specific Experiment name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_name: The experiment name.
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Experiment, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Experiment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Experiment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Experiment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}'} # type: ignore
@distributed_trace_async
async def create(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
**kwargs: Any
) -> "_models.Experiment":
"""Create an Experiment.
Create a new Experiment.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_name: The experiment name.
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Experiment, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Experiment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Experiment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Experiment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}'} # type: ignore
@distributed_trace_async
async def get_by_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
**kwargs: Any
) -> "_models.Experiment":
"""Get details of an Experiment.
Get details of an Experiment with specific Experiment Id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Experiment, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Experiment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Experiment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
template_url=self.get_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Experiment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'} # type: ignore
@distributed_trace_async
async def update(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.ModifyExperiment"] = None,
**kwargs: Any
) -> "_models.Experiment":
"""Update details of an Experiment.
Update details of an Experiment with specific Experiment Id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:param body: Experiment details which needs to be updated.
:type body: ~azure.mgmt.machinelearningservices.models.ModifyExperiment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Experiment, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Experiment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Experiment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'ModifyExperiment')
else:
_json = None
request = build_update_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Experiment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'} # type: ignore
async def _delete_initial(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
**kwargs: Any
) -> Any:
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
**kwargs: Any
) -> AsyncLROPoller[Any]:
"""Delete an Experiment.
Delete an existing Empty Experiment.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either any or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Any]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}'} # type: ignore
@distributed_trace
def get_by_query(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
url_safe_experiment_names_only: Optional[bool] = True,
body: Optional["_models.ExperimentQueryParams"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedExperimentList"]:
"""Get all Experiments in a specific workspace.
Get all experiments in a specific workspace with the specified query filters.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param url_safe_experiment_names_only:
:type url_safe_experiment_names_only: bool
:param body: Query parameters for data sorting and filtering.
:type body: ~azure.mgmt.machinelearningservices.models.ExperimentQueryParams
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedExperimentList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedExperimentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedExperimentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
if body is not None:
_json = self._serialize.body(body, 'ExperimentQueryParams')
else:
_json = None
request = build_get_by_query_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
url_safe_experiment_names_only=url_safe_experiment_names_only,
template_url=self.get_by_query.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
if body is not None:
_json = self._serialize.body(body, 'ExperimentQueryParams')
else:
_json = None
request = build_get_by_query_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
url_safe_experiment_names_only=url_safe_experiment_names_only,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedExperimentList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_by_query.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments:query'} # type: ignore
@distributed_trace_async
async def delete_tags(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.DeleteTagsCommand"] = None,
**kwargs: Any
) -> "_models.DeleteExperimentTagsResult":
"""Delete list of Tags in an Experiment.
Delete list of Tags from a specific Experiment Id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id: The identifier of the experiment.
:type experiment_id: str
:param body: The requested tags list to be deleted.
:type body: ~azure.mgmt.machinelearningservices.models.DeleteTagsCommand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeleteExperimentTagsResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeleteExperimentTagsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeleteExperimentTagsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteTagsCommand')
else:
_json = None
request = build_delete_tags_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.delete_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeleteExperimentTagsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_tags.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/tags:delete'} # type: ignore | 0.822332 | 0.112747 |
import distutils
import os
import py2exe
import shutil
import sys
def run_py2exe():
    """Build a single-file Windows executable ('dcpu16') with py2exe."""
    # ``import distutils`` at module level does not reliably bind the
    # ``core`` submodule; import it explicitly instead of relying on
    # py2exe's import side effects.
    import distutils.core
    # Touch py2exe so the import is visibly intentional; importing the
    # package is what registers the 'py2exe' distutils command.
    py2exe.__version__
    # Act as if 'py2exe' had been passed on the command line.
    sys.argv.append('py2exe')
    distutils.core.setup(
        options = {"py2exe":{
            "compressed": True,
            "optimize": 1,
            "bundle_files": 1,  # merge everything into the executable
            "excludes": ['Tkconstants', 'Tkinter', 'tcl'],
            # msvcp90.dll is shipped separately via Microsoft.VC90.CRT
            "dll_excludes": ['msvcp90.dll'],
        }},
        windows = [{
            "script": "main.py",
            "dest_base": "dcpu16",
            "icon_resources": [(1, "icons/icon.ico")],
            # Embed the side-by-side manifest (Win32 resource type 24).
            "other_resources": [(24, 1, MANIFEST)],
        }],
        zipfile=None,  # no separate library.zip
    )
def copy_file(src):
    """Copy *src* into the ``dist`` staging tree.

    The same relative path is recreated under ``dist/``; missing parent
    directories are created on demand.
    """
    print('Copying: ' + src)
    dst = os.path.join('dist', src)
    parent = os.path.dirname(dst)
    # Create the destination directory only when it is missing, instead of
    # swallowing every makedirs error (which also hid permission failures).
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    shutil.copyfile(src, dst)
def copy_directory(src):
    """Recursively stage every file under *src* into ``dist``, skipping
    Subversion bookkeeping directories."""
    for dirpath, _unused, filenames in os.walk(src):
        # Skip anything whose path mentions '.svn' (substring match, as in
        # the original layout of Subversion working copies).
        if '.svn' in dirpath:
            continue
        staged = (os.path.join(dirpath, name) for name in filenames)
        for path in staged:
            copy_file(path)
def main():
run_py2exe()
copy_directory('Microsoft.VC90.CRT')
copy_directory('programs')
copy_file('_emulator.dll')
MANIFEST = '''
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="2.0.0.0"
processorArchitecture="x86"
name="Star Rocket Level Editor"
type="win32"
/>
<description>Star Rocket Level Editor 1.0</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false"
/>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="<KEY>"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
if __name__ == '__main__':
main() | setup.py | import distutils
import os
import py2exe
import shutil
import sys
def run_py2exe():
py2exe.__version__
sys.argv.append('py2exe')
distutils.core.setup(
options = {"py2exe":{
"compressed": True,
"optimize": 1,
"bundle_files": 1,
"excludes": ['Tkconstants', 'Tkinter', 'tcl'],
"dll_excludes": ['msvcp90.dll'],
}},
windows = [{
"script": "main.py",
"dest_base": "dcpu16",
"icon_resources": [(1, "icons/icon.ico")],
"other_resources": [(24, 1, MANIFEST)],
}],
zipfile=None,
)
def copy_file(src):
print 'Copying:', src
dst = os.path.join('dist', src)
try:
os.makedirs(os.path.split(dst)[0])
except Exception:
pass
shutil.copyfile(src, dst)
def copy_directory(src):
for path, _, files in os.walk(src):
if '.svn' in path:
continue
for filename in files:
copy_file(os.path.join(path, filename))
def main():
run_py2exe()
copy_directory('Microsoft.VC90.CRT')
copy_directory('programs')
copy_file('_emulator.dll')
MANIFEST = '''
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="2.0.0.0"
processorArchitecture="x86"
name="Star Rocket Level Editor"
type="win32"
/>
<description>Star Rocket Level Editor 1.0</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false"
/>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="<KEY>"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
if __name__ == '__main__':
main() | 0.155015 | 0.130009 |
import re
import sys
from typing import Optional
import ensightreader
import numpy as np
import argparse
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("ensight_case", metavar="*.case", help="input EnSight Gold case (C Binary)")
parser.add_argument("output_obj", metavar="*.obj", help="output OBJ file (text)")
parser.add_argument("--only-parts", metavar="regex", help="only export parts matching given "
"regular expression (Python re.search)")
args = parser.parse_args()
ensight_case_path = args.ensight_case
output_obj_path = args.output_obj
part_name_regex = args.only_parts
return ensight2obj(ensight_case_path, output_obj_path, part_name_regex)
def ensight2obj(ensight_case_path: str, output_obj_path: str, part_name_regex: Optional[str] = None) -> int:
"""Main function of ensight2obj.py"""
print("Reading input EnSight case", ensight_case_path)
case = ensightreader.read_case(ensight_case_path)
geofile = case.get_geometry_model()
print("I see", len(geofile.get_part_names()), "parts in case")
parts = []
for part_id, part in geofile.parts.items():
if not part.is_surface():
print("Skipping part", part.part_name, "(not a surface part)")
elif part_name_regex and not re.search(part_name_regex, part.part_name):
print("Skipping part", part.part_name, "(name doesn't match)")
else:
parts.append(part)
print("Reading nodes...")
node_arrays = []
with open(geofile.file_path, "rb") as fp_geo:
for part in parts:
node_array = part.read_nodes(fp_geo)
node_arrays.append(node_array)
all_nodes = np.vstack(node_arrays)
number_of_nodes = all_nodes.shape[0]
print("Writing output OBJ", output_obj_path)
# OBJ uses uses global vertex numbering, starting from 1.
# EnSight uses per-part vertex numbering, starting from 1.
# To accommodate this, we need to increment the IDs for subsequent EnSight parts.
node_id_offset = 0
with open(output_obj_path, "w") as fp_obj, open(geofile.file_path, "rb") as fp_geo:
print(f"Writing {number_of_nodes} nodes...", flush=True)
for i in range(number_of_nodes):
print("v", *all_nodes[i], file=fp_obj)
for i, part in enumerate(parts):
print(f"Writing part {part.part_name}...", flush=True)
print("g", part.part_name, file=fp_obj) # translate EnSight parts to OBJ groups
for block in part.element_blocks:
if block.element_type.dimension != 2:
print(f"\tSkipping {block.number_of_elements} {block.element_type.value} elements", flush=True)
continue
print(f"\tWriting {block.number_of_elements} {block.element_type.value} elements", flush=True)
print("#", block.element_type.value, file=fp_obj)
if block.element_type == block.element_type.NSIDED:
polygon_node_counts, polygon_connectivity = block.read_connectivity_nsided(fp_geo)
polygon_connectivity += node_id_offset
k = 0
for j in range(len(polygon_node_counts)):
node_count = polygon_node_counts[j]
print("f", *(polygon_connectivity[k:k + node_count]), file=fp_obj)
k += node_count
else:
connectivity = block.read_connectivity(fp_geo)
connectivity += node_id_offset
for j in range(connectivity.shape[0]):
print("f", *(connectivity[j]), file=fp_obj)
node_id_offset += part.number_of_nodes
print("\nAll done.")
return 0
if __name__ == "__main__":
sys.exit(main()) | ensight2obj.py | import re
import sys
from typing import Optional
import ensightreader
import numpy as np
import argparse
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("ensight_case", metavar="*.case", help="input EnSight Gold case (C Binary)")
parser.add_argument("output_obj", metavar="*.obj", help="output OBJ file (text)")
parser.add_argument("--only-parts", metavar="regex", help="only export parts matching given "
"regular expression (Python re.search)")
args = parser.parse_args()
ensight_case_path = args.ensight_case
output_obj_path = args.output_obj
part_name_regex = args.only_parts
return ensight2obj(ensight_case_path, output_obj_path, part_name_regex)
def ensight2obj(ensight_case_path: str, output_obj_path: str, part_name_regex: Optional[str] = None) -> int:
"""Main function of ensight2obj.py"""
print("Reading input EnSight case", ensight_case_path)
case = ensightreader.read_case(ensight_case_path)
geofile = case.get_geometry_model()
print("I see", len(geofile.get_part_names()), "parts in case")
parts = []
for part_id, part in geofile.parts.items():
if not part.is_surface():
print("Skipping part", part.part_name, "(not a surface part)")
elif part_name_regex and not re.search(part_name_regex, part.part_name):
print("Skipping part", part.part_name, "(name doesn't match)")
else:
parts.append(part)
print("Reading nodes...")
node_arrays = []
with open(geofile.file_path, "rb") as fp_geo:
for part in parts:
node_array = part.read_nodes(fp_geo)
node_arrays.append(node_array)
all_nodes = np.vstack(node_arrays)
number_of_nodes = all_nodes.shape[0]
print("Writing output OBJ", output_obj_path)
# OBJ uses uses global vertex numbering, starting from 1.
# EnSight uses per-part vertex numbering, starting from 1.
# To accommodate this, we need to increment the IDs for subsequent EnSight parts.
node_id_offset = 0
with open(output_obj_path, "w") as fp_obj, open(geofile.file_path, "rb") as fp_geo:
print(f"Writing {number_of_nodes} nodes...", flush=True)
for i in range(number_of_nodes):
print("v", *all_nodes[i], file=fp_obj)
for i, part in enumerate(parts):
print(f"Writing part {part.part_name}...", flush=True)
print("g", part.part_name, file=fp_obj) # translate EnSight parts to OBJ groups
for block in part.element_blocks:
if block.element_type.dimension != 2:
print(f"\tSkipping {block.number_of_elements} {block.element_type.value} elements", flush=True)
continue
print(f"\tWriting {block.number_of_elements} {block.element_type.value} elements", flush=True)
print("#", block.element_type.value, file=fp_obj)
if block.element_type == block.element_type.NSIDED:
polygon_node_counts, polygon_connectivity = block.read_connectivity_nsided(fp_geo)
polygon_connectivity += node_id_offset
k = 0
for j in range(len(polygon_node_counts)):
node_count = polygon_node_counts[j]
print("f", *(polygon_connectivity[k:k + node_count]), file=fp_obj)
k += node_count
else:
connectivity = block.read_connectivity(fp_geo)
connectivity += node_id_offset
for j in range(connectivity.shape[0]):
print("f", *(connectivity[j]), file=fp_obj)
node_id_offset += part.number_of_nodes
print("\nAll done.")
return 0
if __name__ == "__main__":
sys.exit(main()) | 0.521959 | 0.217919 |
import discord
from discord.ext import commands
import random
from discord.commands import slash_command
class media(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(description="Doge smile")
async def smile(self, ctx):
await ctx.respond('https://tenor.com/view/perro-xd-xd-moises-xd-gif-18316386')
@slash_command(description="Mr Krab's bruh-ing")
async def mrkrabsbruh(self, ctx):
await ctx.respond('https://tenor.com/view/mr-krabs-bruh-cringe-meme-gif-20821912')
@slash_command(description="shrimp")
async def shrimp(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/879172329450643488/879717503784345651/bruh_1.mp4')
@slash_command(description="lesss gooooooooo")
async def dababy(self, ctx):
await ctx.respond('https://tenor.com/view/dababy-lets-go-car-vroom-vroom-gif-21874738')
@slash_command(description="OK BOOMers")
async def latityintro(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879641942131097620/ElectroBOOM_LATITY.mp4')
@slash_command(description="mind if i praise the lord?")
async def thelord(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879641696432947210/PRAISE_THE_LAWD.mp4')
@slash_command(description="chicken mcnuggets")
async def chickennugget(self, ctx):
await ctx.respond('https://tenor.com/view/chicken-nuggets-pics-art-chicken-chicken-nugget-yeah-gif-16426997')
@slash_command(description="phishe being phishe")
async def phishe(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879642028462448650/phishe.mp4')
@slash_command(description="cat gifs. its cute")
async def cat(self, ctx):
cat_stuff = ['https://cdn.discordapp.com/attachments/733869939227624457/876518209342308362/cat.gif',
'https://tenor.com/view/post-this-cat-instantly-gif-21407907',
'https://tenor.com/view/cat-dance-cat-dance-gif-20491618',
'https://tenor.com/view/caracal-big-floppa-flop-fo-gif-18296053',
'https://cdn.discordapp.com/attachments/880251530723356762/880447528108183572/20210729_103016.jpg']
await ctx.respond(random.choice(cat_stuff))
@slash_command(description="cmon, who doesnt love hampter?")
async def hampter(self, ctx):
hampter_list = ['https://tenor.com/view/bootythehamster-booty-hamster-syrian-syrian-hamster-gif-20948949',
'https://tenor.com/view/shummer-hamster-gif-13082806',
'https://tenor.com/view/hamster-pet-cute-adorable-bff-hamsters-gif-17730896',
'https://tenor.com/view/hamster-chase-cuddles-gif-4372189']
await ctx.respond(random.choice(hampter_list))
@slash_command(description="All kitty review reports goes here (well actually no)")
async def kittyreview(self, ctx):
very_cute = ['https://tenor.com/view/kitty-review-kitty-cat-review-gif-20973771',
'https://tenor.com/view/kitty-review-performance-kittie-cute-gif-21164379',
'https://tenor.com/view/kitty-review-kitty-ballin-kitty-review-cat-gif-21145619',
'https://tenor.com/view/kitty-review-kitty-cat-cat-review-squishy-cat-gif-21193166',
'https://tenor.com/view/kitty-review-cat-kitty-review-stanky-gif-21071465',
'https://tenor.com/view/kitty-review-cat-kitty-review-gif-20973783',
'https://tenor.com/view/kitty-review-kitty-cat-cat-review-gif-21193114',
'https://tenor.com/view/seals-emporium-kitty-review-gif-21748019',
'https://tenor.com/view/kitty-review-gif-21031795',
'https://tenor.com/view/kitty-review-cat-kitty-review-gif-20973774',
'https://tenor.com/view/kitty-gif-21363562',
'https://tenor.com/view/kitty-review-kitty-review-gif-22462155',
'https://tenor.com/view/kitty-review-kitty-review-gaming-cat-gif-22352786']
await ctx.respond(random.choice(very_cute))
@slash_command(description="Cursed image, yea its kinda cursed")
async def cursedimg(self, ctx):
await ctx.respond(file=discord.File(f'./pictures/pic{random.randint(1, 14)}.jpeg'))
@slash_command(description="fumo's? hell yeah dude")
async def fumo(self, ctx):
fumos_boi = ['https://tenor.com/view/touhou-fumo-sakuya-spin-gif-18209352',
'https://tenor.com/view/touhou-touhou-fumo-fumo-touhou-yuyuko-fumo-gif-23291237',
'https://tenor.com/view/fumo-reimu-fumo-fumo-fumo-sleep-time-gif-21713443',
'https://tenor.com/view/fumo-fumofumo-touhou-touhou-fumo-alice-margatroid-gif-20710104',
'https://tenor.com/view/murder-fumo-frog-touhou-touhou-doll-gif-21576540',
'https://tenor.com/view/touhou-fumo-flandre-generator-gif-19559237',
'https://tenor.com/view/touhou-fumo-cirno-jumpscare-gif-22884418',
'https://tenor.com/view/touhou-cirno-fumo-cirno-fumo-funky-gif-22838318',
'https://tenor.com/view/hakurei-reimu-fumo-fumo-fumo-fumo-doll-el-transporte-gif-20650216',
'https://tenor.com/view/touhou-fumo-touhou-fumo-fumo-touhou-aya-gif-23193653',
'https://tenor.com/view/reimu-fumo-cry-about-it-gif-21782335',
'https://tenor.com/view/anime-touhou-gif-22815463']
await ctx.respond(random.choice(fumos_boi))
@slash_command(description="The skeleton appears")
async def skeleton(self, ctx):
skull = ["https://tenor.com/view/fart-memes-i-farted-skeleton-gif-24093226",
"https://tenor.com/view/the-skeleton-appears-theskeleton-theskeletonappears-gif-24753701",
"https://tenor.com/view/pablo-escobar-gif-22308826",
"https://tenor.com/view/skeleton-dancing-skeleton-skeleton-dancing-mental-mental-health-gif-22285576"]
await ctx.respond(random.choice(skull))
@slash_command(description="Flavored doorknob testing facility")
async def doorknob(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/529475315953696770/876700584227508234/caption.gif")
@slash_command(description="A man and a TV")
async def tvmp4(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943121237658435624/TV.mp4")
@slash_command(description="willi in a bike")
async def willi(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943121935640977408/willi.mp4")
@slash_command(description="dudes get bonked in a truck")
async def truck(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122194584711189/truck.mp4")
@slash_command(description="Welcome to EPIC RAP BATTLE HISTORY")
async def rapbattlehistory(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122686811459614/EMINEM_VS_IBS.mp4")
@slash_command(description="Im a gamer")
async def gamer(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122992316174356/gamer.jpg")
@slash_command(description="lagh.mp4")
async def lagh(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943123134838624286/LAGH.mp4")
@slash_command(description="Watermelon taste good when you sleep")
async def watermelon(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943126605679722496/hmmmmm.mp4")
@slash_command(description="Sauce ?")
async def sauce(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/901972458914062457/943037052268716042/video0.mov")
def setup(bot):
bot.add_cog(media(bot)) | cogs/media.py | import discord
from discord.ext import commands
import random
from discord.commands import slash_command
class media(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(description="Doge smile")
async def smile(self, ctx):
await ctx.respond('https://tenor.com/view/perro-xd-xd-moises-xd-gif-18316386')
@slash_command(description="Mr Krab's bruh-ing")
async def mrkrabsbruh(self, ctx):
await ctx.respond('https://tenor.com/view/mr-krabs-bruh-cringe-meme-gif-20821912')
@slash_command(description="shrimp")
async def shrimp(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/879172329450643488/879717503784345651/bruh_1.mp4')
@slash_command(description="lesss gooooooooo")
async def dababy(self, ctx):
await ctx.respond('https://tenor.com/view/dababy-lets-go-car-vroom-vroom-gif-21874738')
@slash_command(description="OK BOOMers")
async def latityintro(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879641942131097620/ElectroBOOM_LATITY.mp4')
@slash_command(description="mind if i praise the lord?")
async def thelord(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879641696432947210/PRAISE_THE_LAWD.mp4')
@slash_command(description="chicken mcnuggets")
async def chickennugget(self, ctx):
await ctx.respond('https://tenor.com/view/chicken-nuggets-pics-art-chicken-chicken-nugget-yeah-gif-16426997')
@slash_command(description="phishe being phishe")
async def phishe(self, ctx):
await ctx.respond('https://cdn.discordapp.com/attachments/875395171880165398/879642028462448650/phishe.mp4')
@slash_command(description="cat gifs. its cute")
async def cat(self, ctx):
cat_stuff = ['https://cdn.discordapp.com/attachments/733869939227624457/876518209342308362/cat.gif',
'https://tenor.com/view/post-this-cat-instantly-gif-21407907',
'https://tenor.com/view/cat-dance-cat-dance-gif-20491618',
'https://tenor.com/view/caracal-big-floppa-flop-fo-gif-18296053',
'https://cdn.discordapp.com/attachments/880251530723356762/880447528108183572/20210729_103016.jpg']
await ctx.respond(random.choice(cat_stuff))
@slash_command(description="cmon, who doesnt love hampter?")
async def hampter(self, ctx):
hampter_list = ['https://tenor.com/view/bootythehamster-booty-hamster-syrian-syrian-hamster-gif-20948949',
'https://tenor.com/view/shummer-hamster-gif-13082806',
'https://tenor.com/view/hamster-pet-cute-adorable-bff-hamsters-gif-17730896',
'https://tenor.com/view/hamster-chase-cuddles-gif-4372189']
await ctx.respond(random.choice(hampter_list))
@slash_command(description="All kitty review reports goes here (well actually no)")
async def kittyreview(self, ctx):
very_cute = ['https://tenor.com/view/kitty-review-kitty-cat-review-gif-20973771',
'https://tenor.com/view/kitty-review-performance-kittie-cute-gif-21164379',
'https://tenor.com/view/kitty-review-kitty-ballin-kitty-review-cat-gif-21145619',
'https://tenor.com/view/kitty-review-kitty-cat-cat-review-squishy-cat-gif-21193166',
'https://tenor.com/view/kitty-review-cat-kitty-review-stanky-gif-21071465',
'https://tenor.com/view/kitty-review-cat-kitty-review-gif-20973783',
'https://tenor.com/view/kitty-review-kitty-cat-cat-review-gif-21193114',
'https://tenor.com/view/seals-emporium-kitty-review-gif-21748019',
'https://tenor.com/view/kitty-review-gif-21031795',
'https://tenor.com/view/kitty-review-cat-kitty-review-gif-20973774',
'https://tenor.com/view/kitty-gif-21363562',
'https://tenor.com/view/kitty-review-kitty-review-gif-22462155',
'https://tenor.com/view/kitty-review-kitty-review-gaming-cat-gif-22352786']
await ctx.respond(random.choice(very_cute))
@slash_command(description="Cursed image, yea its kinda cursed")
async def cursedimg(self, ctx):
await ctx.respond(file=discord.File(f'./pictures/pic{random.randint(1, 14)}.jpeg'))
@slash_command(description="fumo's? hell yeah dude")
async def fumo(self, ctx):
fumos_boi = ['https://tenor.com/view/touhou-fumo-sakuya-spin-gif-18209352',
'https://tenor.com/view/touhou-touhou-fumo-fumo-touhou-yuyuko-fumo-gif-23291237',
'https://tenor.com/view/fumo-reimu-fumo-fumo-fumo-sleep-time-gif-21713443',
'https://tenor.com/view/fumo-fumofumo-touhou-touhou-fumo-alice-margatroid-gif-20710104',
'https://tenor.com/view/murder-fumo-frog-touhou-touhou-doll-gif-21576540',
'https://tenor.com/view/touhou-fumo-flandre-generator-gif-19559237',
'https://tenor.com/view/touhou-fumo-cirno-jumpscare-gif-22884418',
'https://tenor.com/view/touhou-cirno-fumo-cirno-fumo-funky-gif-22838318',
'https://tenor.com/view/hakurei-reimu-fumo-fumo-fumo-fumo-doll-el-transporte-gif-20650216',
'https://tenor.com/view/touhou-fumo-touhou-fumo-fumo-touhou-aya-gif-23193653',
'https://tenor.com/view/reimu-fumo-cry-about-it-gif-21782335',
'https://tenor.com/view/anime-touhou-gif-22815463']
await ctx.respond(random.choice(fumos_boi))
@slash_command(description="The skeleton appears")
async def skeleton(self, ctx):
skull = ["https://tenor.com/view/fart-memes-i-farted-skeleton-gif-24093226",
"https://tenor.com/view/the-skeleton-appears-theskeleton-theskeletonappears-gif-24753701",
"https://tenor.com/view/pablo-escobar-gif-22308826",
"https://tenor.com/view/skeleton-dancing-skeleton-skeleton-dancing-mental-mental-health-gif-22285576"]
await ctx.respond(random.choice(skull))
@slash_command(description="Flavored doorknob testing facility")
async def doorknob(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/529475315953696770/876700584227508234/caption.gif")
@slash_command(description="A man and a TV")
async def tvmp4(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943121237658435624/TV.mp4")
@slash_command(description="willi in a bike")
async def willi(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943121935640977408/willi.mp4")
@slash_command(description="dudes get bonked in a truck")
async def truck(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122194584711189/truck.mp4")
@slash_command(description="Welcome to EPIC RAP BATTLE HISTORY")
async def rapbattlehistory(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122686811459614/EMINEM_VS_IBS.mp4")
@slash_command(description="Im a gamer")
async def gamer(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943122992316174356/gamer.jpg")
@slash_command(description="lagh.mp4")
async def lagh(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943123134838624286/LAGH.mp4")
@slash_command(description="Watermelon taste good when you sleep")
async def watermelon(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/741920768388104272/943126605679722496/hmmmmm.mp4")
@slash_command(description="Sauce ?")
async def sauce(self, ctx):
await ctx.respond("https://cdn.discordapp.com/attachments/901972458914062457/943037052268716042/video0.mov")
def setup(bot):
bot.add_cog(media(bot)) | 0.298696 | 0.146118 |
import os
import random
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from fairtorch import ConstraintLoss, DemographicParityLoss, EqualiedOddsLoss
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(2020)
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
metafunc.parametrize(
argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
)
class SensitiveDataset(Dataset):
def __init__(self, x, y, sensitive):
self.x = x.float()
self.y = y.float()
# self.y = np.ones(shape=y.shape).astype(np.float32)
sensitive_categories = sensitive.unique().numpy()
# print(sencat)
self.category_to_index_dict = dict(
zip(list(sensitive_categories), range(len(sensitive_categories)))
)
self.index_to_category_dict = dict(
zip(range(len(sensitive_categories)), list(sensitive_categories))
)
self.sensitive = sensitive
self.sensitive_ids = [
self.category_to_index_dict[i] for i in self.sensitive.numpy().tolist()
]
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx].reshape(-1), self.sensitive_ids[idx]
class TestConstraint:
params = {"test_costraint": [dict()]}
def test_costraint(self):
consloss = ConstraintLoss()
assert isinstance(consloss, ConstraintLoss)
class TestDemographicParityLoss:
params = {
"test_dp": [dict(feature_dim=16, sample_size=128, dim_condition=2)],
"test_eo": [dict(feature_dim=16, sample_size=128, dim_condition=2)],
"test_train": [
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=None,
feature_dim=16,
sample_size=16,
dim_condition=2,
),
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=DemographicParityLoss(),
feature_dim=16,
sample_size=16,
dim_condition=2,
),
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=EqualiedOddsLoss(),
feature_dim=16,
sample_size=16,
dim_condition=2,
),
],
}
device = "cpu"
def test_dp(self, feature_dim, sample_size, dim_condition):
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
dp_loss = DemographicParityLoss(sensitive_classes=[0, 1])
assert isinstance(dp_loss, DemographicParityLoss)
x_train = torch.randn((sample_size, feature_dim))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
out = model(x_train)
mu = dp_loss.mu_f(x_train, out, sensitive_features)
assert int(mu.size(0)) == dim_condition + 1
loss = dp_loss(x_train, out, sensitive_features)
assert float(loss) >= 0
def test_eo(self, feature_dim, sample_size, dim_condition):
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
eo_loss = EqualiedOddsLoss(sensitive_classes=[0, 1])
assert isinstance(eo_loss, EqualiedOddsLoss)
x_train = torch.randn((sample_size, feature_dim))
y = torch.randint(0, 2, (sample_size,))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
out = model(x_train)
mu = eo_loss.mu_f(x_train, torch.sigmoid(out), sensitive_features, y=y)
print(mu.size(), type(mu.size()))
assert int(mu.size(0)) == (dim_condition + 1) * 2
loss = eo_loss(x_train, out, sensitive_features, y)
assert float(loss) >= 0
def test_train(self, criterion, constraints, feature_dim, sample_size, dim_condition):
torch.set_default_dtype(torch.float32)
x = torch.randn((sample_size, feature_dim))
y = torch.randint(0, 2, (sample_size,))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
dataset = SensitiveDataset(x, y, sensitive_features)
train_size = len(dataset)
train_dataset, test_dataset = torch.utils.data.random_split(
dataset, [int(0.8 * train_size), train_size - int(0.8 * train_size)]
)
print(self.device)
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
model.to(self.device)
optimizer = optim.Adam(model.parameters())
train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)
model = self.__train_model(
model=model,
criterion=criterion,
constraints=constraints,
optimizer=optimizer,
data_loader=train_loader,
)
def __train_model(self, model, criterion, constraints, data_loader, optimizer, max_epoch=1):
for epoch in range(max_epoch):
for i, data in enumerate(data_loader):
x, y, sensitive_features = data
x = x.to(self.device)
y = y.to(self.device)
sensitive_features = sensitive_features.to(self.device)
optimizer.zero_grad()
print(x.device, y.device, sensitive_features.device)
print(x.shape, y.shape, sensitive_features.shape)
logit = model(x)
assert isinstance(logit, torch.Tensor)
assert isinstance(y, torch.Tensor)
print(x.device, y.device, sensitive_features.device, logit.device)
loss = criterion(logit, y)
if constraints:
penalty = constraints(x, logit, sensitive_features, y)
print(penalty.requires_grad)
loss = loss + penalty
loss.backward()
optimizer.step()
return model | tests/test_constraint.py | import os
import random
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from fairtorch import ConstraintLoss, DemographicParityLoss, EqualiedOddsLoss
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(2020)
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
metafunc.parametrize(
argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
)
class SensitiveDataset(Dataset):
def __init__(self, x, y, sensitive):
self.x = x.float()
self.y = y.float()
# self.y = np.ones(shape=y.shape).astype(np.float32)
sensitive_categories = sensitive.unique().numpy()
# print(sencat)
self.category_to_index_dict = dict(
zip(list(sensitive_categories), range(len(sensitive_categories)))
)
self.index_to_category_dict = dict(
zip(range(len(sensitive_categories)), list(sensitive_categories))
)
self.sensitive = sensitive
self.sensitive_ids = [
self.category_to_index_dict[i] for i in self.sensitive.numpy().tolist()
]
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx].reshape(-1), self.sensitive_ids[idx]
class TestConstraint:
params = {"test_costraint": [dict()]}
def test_costraint(self):
consloss = ConstraintLoss()
assert isinstance(consloss, ConstraintLoss)
class TestDemographicParityLoss:
params = {
"test_dp": [dict(feature_dim=16, sample_size=128, dim_condition=2)],
"test_eo": [dict(feature_dim=16, sample_size=128, dim_condition=2)],
"test_train": [
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=None,
feature_dim=16,
sample_size=16,
dim_condition=2,
),
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=DemographicParityLoss(),
feature_dim=16,
sample_size=16,
dim_condition=2,
),
dict(
criterion=nn.BCEWithLogitsLoss(),
constraints=EqualiedOddsLoss(),
feature_dim=16,
sample_size=16,
dim_condition=2,
),
],
}
device = "cpu"
def test_dp(self, feature_dim, sample_size, dim_condition):
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
dp_loss = DemographicParityLoss(sensitive_classes=[0, 1])
assert isinstance(dp_loss, DemographicParityLoss)
x_train = torch.randn((sample_size, feature_dim))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
out = model(x_train)
mu = dp_loss.mu_f(x_train, out, sensitive_features)
assert int(mu.size(0)) == dim_condition + 1
loss = dp_loss(x_train, out, sensitive_features)
assert float(loss) >= 0
def test_eo(self, feature_dim, sample_size, dim_condition):
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
eo_loss = EqualiedOddsLoss(sensitive_classes=[0, 1])
assert isinstance(eo_loss, EqualiedOddsLoss)
x_train = torch.randn((sample_size, feature_dim))
y = torch.randint(0, 2, (sample_size,))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
out = model(x_train)
mu = eo_loss.mu_f(x_train, torch.sigmoid(out), sensitive_features, y=y)
print(mu.size(), type(mu.size()))
assert int(mu.size(0)) == (dim_condition + 1) * 2
loss = eo_loss(x_train, out, sensitive_features, y)
assert float(loss) >= 0
def test_train(self, criterion, constraints, feature_dim, sample_size, dim_condition):
torch.set_default_dtype(torch.float32)
x = torch.randn((sample_size, feature_dim))
y = torch.randint(0, 2, (sample_size,))
sensitive_features = torch.randint(0, dim_condition, (sample_size,))
dataset = SensitiveDataset(x, y, sensitive_features)
train_size = len(dataset)
train_dataset, test_dataset = torch.utils.data.random_split(
dataset, [int(0.8 * train_size), train_size - int(0.8 * train_size)]
)
print(self.device)
model = nn.Sequential(nn.Linear(feature_dim, 32), nn.ReLU(), nn.Linear(32, 1))
model.to(self.device)
optimizer = optim.Adam(model.parameters())
train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)
model = self.__train_model(
model=model,
criterion=criterion,
constraints=constraints,
optimizer=optimizer,
data_loader=train_loader,
)
def __train_model(self, model, criterion, constraints, data_loader, optimizer, max_epoch=1):
for epoch in range(max_epoch):
for i, data in enumerate(data_loader):
x, y, sensitive_features = data
x = x.to(self.device)
y = y.to(self.device)
sensitive_features = sensitive_features.to(self.device)
optimizer.zero_grad()
print(x.device, y.device, sensitive_features.device)
print(x.shape, y.shape, sensitive_features.shape)
logit = model(x)
assert isinstance(logit, torch.Tensor)
assert isinstance(y, torch.Tensor)
print(x.device, y.device, sensitive_features.device, logit.device)
loss = criterion(logit, y)
if constraints:
penalty = constraints(x, logit, sensitive_features, y)
print(penalty.requires_grad)
loss = loss + penalty
loss.backward()
optimizer.step()
return model | 0.775945 | 0.483344 |
from copy import deepcopy
from math import cos, sin, sqrt, copysign
class Vector(object):
@staticmethod
def _det3x3(matrix):
det = 0.0
for inc in range(3):
m = 1.0
for r in range(3):
c = (r + inc)%3
m *= matrix[r][c]
det += m
for inc in range(3):
m = 1.0
for r in range(3):
c = 2 - (r + inc)%3
m *= matrix[r][c]
det -= m
return det
@staticmethod
def _replaceCoeffs(coeffs, terms, column):
m = deepcopy(coeffs)
for row in range(3):
m[row][column] = terms[row]
return m
@staticmethod
def _resolveCramer(coeffs, terms):
div = Vector._det3x3(coeffs)
mx = Vector._replaceCoeffs(coeffs, terms, 0)
my = Vector._replaceCoeffs(coeffs, terms, 1)
mz = Vector._replaceCoeffs(coeffs, terms, 2)
dx = Vector._det3x3(mx)
dy = Vector._det3x3(my)
dz = Vector._det3x3(mz)
x = dx/div
y = dy/div
z = dz/div
return [x,y,z]
@staticmethod
def _descomposeVector(modulus, angles):
'''
Descompose a vector according rotation angles in radians
@param modulus: Modulus (length) of the vector
@param angles: Rotation angles in radians of the XY-plane
'''
cosx = cos(angles[0])
cosy = cos(angles[1])
sin2x = sin(angles[0])**2
sin2y = sin(angles[1])**2
cos2y = cosy**2
modulus2 = modulus * modulus
t1 = modulus2 * sin2y
t2 = modulus2 * cos2y
t3 = modulus2 * sin2x
coeffs = [[1.0, sin2y, 0.0], [0.0, cos2y, 1.0], [sin2x, 1.0, 0.0]]
terms = [t1, t2, t3]
#Returns x², y², z²
desc = Vector._resolveCramer(coeffs, terms)
for i in range(3):
desc[i] = sqrt(desc[i])
#Choose the right solution depending of the passed angles sign
desc[0] = copysign(desc[0], angles[1])
#Positive axis-X angle makes negative motion along Y direction
desc[1] = copysign(desc[1], -angles[0])
if cosx < 0.0 or cosy < 0.0 or modulus < 0.0:
desc[2] = -desc[2]
return desc
@staticmethod
def rotateVectorByAngle(vector, angle):
'''
Rotates a 2-dimensional vector
@param vector: 2-dimensional vector
@param angle: angle in radians to rotate
'''
return Vector.rotateVector(vector, sin(angle), cos(angle))
@staticmethod
def rotateVector(vector, sine, cosine):
'''
Rotates a 2-dimensional vector
@param vector: 2-dimensional vector
@param sine: sine of the angle to rotate
@param cosine: cosine of the angle to rotate
'''
x = vector[0] * cosine - vector[1] * sine
y = vector[0] * sine + vector[1] * cosine
return [x, y]
@staticmethod
def _applyRotation(rotationMatrix, vector):
'''
rotation matrix's order must be [3,3] and vector order must be [3]
This restriction is not chequed due to performance.
@param rotationMatrix: matrix's order [3, 3]
@param vector: vector's order [3]
'''
product = [0.0]*3
for row in range(3):
for col in range(3):
product[row] += rotationMatrix[row][col] * vector[col]
return product
@staticmethod
def rotateVector3D(vector, angles):
'''
Rotates a 3D-vector
@param vector: 3D vector as [x,y,z]
@param angles: Rotation angles as radians within 3-dimensions [pitch, roll, yaw]
'''
cosx = cos(angles[0])
sinx = sin(angles[0])
cosy = cos(angles[1])
siny = sin(angles[1])
cosz = cos(angles[2])
sinz = sin(angles[2])
rotationMatrixX = [[1.0, 0.0, 0.0],[0.0, cosx, -sinx],[0.0, sinx, cosx]]
rotationMatrixY = [[cosy, 0.0, siny],[0.0, 1.0, 0.0],[-siny, 0.0, cosy]]
rotationMatrixZ = [[cosz, -sinz , 0.0],[sinz, cosz, 0.0],[0.0, 0.0, 1.0]]
#Rotate on X-axis
vector = Vector._applyRotation(rotationMatrixX, vector)
#Rotate on Y-axis
vector = Vector._applyRotation(rotationMatrixY, vector)
#Rotate on Z-axis
vector = Vector._applyRotation(rotationMatrixZ, vector)
return vector | bot1/sensor/vector.py | from copy import deepcopy
from math import cos, sin, sqrt, copysign
class Vector(object):
@staticmethod
def _det3x3(matrix):
det = 0.0
for inc in range(3):
m = 1.0
for r in range(3):
c = (r + inc)%3
m *= matrix[r][c]
det += m
for inc in range(3):
m = 1.0
for r in range(3):
c = 2 - (r + inc)%3
m *= matrix[r][c]
det -= m
return det
@staticmethod
def _replaceCoeffs(coeffs, terms, column):
m = deepcopy(coeffs)
for row in range(3):
m[row][column] = terms[row]
return m
@staticmethod
def _resolveCramer(coeffs, terms):
div = Vector._det3x3(coeffs)
mx = Vector._replaceCoeffs(coeffs, terms, 0)
my = Vector._replaceCoeffs(coeffs, terms, 1)
mz = Vector._replaceCoeffs(coeffs, terms, 2)
dx = Vector._det3x3(mx)
dy = Vector._det3x3(my)
dz = Vector._det3x3(mz)
x = dx/div
y = dy/div
z = dz/div
return [x,y,z]
@staticmethod
def _descomposeVector(modulus, angles):
'''
Descompose a vector according rotation angles in radians
@param modulus: Modulus (length) of the vector
@param angles: Rotation angles in radians of the XY-plane
'''
cosx = cos(angles[0])
cosy = cos(angles[1])
sin2x = sin(angles[0])**2
sin2y = sin(angles[1])**2
cos2y = cosy**2
modulus2 = modulus * modulus
t1 = modulus2 * sin2y
t2 = modulus2 * cos2y
t3 = modulus2 * sin2x
coeffs = [[1.0, sin2y, 0.0], [0.0, cos2y, 1.0], [sin2x, 1.0, 0.0]]
terms = [t1, t2, t3]
#Returns x², y², z²
desc = Vector._resolveCramer(coeffs, terms)
for i in range(3):
desc[i] = sqrt(desc[i])
#Choose the right solution depending of the passed angles sign
desc[0] = copysign(desc[0], angles[1])
#Positive axis-X angle makes negative motion along Y direction
desc[1] = copysign(desc[1], -angles[0])
if cosx < 0.0 or cosy < 0.0 or modulus < 0.0:
desc[2] = -desc[2]
return desc
@staticmethod
def rotateVectorByAngle(vector, angle):
'''
Rotates a 2-dimensional vector
@param vector: 2-dimensional vector
@param angle: angle in radians to rotate
'''
return Vector.rotateVector(vector, sin(angle), cos(angle))
@staticmethod
def rotateVector(vector, sine, cosine):
'''
Rotates a 2-dimensional vector
@param vector: 2-dimensional vector
@param sine: sine of the angle to rotate
@param cosine: cosine of the angle to rotate
'''
x = vector[0] * cosine - vector[1] * sine
y = vector[0] * sine + vector[1] * cosine
return [x, y]
@staticmethod
def _applyRotation(rotationMatrix, vector):
'''
rotation matrix's order must be [3,3] and vector order must be [3]
This restriction is not chequed due to performance.
@param rotationMatrix: matrix's order [3, 3]
@param vector: vector's order [3]
'''
product = [0.0]*3
for row in range(3):
for col in range(3):
product[row] += rotationMatrix[row][col] * vector[col]
return product
@staticmethod
def rotateVector3D(vector, angles):
'''
Rotates a 3D-vector
@param vector: 3D vector as [x,y,z]
@param angles: Rotation angles as radians within 3-dimensions [pitch, roll, yaw]
'''
cosx = cos(angles[0])
sinx = sin(angles[0])
cosy = cos(angles[1])
siny = sin(angles[1])
cosz = cos(angles[2])
sinz = sin(angles[2])
rotationMatrixX = [[1.0, 0.0, 0.0],[0.0, cosx, -sinx],[0.0, sinx, cosx]]
rotationMatrixY = [[cosy, 0.0, siny],[0.0, 1.0, 0.0],[-siny, 0.0, cosy]]
rotationMatrixZ = [[cosz, -sinz , 0.0],[sinz, cosz, 0.0],[0.0, 0.0, 1.0]]
#Rotate on X-axis
vector = Vector._applyRotation(rotationMatrixX, vector)
#Rotate on Y-axis
vector = Vector._applyRotation(rotationMatrixY, vector)
#Rotate on Z-axis
vector = Vector._applyRotation(rotationMatrixZ, vector)
return vector | 0.854854 | 0.611875 |
u"""
harmonics.py
Written by <NAME> (12/2020)
Spherical harmonic data class for processing GRACE/GRACE-FO Level-2 data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
netCDF4: Python interface to the netCDF C library
(https://unidata.github.io/netcdf4-python/netCDF4/index.html)
h5py: Pythonic interface to the HDF5 binary data format.
(https://www.h5py.org/)
PROGRAM DEPENDENCIES:
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_stokes.py: writes output spherical harmonic data to HDF5
ncdf_read_stokes.py: reads spherical harmonic data from netcdf
hdf5_read_stokes.py: reads spherical harmonic data from HDF5
read_ICGEM_harmonics.py: reads gravity model coefficients from GFZ ICGEM
destripe_harmonics.py: filters spherical harmonics for correlated errors
UPDATE HISTORY:
Updated 12/2020: added verbose option for gfc files
Updated 08/2020: added compression options for ascii, netCDF4 and HDF5 files
Updated 07/2020: added class docstring and using kwargs for output to file
added case_insensitive_filename function to search directories
Updated 06/2020: output list of filenames with from_list()
zeros_like() creates a new harmonics object with dimensions of another
add ndim and shape attributes of harmonics objects
Updated 04/2020: added from_gfc to read static gravity model coefficients
add to_ascii and iterate over temporal fields in convolve and destripe
make date optional for harmonic read functions. add more math functions
add option to sort if reading from an index or merging a list
add options to flatten and expand harmonics matrices or arrays
Written 03/2020
"""
import os
import re
import gzip
import zipfile
import numpy as np
from gravity_toolkit.ncdf_stokes import ncdf_stokes
from gravity_toolkit.hdf5_stokes import hdf5_stokes
from gravity_toolkit.ncdf_read_stokes import ncdf_read_stokes
from gravity_toolkit.hdf5_read_stokes import hdf5_read_stokes
from gravity_toolkit.read_ICGEM_harmonics import read_ICGEM_harmonics
from gravity_toolkit.destripe_harmonics import destripe_harmonics
class harmonics(object):
"""
Data class for reading, writing and processing spherical harmonic data
"""
np.seterr(invalid='ignore')
def __init__(self, lmax=None, mmax=None):
self.clm=None
self.slm=None
self.time=None
self.month=None
self.lmax=lmax
self.mmax=mmax
self.l=np.arange(self.lmax+1) if self.lmax else None
self.m=np.arange(self.mmax+1) if self.mmax else None
self.shape=None
self.ndim=None
self.filename=None
def case_insensitive_filename(self,filename):
"""
Searches a directory for a filename without case dependence
"""
self.filename = os.path.expanduser(filename)
#-- check if file presently exists with input case
if not os.access(self.filename,os.F_OK):
#-- search for filename without case dependence
basename = os.path.basename(filename)
directory = os.path.dirname(os.path.expanduser(filename))
f = [f for f in os.listdir(directory) if re.match(basename,f,re.I)]
if not f:
raise IOError('{0} not found in file system'.format(filename))
self.filename = os.path.join(directory,f.pop())
return self
def from_ascii(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from an ascii file
Inputs: full path of input ascii file
Options:
ascii file contains date information
ascii file is compressed using gzip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
print(self.filename) if verbose else None
#-- open the ascii file and extract contents
if (compression == 'gzip'):
#-- read input ascii data from gzip compressed file and split lines
with gzip.open(self.filename,'r') as f:
file_contents = f.read().decode('ISO-8859-1').splitlines()
elif (compression == 'zip'):
#-- read input ascii data from zipped file and split lines
base,extension = os.path.splitext(self.filename)
with zipfile.ZipFile(self.filename) as z:
file_contents = z.read(base).decode('ISO-8859-1').splitlines()
else:
#-- read input ascii file (.txt, .asc) and split lines
with open(self.filename,'r') as f:
file_contents = f.read().splitlines()
#-- compile regular expression operator for extracting numerical values
#-- from input ascii files of spherical harmonics
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[EeD][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
#-- find maximum degree and order of harmonics
self.lmax = 0
self.mmax = 0
#-- for each line in the file
for line in file_contents:
if date:
l1,m1,clm1,slm1,time = rx.findall(line)
else:
l1,m1,clm1,slm1 = rx.findall(line)
#-- convert line degree and order to integers
l1,m1 = np.array([l1,m1],dtype=np.int)
self.lmax = np.copy(l1) if (l1 > self.lmax) else self.lmax
self.mmax = np.copy(m1) if (m1 > self.mmax) else self.mmax
#-- output spherical harmonics dimensions array
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- output spherical harmonics data
self.clm = np.zeros((self.lmax+1,self.mmax+1))
self.slm = np.zeros((self.lmax+1,self.mmax+1))
#-- if the ascii file contains date variables
if date:
self.time = np.float(time)
self.month = np.int(12.0*(self.time - 2002.0)) + 1
#-- extract harmonics and convert to matrix
#-- for each line in the file
for line in file_contents:
if date:
l1,m1,clm1,slm1,time = rx.findall(line)
else:
l1,m1,clm1,slm1 = rx.findall(line)
#-- convert line degree and order to integers
ll,mm = np.array([l1,m1],dtype=np.int)
#-- convert fortran exponentials if applicable
self.clm[ll,mm] = np.float(clm1.replace('D','E'))
self.slm[ll,mm] = np.float(slm1.replace('D','E'))
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_netCDF4(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from a netCDF4 file
Inputs: full path of input netCDF4 file
Options:
netCDF4 file contains date information
netCDF4 file is compressed using gzip or zip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from netCDF4 file
Ylms = ncdf_read_stokes(self.filename, ATTRIBUTES=False,
DATE=date, COMPRESSION=compression, VERBOSE=verbose)
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.l = Ylms['l'].copy()
self.m = Ylms['m'].copy()
self.lmax = np.max(Ylms['l'])
self.mmax = np.max(Ylms['m'])
if date:
self.time = Ylms['time'].copy()
self.month = Ylms['month'].copy()
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_HDF5(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from a HDF5 file
Inputs: full path of input HDF5 file
Options:
HDF5 file contains date information
HDF5 file is compressed using gzip or zip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from HDF5 file
Ylms = hdf5_read_stokes(self.filename, ATTRIBUTES=False,
DATE=date, COMPRESSION=compression, VERBOSE=verbose)
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.l = Ylms['l'].copy()
self.m = Ylms['m'].copy()
self.lmax = np.max(Ylms['l'])
self.mmax = np.max(Ylms['m'])
if date:
self.time = Ylms['time'].copy()
self.month = Ylms['month'].copy()
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_gfc(self, filename, verbose=False):
"""
Read a harmonics object from a gfc gravity model file from the GFZ ICGEM
Inputs: full path of input gfc file
Options:
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from gfc file
Ylms = read_ICGEM_harmonics(self.filename)
#-- Output file information
if verbose:
print(self.filename)
print(list(Ylms.keys()))
#-- copy variables for static gravity model
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.lmax = np.int(Ylms['max_degree'])
self.mmax = np.int(Ylms['max_degree'])
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- geophysical parameters of gravity model
self.GM = np.float(Ylms['earth_gravity_constant'])
self.R = np.float(Ylms['radius'])
self.tide = Ylms['tide_system']
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_index(self, filename, format=None, date=True, sort=True):
"""
Read a harmonics object from an index of ascii, netCDF4 or HDF5 files
Inputs: full path of index file to be read into a harmonics object
Options:
format of files in index (ascii, netCDF4 or HDF5)
ascii, netCDF4, or HDF5 contains date information
sort harmonics objects by date information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- Read index file of input spherical harmonics
with open(self.filename,'r') as f:
file_list = f.read().splitlines()
#-- create a list of harmonic objects
h = []
#-- for each file in the index
for i,f in enumerate(file_list):
if (format == 'ascii'):
#-- ascii (.txt)
h.append(harmonics().from_ascii(os.path.expanduser(f),date=date))
elif (format == 'netCDF4'):
#-- netcdf (.nc)
h.append(harmonics().from_netCDF4(os.path.expanduser(f),date=date))
elif (format == 'HDF5'):
#-- HDF5 (.H5)
h.append(harmonics().from_HDF5(os.path.expanduser(f),date=date))
#-- create a single harmonic object from the list
return self.from_list(h,date=date,sort=sort)
def from_list(self, object_list, date=True, sort=True):
"""
Build a sorted harmonics object from a list of other harmonics objects
Inputs: list of harmonics object to be merged
Options:
harmonics objects contain date information
sort harmonics objects by date information
"""
#-- number of harmonic objects in list
n = len(object_list)
#-- indices to sort data objects if harmonics list contain dates
if date and sort:
list_sort = np.argsort([d.time for d in object_list],axis=None)
else:
list_sort = np.arange(n)
#-- truncate to maximum degree and order
self.lmax = np.min([d.lmax for d in object_list])
self.mmax = np.min([d.mmax for d in object_list])
#-- output degree and order
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- create output harmonics
self.clm = np.zeros((self.lmax+1,self.mmax+1,n))
self.slm = np.zeros((self.lmax+1,self.mmax+1,n))
#-- create list of files
self.filename = []
#-- output dates
if date:
self.time = np.zeros((n))
self.month = np.zeros((n),dtype=np.int)
#-- for each indice
for t,i in enumerate(list_sort):
self.clm[:,:,t] = object_list[i].clm[:self.lmax+1,:self.mmax+1]
self.slm[:,:,t] = object_list[i].slm[:self.lmax+1,:self.mmax+1]
if date:
self.time[t] = object_list[i].time[:].copy()
self.month[t] = object_list[i].month[:].copy()
#-- append filename to list
if getattr(object_list[i], 'filename'):
self.filename.append(object_list[i].filename)
#-- assign shape and ndim attributes
self.update_dimensions()
#-- return the single harmonic object
return self
def from_dict(self, d):
"""
Convert a dict object to a harmonics object
Inputs: dictionary object to be converted
"""
#-- assign dictionary variables to self
for key in ['l','m','clm','slm','time','month']:
try:
setattr(self, key, d[key].copy())
except (AttributeError, KeyError):
pass
#-- maximum degree and order
self.lmax = np.max(d['l'])
self.mmax = np.max(d['m'])
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def to_ascii(self, filename, date=True):
"""
Write a harmonics object to ascii file
Inputs: full path of output ascii file
Options: harmonics objects contain date information
"""
self.filename = os.path.expanduser(filename)
#-- open the output file
fid = open(self.filename, 'w')
if date:
file_format = '{0:5d} {1:5d} {2:+21.12e} {3:+21.12e} {4:10.4f}'
else:
file_format = '{0:5d} {1:5d} {2:+21.12e} {3:+21.12e}'
#-- write to file for each spherical harmonic degree and order
for m in range(0, self.mmax+1):
for l in range(m, self.lmax+1):
args = (l, m, self.clm[l,m], self.slm[l,m], self.time)
print(file_format.format(*args), file=fid)
#-- close the output file
fid.close()
def to_netCDF4(self, filename, date=True, **kwargs):
"""
Write a harmonics object to netCDF4 file
Inputs: full path of output netCDF4 file
Options: harmonics objects contain date information
**kwargs: keyword arguments for ncdf_stokes
"""
self.filename = os.path.expanduser(filename)
if 'TIME_UNITS' not in kwargs.keys():
kwargs['TIME_UNITS'] = 'years'
if 'TIME_LONGNAME' not in kwargs.keys():
kwargs['TIME_LONGNAME'] = 'Date_in_Decimal_Years'
ncdf_stokes(self.clm, self.slm, self.l, self.m, self.time, self.month,
FILENAME=self.filename, DATE=date, **kwargs)
def to_HDF5(self, filename, date=True, **kwargs):
"""
Write a harmonics object to HDF5 file
Inputs: full path of output HDF5 file
Options: harmonics objects contain date information
**kwargs: keyword arguments for hdf5_stokes
"""
self.filename = os.path.expanduser(filename)
if 'TIME_UNITS' not in kwargs.keys():
kwargs['TIME_UNITS'] = 'years'
if 'TIME_LONGNAME' not in kwargs.keys():
kwargs['TIME_LONGNAME'] = 'Date_in_Decimal_Years'
hdf5_stokes(self.clm, self.slm, self.l, self.m, self.time, self.month,
FILENAME=self.filename, DATE=date, **kwargs)
def update_dimensions(self):
"""
Update the dimensions of the spatial object
"""
self.ndim = self.clm.ndim
self.shape = self.clm.shape
return self
def add(self, temp):
"""
Add two harmonics objects
Inputs: harmonic object to be added
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp.update_dimensions()
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
if (self.ndim == 2):
self.clm[:l1,:m1] += temp.clm[:l1,:m1]
self.slm[:l1,:m1] += temp.slm[:l1,:m1]
elif (self.ndim == 3) and (temp.ndim == 2):
for i,t in enumerate(self.time):
self.clm[:l1,:m1,i] += temp.clm[:l1,:m1]
self.slm[:l1,:m1,i] += temp.slm[:l1,:m1]
else:
self.clm[:l1,:m1,:] += temp.clm[:l1,:m1,:]
self.slm[:l1,:m1,:] += temp.slm[:l1,:m1,:]
return self
def subtract(self, temp):
"""
Subtract one harmonics object from another
Inputs: harmonic object to be subtracted
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp.update_dimensions()
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
if (self.ndim == 2):
self.clm[:l1,:m1] -= temp.clm[:l1,:m1]
self.slm[:l1,:m1] -= temp.slm[:l1,:m1]
elif (self.ndim == 3) and (temp.ndim == 2):
for i,t in enumerate(self.time):
self.clm[:l1,:m1,i] -= temp.clm[:l1,:m1]
self.slm[:l1,:m1,i] -= temp.slm[:l1,:m1]
else:
self.clm[:l1,:m1,:] -= temp.clm[:l1,:m1,:]
self.slm[:l1,:m1,:] -= temp.slm[:l1,:m1,:]
return self
def multiply(self, temp):
"""
Multiply two harmonics objects
Inputs: harmonic object to be multiplied
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp.update_dimensions()
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
if (self.ndim == 2):
self.clm[:l1,:m1] *= temp.clm[:l1,:m1]
self.slm[:l1,:m1] *= temp.slm[:l1,:m1]
elif (self.ndim == 3) and (temp.ndim == 2):
for i,t in enumerate(self.time):
self.clm[:l1,:m1,i] *= temp.clm[:l1,:m1]
self.slm[:l1,:m1,i] *= temp.slm[:l1,:m1]
else:
self.clm[:l1,:m1,:] *= temp.clm[:l1,:m1,:]
self.slm[:l1,:m1,:] *= temp.slm[:l1,:m1,:]
return self
def divide(self, temp):
"""
Divide one harmonics object from another
Inputs: harmonic object to be divided
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp.update_dimensions()
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
#-- indices for cosine spherical harmonics (including zonals)
lc,mc = np.tril_indices(l1, m=m1)
#-- indices for sine spherical harmonics (excluding zonals)
m0 = np.nonzero(mc != 0)
ls,ms = (lc[m0],mc[m0])
if (self.ndim == 2):
self.clm[lc,mc] /= temp.clm[lc,mc]
self.slm[ls,ms] /= temp.slm[ls,ms]
elif (self.ndim == 3) and (temp.ndim == 2):
for i,t in enumerate(self.time):
self.clm[lc,mc,i] /= temp.clm[lc,mc]
self.slm[ls,ms,i] /= temp.slm[ls,ms]
else:
self.clm[lc,mc,:] /= temp.clm[lc,mc,:]
self.slm[ls,ms,:] /= temp.slm[ls,ms,:]
return self
def copy(self):
"""
Copy a harmonics object to a new harmonics object
"""
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
#-- try to assign variables to self
for key in ['clm','slm','time','month','shape','ndim','filename']:
try:
val = getattr(self, key)
setattr(temp, key, np.copy(val))
except AttributeError:
pass
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def zeros_like(self):
"""
Create a harmonics object using the dimensions of another
"""
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
#-- assign variables to self
for key in ['clm','slm','time','month']:
try:
val = getattr(self, key)
setattr(temp, key, np.zeros_like(val))
except AttributeError:
pass
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def expand_dims(self):
"""
Add a singleton dimension to a harmonics object if non-existent
"""
#-- change time dimensions to be iterable
self.time = np.atleast_1d(self.time)
self.month = np.atleast_1d(self.month)
#-- output harmonics with a third dimension
if (self.ndim == 2):
self.clm = self.clm[:,:,None]
self.slm = self.slm[:,:,None]
#-- reassign ndim and shape attributes
self.update_dimensions()
return self
def squeeze(self):
"""
Remove singleton dimensions from a harmonics object
"""
#-- squeeze singleton dimensions
self.time = np.squeeze(self.time)
self.month = np.squeeze(self.month)
self.clm = np.squeeze(self.clm)
self.slm = np.squeeze(self.slm)
#-- reassign ndim and shape attributes
self.update_dimensions()
return self
def flatten(self, date=True):
"""
Flatten harmonics matrices into arrays
Options: harmonics objects contain date information
"""
n_harm = (self.lmax**2 + 3*self.lmax - (self.lmax-self.mmax)**2 -
(self.lmax-self.mmax))//2 + 1
#-- restructured degree and order
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.l = np.zeros((n_harm,), dtype=np.int32)
temp.m = np.zeros((n_harm,), dtype=np.int32)
#-- copy date variables if applicable
if date:
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
#-- restructured spherical harmonic arrays
if (self.clm.ndim == 2):
temp.clm = np.zeros((n_harm))
temp.slm = np.zeros((n_harm))
else:
n = self.clm.shape[-1]
temp.clm = np.zeros((n_harm,n))
temp.slm = np.zeros((n_harm,n))
#-- create counter variable lm
lm = 0
for m in range(0,self.mmax+1):#-- MMAX+1 to include MMAX
for l in range(m,self.lmax+1):#-- LMAX+1 to include LMAX
temp.l[lm] = np.int(l)
temp.m[lm] = np.int(m)
if (self.clm.ndim == 2):
temp.clm[lm] = self.clm[l,m]
temp.slm[lm] = self.slm[l,m]
else:
temp.clm[lm,:] = self.clm[l,m,:]
temp.slm[lm,:] = self.slm[l,m,:]
#-- add 1 to lm counter variable
lm += 1
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- return the flattened arrays
return temp
def expand(self, date=True):
"""
Expand flattened harmonics into matrices
Options: harmonics objects contain date information
"""
n_harm = (self.lmax**2 + 3*self.lmax - (self.lmax-self.mmax)**2 -
(self.lmax-self.mmax))//2 + 1
#-- restructured degree and order
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
#-- copy date variables if applicable
if date:
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
#-- restructured spherical harmonic matrices
if (self.clm.ndim == 1):
temp.clm = np.zeros((self.lmax+1,self.mmax+1))
temp.slm = np.zeros((self.lmax+1,self.mmax+1))
else:
n = self.clm.shape[-1]
temp.clm = np.zeros((self.lmax+1,self.mmax+1,n))
temp.slm = np.zeros((self.lmax+1,self.mmax+1,n))
#-- create counter variable lm
for lm in range(n_harm):
l = self.l[lm]
m = self.m[lm]
if (self.clm.ndim == 1):
temp.clm[l,m] = self.clm[lm]
temp.slm[l,m] = self.slm[lm]
else:
temp.clm[l,m,:] = self.clm[lm,:]
temp.slm[l,m,:] = self.slm[lm,:]
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- return the expanded harmonics object
return temp
def index(self, indice, date=True):
"""
Subset a harmonics object to specific index
Inputs: indice in matrix to subset
Options: harmonics objects contain date information
"""
#-- output harmonics object
temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
#-- subset output harmonics
temp.clm = self.clm[:,:,indice].copy()
temp.slm = self.slm[:,:,indice].copy()
#-- subset output dates
if date:
temp.time = self.time[indice].copy()
temp.month = self.month[indice].copy()
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- subset filenames
if getattr(self, 'filename'):
temp.filename = self.filename[indice]
return temp
def subset(self, months):
"""
Subset a harmonics object to specific GRACE/GRACE-FO months
Inputs: GRACE/GRACE-FO months
"""
#-- check if months is an array or a single value
months = np.atleast_1d(months)
#-- number of months
n = len(months)
#-- check that all months are available
months_check = list(set(months) - set(self.month))
if months_check:
m = ','.join(['{0:03d}'.format(m) for m in months_check])
raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))
#-- indices to sort data objects
months_list = [i for i,m in enumerate(self.month) if m in months]
#-- output harmonics object
temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
#-- create output harmonics
temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,n))
temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,n))
temp.time = np.zeros((n))
temp.month = np.zeros((n),dtype=np.int)
temp.filename = []
#-- for each indice
for t,i in enumerate(months_list):
temp.clm[:,:,t] = self.clm[:,:,i].copy()
temp.slm[:,:,t] = self.slm[:,:,i].copy()
temp.time[t] = self.time[i].copy()
temp.month[t] = self.month[i].copy()
if getattr(self, 'filename'):
temp.filename.append(self.filename[i])
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- remove singleton dimensions if importing a single value
return temp.squeeze()
def truncate(self, lmax, lmin=0, mmax=None):
"""
Truncate or expand a harmonics object to a new degree and order
Inputs: lmax maximum degree of spherical harmonics
Options: lmin minimum degree of spherical harmonics
mmax maximum order of spherical harmonics
"""
#-- output harmonics object
mmax = np.copy(lmax) if (mmax is None) else mmax
#-- copy prior harmonics object
temp = self.copy()
#-- set new degree and order
self.lmax = np.copy(lmax)
self.mmax = np.copy(mmax) if mmax else np.copy(lmax)
#-- truncation levels
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
#-- create output harmonics
if (temp.ndim == 3):
#-- number of months
n = temp.clm.shape[-1]
self.clm = np.zeros((self.lmax+1,self.mmax+1,n))
self.slm = np.zeros((self.lmax+1,self.mmax+1,n))
self.clm[lmin:l1,:m1,:] = temp.clm[lmin:l1,:m1,:].copy()
self.slm[lmin:l1,:m1,:] = temp.slm[lmin:l1,:m1,:].copy()
else:
self.clm = np.zeros((self.lmax+1,self.mmax+1))
self.slm = np.zeros((self.lmax+1,self.mmax+1))
self.clm[lmin:l1,:m1] = temp.clm[lmin:l1,:m1].copy()
self.slm[lmin:l1,:m1] = temp.slm[lmin:l1,:m1].copy()
#-- reassign ndim and shape attributes
self.update_dimensions()
#-- return the truncated or expanded harmonics object
return self
    def mean(self, apply=False):
        """
        Compute mean gravitational field and remove from data if specified

        Options: apply to remove the mean field from the input harmonics
        Returns: harmonics object with the temporal mean of each coefficient
        """
        temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
        #-- allocate for mean field
        temp.clm = np.zeros((temp.lmax+1,temp.mmax+1))
        temp.slm = np.zeros((temp.lmax+1,temp.mmax+1))
        #-- Computes the mean for each spherical harmonic degree and order
        for m in range(0,temp.mmax+1):#-- MMAX+1 to include l
            for l in range(m,temp.lmax+1):#-- LMAX+1 to include LMAX
                #-- calculate mean static field (mean over the time axis;
                #-- assumes self.clm/slm are 3-d temporal fields)
                temp.clm[l,m] = np.mean(self.clm[l,m,:])
                temp.slm[l,m] = np.mean(self.slm[l,m,:])
                #-- calculating the time-variable gravity field by removing
                #-- the static component of the gravitational field
                if apply:
                    self.clm[l,m,:] -= temp.clm[l,m]
                    self.slm[l,m,:] -= temp.slm[l,m]
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- return the mean field
        return temp
def scale(self, var):
"""
Multiply a harmonics object by a constant
Inputs: scalar value to which the harmonics object will be multiplied
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
#-- multiply by a single constant or a time-variable scalar
if (np.ndim(var) == 0):
temp.clm = var*self.clm
temp.slm = var*self.slm
elif (np.ndim(var) == 1) and (self.ndim == 2):
temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,len(var)))
temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,len(var)))
for i,v in enumerate(var):
temp.clm[:,:,i] = v*self.clm
temp.slm[:,:,i] = v*self.slm
elif (np.ndim(var) == 1) and (self.ndim == 3):
for i,v in enumerate(var):
temp.clm[:,:,i] = v*self.clm[:,:,i]
temp.slm[:,:,i] = v*self.slm[:,:,i]
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def power(self, power):
"""
Raise a harmonics object to a power
Inputs: power to which the harmonics object will be raised
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
for key in ['clm','slm']:
val = getattr(self, key)
setattr(temp, key, np.power(val,power))
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
    def convolve(self, var):
        """
        Convolve spherical harmonics with a degree-dependent array

        Inputs: degree dependent array for convolution (indexed by degree l)
        Returns: self with coefficients multiplied in-place
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        #-- check if a single field or a temporal field
        if (self.ndim == 2):
            for l in range(0,self.lmax+1):#-- LMAX+1 to include LMAX
                self.clm[l,:] *= var[l]
                self.slm[l,:] *= var[l]
        else:
            #-- apply the degree-dependent factor to each epoch
            for i,t in enumerate(self.time):
                for l in range(0,self.lmax+1):#-- LMAX+1 to include LMAX
                    self.clm[l,:,i] *= var[l]
                    self.slm[l,:,i] *= var[l]
        #-- return the convolved field
        return self
    def destripe(self, **kwargs):
        """
        Filters spherical harmonic coefficients for correlated "striping" errors

        Options: keyword arguments for destripe_harmonics
        Returns: new harmonics object with the filtered coefficients
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
        temp.time = np.copy(self.time)
        temp.month = np.copy(self.month)
        #-- check if a single field or a temporal field
        if (self.ndim == 2):
            Ylms = destripe_harmonics(self.clm, self.slm,
                LMIN=1, LMAX=self.lmax, MMAX=self.mmax, **kwargs)
            temp.clm = Ylms['clm'].copy()
            temp.slm = Ylms['slm'].copy()
        else:
            n = self.shape[-1]
            temp.clm = np.zeros((self.lmax+1,self.mmax+1,n))
            temp.slm = np.zeros((self.lmax+1,self.mmax+1,n))
            #-- destripe each epoch independently
            for i in range(n):
                Ylms = destripe_harmonics(self.clm[:,:,i], self.slm[:,:,i],
                    LMIN=1, LMAX=self.lmax, MMAX=self.mmax, **kwargs)
                temp.clm[:,:,i] = Ylms['clm'].copy()
                temp.slm[:,:,i] = Ylms['slm'].copy()
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- return the destriped field
        return temp | gravity_toolkit/harmonics.py | u"""
harmonics.py
Written by <NAME> (12/2020)
Spherical harmonic data class for processing GRACE/GRACE-FO Level-2 data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
netCDF4: Python interface to the netCDF C library
(https://unidata.github.io/netcdf4-python/netCDF4/index.html)
h5py: Pythonic interface to the HDF5 binary data format.
(https://www.h5py.org/)
PROGRAM DEPENDENCIES:
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_stokes.py: writes output spherical harmonic data to HDF5
ncdf_read_stokes.py: reads spherical harmonic data from netcdf
hdf5_read_stokes.py: reads spherical harmonic data from HDF5
read_ICGEM_harmonics.py: reads gravity model coefficients from GFZ ICGEM
destripe_harmonics.py: filters spherical harmonics for correlated errors
UPDATE HISTORY:
Updated 12/2020: added verbose option for gfc files
Updated 08/2020: added compression options for ascii, netCDF4 and HDF5 files
Updated 07/2020: added class docstring and using kwargs for output to file
added case_insensitive_filename function to search directories
Updated 06/2020: output list of filenames with from_list()
zeros_like() creates a new harmonics object with dimensions of another
add ndim and shape attributes of harmonics objects
Updated 04/2020: added from_gfc to read static gravity model coefficients
add to_ascii and iterate over temporal fields in convolve and destripe
make date optional for harmonic read functions. add more math functions
add option to sort if reading from an index or merging a list
add options to flatten and expand harmonics matrices or arrays
Written 03/2020
"""
import os
import re
import gzip
import zipfile
import numpy as np
from gravity_toolkit.ncdf_stokes import ncdf_stokes
from gravity_toolkit.hdf5_stokes import hdf5_stokes
from gravity_toolkit.ncdf_read_stokes import ncdf_read_stokes
from gravity_toolkit.hdf5_read_stokes import hdf5_read_stokes
from gravity_toolkit.read_ICGEM_harmonics import read_ICGEM_harmonics
from gravity_toolkit.destripe_harmonics import destripe_harmonics
class harmonics(object):
    """
    Data class for reading, writing and processing spherical harmonic data
    """
    #-- ignore invalid-value warnings from numpy operations (e.g. 0/0)
    np.seterr(invalid='ignore')
    def __init__(self, lmax=None, mmax=None):
        """
        Initialize harmonics object

        Options: lmax maximum spherical harmonic degree
                 mmax maximum spherical harmonic order
        """
        #-- spherical harmonic coefficient matrices
        self.clm=None
        self.slm=None
        #-- date variables (decimal years and GRACE/GRACE-FO month)
        self.time=None
        self.month=None
        #-- maximum degree and order
        self.lmax=lmax
        self.mmax=mmax
        #-- degree and order arrays
        #-- NOTE: test against None (not truthiness) so that lmax=0 or
        #-- mmax=0 still creates valid degree/order arrays
        self.l=np.arange(self.lmax+1) if self.lmax is not None else None
        self.m=np.arange(self.mmax+1) if self.mmax is not None else None
        #-- dimension attributes (set by update_dimensions)
        self.shape=None
        self.ndim=None
        #-- associated input/output file(s)
        self.filename=None
def case_insensitive_filename(self,filename):
"""
Searches a directory for a filename without case dependence
"""
self.filename = os.path.expanduser(filename)
#-- check if file presently exists with input case
if not os.access(self.filename,os.F_OK):
#-- search for filename without case dependence
basename = os.path.basename(filename)
directory = os.path.dirname(os.path.expanduser(filename))
f = [f for f in os.listdir(directory) if re.match(basename,f,re.I)]
if not f:
raise IOError('{0} not found in file system'.format(filename))
self.filename = os.path.join(directory,f.pop())
return self
def from_ascii(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from an ascii file
Inputs: full path of input ascii file
Options:
ascii file contains date information
ascii file is compressed using gzip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
print(self.filename) if verbose else None
#-- open the ascii file and extract contents
if (compression == 'gzip'):
#-- read input ascii data from gzip compressed file and split lines
with gzip.open(self.filename,'r') as f:
file_contents = f.read().decode('ISO-8859-1').splitlines()
elif (compression == 'zip'):
#-- read input ascii data from zipped file and split lines
base,extension = os.path.splitext(self.filename)
with zipfile.ZipFile(self.filename) as z:
file_contents = z.read(base).decode('ISO-8859-1').splitlines()
else:
#-- read input ascii file (.txt, .asc) and split lines
with open(self.filename,'r') as f:
file_contents = f.read().splitlines()
#-- compile regular expression operator for extracting numerical values
#-- from input ascii files of spherical harmonics
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[EeD][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
#-- find maximum degree and order of harmonics
self.lmax = 0
self.mmax = 0
#-- for each line in the file
for line in file_contents:
if date:
l1,m1,clm1,slm1,time = rx.findall(line)
else:
l1,m1,clm1,slm1 = rx.findall(line)
#-- convert line degree and order to integers
l1,m1 = np.array([l1,m1],dtype=np.int)
self.lmax = np.copy(l1) if (l1 > self.lmax) else self.lmax
self.mmax = np.copy(m1) if (m1 > self.mmax) else self.mmax
#-- output spherical harmonics dimensions array
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- output spherical harmonics data
self.clm = np.zeros((self.lmax+1,self.mmax+1))
self.slm = np.zeros((self.lmax+1,self.mmax+1))
#-- if the ascii file contains date variables
if date:
self.time = np.float(time)
self.month = np.int(12.0*(self.time - 2002.0)) + 1
#-- extract harmonics and convert to matrix
#-- for each line in the file
for line in file_contents:
if date:
l1,m1,clm1,slm1,time = rx.findall(line)
else:
l1,m1,clm1,slm1 = rx.findall(line)
#-- convert line degree and order to integers
ll,mm = np.array([l1,m1],dtype=np.int)
#-- convert fortran exponentials if applicable
self.clm[ll,mm] = np.float(clm1.replace('D','E'))
self.slm[ll,mm] = np.float(slm1.replace('D','E'))
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_netCDF4(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from a netCDF4 file
Inputs: full path of input netCDF4 file
Options:
netCDF4 file contains date information
netCDF4 file is compressed using gzip or zip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from netCDF4 file
Ylms = ncdf_read_stokes(self.filename, ATTRIBUTES=False,
DATE=date, COMPRESSION=compression, VERBOSE=verbose)
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.l = Ylms['l'].copy()
self.m = Ylms['m'].copy()
self.lmax = np.max(Ylms['l'])
self.mmax = np.max(Ylms['m'])
if date:
self.time = Ylms['time'].copy()
self.month = Ylms['month'].copy()
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_HDF5(self, filename, date=True, compression=None, verbose=False):
"""
Read a harmonics object from a HDF5 file
Inputs: full path of input HDF5 file
Options:
HDF5 file contains date information
HDF5 file is compressed using gzip or zip
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from HDF5 file
Ylms = hdf5_read_stokes(self.filename, ATTRIBUTES=False,
DATE=date, COMPRESSION=compression, VERBOSE=verbose)
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.l = Ylms['l'].copy()
self.m = Ylms['m'].copy()
self.lmax = np.max(Ylms['l'])
self.mmax = np.max(Ylms['m'])
if date:
self.time = Ylms['time'].copy()
self.month = Ylms['month'].copy()
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_gfc(self, filename, verbose=False):
"""
Read a harmonics object from a gfc gravity model file from the GFZ ICGEM
Inputs: full path of input gfc file
Options:
verbose output of file information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- read data from gfc file
Ylms = read_ICGEM_harmonics(self.filename)
#-- Output file information
if verbose:
print(self.filename)
print(list(Ylms.keys()))
#-- copy variables for static gravity model
self.clm = Ylms['clm'].copy()
self.slm = Ylms['slm'].copy()
self.lmax = np.int(Ylms['max_degree'])
self.mmax = np.int(Ylms['max_degree'])
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- geophysical parameters of gravity model
self.GM = np.float(Ylms['earth_gravity_constant'])
self.R = np.float(Ylms['radius'])
self.tide = Ylms['tide_system']
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def from_index(self, filename, format=None, date=True, sort=True):
"""
Read a harmonics object from an index of ascii, netCDF4 or HDF5 files
Inputs: full path of index file to be read into a harmonics object
Options:
format of files in index (ascii, netCDF4 or HDF5)
ascii, netCDF4, or HDF5 contains date information
sort harmonics objects by date information
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- Read index file of input spherical harmonics
with open(self.filename,'r') as f:
file_list = f.read().splitlines()
#-- create a list of harmonic objects
h = []
#-- for each file in the index
for i,f in enumerate(file_list):
if (format == 'ascii'):
#-- ascii (.txt)
h.append(harmonics().from_ascii(os.path.expanduser(f),date=date))
elif (format == 'netCDF4'):
#-- netcdf (.nc)
h.append(harmonics().from_netCDF4(os.path.expanduser(f),date=date))
elif (format == 'HDF5'):
#-- HDF5 (.H5)
h.append(harmonics().from_HDF5(os.path.expanduser(f),date=date))
#-- create a single harmonic object from the list
return self.from_list(h,date=date,sort=sort)
def from_list(self, object_list, date=True, sort=True):
"""
Build a sorted harmonics object from a list of other harmonics objects
Inputs: list of harmonics object to be merged
Options:
harmonics objects contain date information
sort harmonics objects by date information
"""
#-- number of harmonic objects in list
n = len(object_list)
#-- indices to sort data objects if harmonics list contain dates
if date and sort:
list_sort = np.argsort([d.time for d in object_list],axis=None)
else:
list_sort = np.arange(n)
#-- truncate to maximum degree and order
self.lmax = np.min([d.lmax for d in object_list])
self.mmax = np.min([d.mmax for d in object_list])
#-- output degree and order
self.l = np.arange(self.lmax+1)
self.m = np.arange(self.mmax+1)
#-- create output harmonics
self.clm = np.zeros((self.lmax+1,self.mmax+1,n))
self.slm = np.zeros((self.lmax+1,self.mmax+1,n))
#-- create list of files
self.filename = []
#-- output dates
if date:
self.time = np.zeros((n))
self.month = np.zeros((n),dtype=np.int)
#-- for each indice
for t,i in enumerate(list_sort):
self.clm[:,:,t] = object_list[i].clm[:self.lmax+1,:self.mmax+1]
self.slm[:,:,t] = object_list[i].slm[:self.lmax+1,:self.mmax+1]
if date:
self.time[t] = object_list[i].time[:].copy()
self.month[t] = object_list[i].month[:].copy()
#-- append filename to list
if getattr(object_list[i], 'filename'):
self.filename.append(object_list[i].filename)
#-- assign shape and ndim attributes
self.update_dimensions()
#-- return the single harmonic object
return self
def from_dict(self, d):
"""
Convert a dict object to a harmonics object
Inputs: dictionary object to be converted
"""
#-- assign dictionary variables to self
for key in ['l','m','clm','slm','time','month']:
try:
setattr(self, key, d[key].copy())
except (AttributeError, KeyError):
pass
#-- maximum degree and order
self.lmax = np.max(d['l'])
self.mmax = np.max(d['m'])
#-- assign shape and ndim attributes
self.update_dimensions()
return self
def to_ascii(self, filename, date=True):
"""
Write a harmonics object to ascii file
Inputs: full path of output ascii file
Options: harmonics objects contain date information
"""
self.filename = os.path.expanduser(filename)
#-- open the output file
fid = open(self.filename, 'w')
if date:
file_format = '{0:5d} {1:5d} {2:+21.12e} {3:+21.12e} {4:10.4f}'
else:
file_format = '{0:5d} {1:5d} {2:+21.12e} {3:+21.12e}'
#-- write to file for each spherical harmonic degree and order
for m in range(0, self.mmax+1):
for l in range(m, self.lmax+1):
args = (l, m, self.clm[l,m], self.slm[l,m], self.time)
print(file_format.format(*args), file=fid)
#-- close the output file
fid.close()
def to_netCDF4(self, filename, date=True, **kwargs):
"""
Write a harmonics object to netCDF4 file
Inputs: full path of output netCDF4 file
Options: harmonics objects contain date information
**kwargs: keyword arguments for ncdf_stokes
"""
self.filename = os.path.expanduser(filename)
if 'TIME_UNITS' not in kwargs.keys():
kwargs['TIME_UNITS'] = 'years'
if 'TIME_LONGNAME' not in kwargs.keys():
kwargs['TIME_LONGNAME'] = 'Date_in_Decimal_Years'
ncdf_stokes(self.clm, self.slm, self.l, self.m, self.time, self.month,
FILENAME=self.filename, DATE=date, **kwargs)
def to_HDF5(self, filename, date=True, **kwargs):
"""
Write a harmonics object to HDF5 file
Inputs: full path of output HDF5 file
Options: harmonics objects contain date information
**kwargs: keyword arguments for hdf5_stokes
"""
self.filename = os.path.expanduser(filename)
if 'TIME_UNITS' not in kwargs.keys():
kwargs['TIME_UNITS'] = 'years'
if 'TIME_LONGNAME' not in kwargs.keys():
kwargs['TIME_LONGNAME'] = 'Date_in_Decimal_Years'
hdf5_stokes(self.clm, self.slm, self.l, self.m, self.time, self.month,
FILENAME=self.filename, DATE=date, **kwargs)
def update_dimensions(self):
"""
Update the dimensions of the spatial object
"""
self.ndim = self.clm.ndim
self.shape = self.clm.shape
return self
    def add(self, temp):
        """
        Add two harmonics objects

        Inputs: harmonic object to be added
        Returns: self with temp added in-place (up to common degree/order)
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp.update_dimensions()
        #-- common truncation levels between the two objects
        l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
        m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
        if (self.ndim == 2):
            #-- single field + single field
            self.clm[:l1,:m1] += temp.clm[:l1,:m1]
            self.slm[:l1,:m1] += temp.slm[:l1,:m1]
        elif (self.ndim == 3) and (temp.ndim == 2):
            #-- add a single field to each epoch of a temporal field
            for i,t in enumerate(self.time):
                self.clm[:l1,:m1,i] += temp.clm[:l1,:m1]
                self.slm[:l1,:m1,i] += temp.slm[:l1,:m1]
        else:
            #-- temporal field + temporal field (epochs assumed aligned)
            self.clm[:l1,:m1,:] += temp.clm[:l1,:m1,:]
            self.slm[:l1,:m1,:] += temp.slm[:l1,:m1,:]
        return self
    def subtract(self, temp):
        """
        Subtract one harmonics object from another

        Inputs: harmonic object to be subtracted
        Returns: self with temp subtracted in-place (up to common degree/order)
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp.update_dimensions()
        #-- common truncation levels between the two objects
        l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
        m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
        if (self.ndim == 2):
            #-- single field - single field
            self.clm[:l1,:m1] -= temp.clm[:l1,:m1]
            self.slm[:l1,:m1] -= temp.slm[:l1,:m1]
        elif (self.ndim == 3) and (temp.ndim == 2):
            #-- subtract a single field from each epoch of a temporal field
            for i,t in enumerate(self.time):
                self.clm[:l1,:m1,i] -= temp.clm[:l1,:m1]
                self.slm[:l1,:m1,i] -= temp.slm[:l1,:m1]
        else:
            #-- temporal field - temporal field (epochs assumed aligned)
            self.clm[:l1,:m1,:] -= temp.clm[:l1,:m1,:]
            self.slm[:l1,:m1,:] -= temp.slm[:l1,:m1,:]
        return self
    def multiply(self, temp):
        """
        Multiply two harmonics objects

        Inputs: harmonic object to be multiplied
        Returns: self multiplied in-place by temp (up to common degree/order)
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp.update_dimensions()
        #-- common truncation levels between the two objects
        l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
        m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
        if (self.ndim == 2):
            #-- single field * single field
            self.clm[:l1,:m1] *= temp.clm[:l1,:m1]
            self.slm[:l1,:m1] *= temp.slm[:l1,:m1]
        elif (self.ndim == 3) and (temp.ndim == 2):
            #-- multiply each epoch of a temporal field by a single field
            for i,t in enumerate(self.time):
                self.clm[:l1,:m1,i] *= temp.clm[:l1,:m1]
                self.slm[:l1,:m1,i] *= temp.slm[:l1,:m1]
        else:
            #-- temporal field * temporal field (epochs assumed aligned)
            self.clm[:l1,:m1,:] *= temp.clm[:l1,:m1,:]
            self.slm[:l1,:m1,:] *= temp.slm[:l1,:m1,:]
        return self
    def divide(self, temp):
        """
        Divide one harmonics object from another

        Inputs: harmonic object to be divided
        Returns: self divided in-place by temp (up to common degree/order)
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp.update_dimensions()
        #-- common truncation levels between the two objects
        l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
        m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
        #-- indices for cosine spherical harmonics (including zonals)
        lc,mc = np.tril_indices(l1, m=m1)
        #-- indices for sine spherical harmonics (excluding zonals)
        #-- NOTE(review): zonal (m=0) sine terms are skipped, presumably
        #-- because they are identically zero (avoiding divide-by-zero)
        m0 = np.nonzero(mc != 0)
        ls,ms = (lc[m0],mc[m0])
        if (self.ndim == 2):
            #-- single field / single field
            self.clm[lc,mc] /= temp.clm[lc,mc]
            self.slm[ls,ms] /= temp.slm[ls,ms]
        elif (self.ndim == 3) and (temp.ndim == 2):
            #-- divide each epoch of a temporal field by a single field
            for i,t in enumerate(self.time):
                self.clm[lc,mc,i] /= temp.clm[lc,mc]
                self.slm[ls,ms,i] /= temp.slm[ls,ms]
        else:
            #-- temporal field / temporal field (epochs assumed aligned)
            self.clm[lc,mc,:] /= temp.clm[lc,mc,:]
            self.slm[ls,ms,:] /= temp.slm[ls,ms,:]
        return self
def copy(self):
"""
Copy a harmonics object to a new harmonics object
"""
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
#-- try to assign variables to self
for key in ['clm','slm','time','month','shape','ndim','filename']:
try:
val = getattr(self, key)
setattr(temp, key, np.copy(val))
except AttributeError:
pass
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def zeros_like(self):
"""
Create a harmonics object using the dimensions of another
"""
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
#-- assign variables to self
for key in ['clm','slm','time','month']:
try:
val = getattr(self, key)
setattr(temp, key, np.zeros_like(val))
except AttributeError:
pass
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def expand_dims(self):
"""
Add a singleton dimension to a harmonics object if non-existent
"""
#-- change time dimensions to be iterable
self.time = np.atleast_1d(self.time)
self.month = np.atleast_1d(self.month)
#-- output harmonics with a third dimension
if (self.ndim == 2):
self.clm = self.clm[:,:,None]
self.slm = self.slm[:,:,None]
#-- reassign ndim and shape attributes
self.update_dimensions()
return self
def squeeze(self):
"""
Remove singleton dimensions from a harmonics object
"""
#-- squeeze singleton dimensions
self.time = np.squeeze(self.time)
self.month = np.squeeze(self.month)
self.clm = np.squeeze(self.clm)
self.slm = np.squeeze(self.slm)
#-- reassign ndim and shape attributes
self.update_dimensions()
return self
def flatten(self, date=True):
"""
Flatten harmonics matrices into arrays
Options: harmonics objects contain date information
"""
n_harm = (self.lmax**2 + 3*self.lmax - (self.lmax-self.mmax)**2 -
(self.lmax-self.mmax))//2 + 1
#-- restructured degree and order
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.l = np.zeros((n_harm,), dtype=np.int32)
temp.m = np.zeros((n_harm,), dtype=np.int32)
#-- copy date variables if applicable
if date:
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
#-- restructured spherical harmonic arrays
if (self.clm.ndim == 2):
temp.clm = np.zeros((n_harm))
temp.slm = np.zeros((n_harm))
else:
n = self.clm.shape[-1]
temp.clm = np.zeros((n_harm,n))
temp.slm = np.zeros((n_harm,n))
#-- create counter variable lm
lm = 0
for m in range(0,self.mmax+1):#-- MMAX+1 to include MMAX
for l in range(m,self.lmax+1):#-- LMAX+1 to include LMAX
temp.l[lm] = np.int(l)
temp.m[lm] = np.int(m)
if (self.clm.ndim == 2):
temp.clm[lm] = self.clm[l,m]
temp.slm[lm] = self.slm[l,m]
else:
temp.clm[lm,:] = self.clm[l,m,:]
temp.slm[lm,:] = self.slm[l,m,:]
#-- add 1 to lm counter variable
lm += 1
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- return the flattened arrays
return temp
    def expand(self, date=True):
        """
        Expand flattened harmonics into matrices

        Options: harmonics objects contain date information
        Returns: new harmonics object with (l,m[,t]) coefficient matrices
        """
        #-- total number of harmonics in the flattened arrays
        n_harm = (self.lmax**2 + 3*self.lmax - (self.lmax-self.mmax)**2 -
            (self.lmax-self.mmax))//2 + 1
        #-- restructured degree and order
        temp = harmonics(lmax=self.lmax, mmax=self.mmax)
        #-- copy date variables if applicable
        if date:
            temp.time = np.copy(self.time)
            temp.month = np.copy(self.month)
        #-- restructured spherical harmonic matrices
        if (self.clm.ndim == 1):
            temp.clm = np.zeros((self.lmax+1,self.mmax+1))
            temp.slm = np.zeros((self.lmax+1,self.mmax+1))
        else:
            #-- temporal field: preserve the trailing time dimension
            n = self.clm.shape[-1]
            temp.clm = np.zeros((self.lmax+1,self.mmax+1,n))
            temp.slm = np.zeros((self.lmax+1,self.mmax+1,n))
        #-- create counter variable lm
        for lm in range(n_harm):
            #-- degree and order for the flattened index
            l = self.l[lm]
            m = self.m[lm]
            if (self.clm.ndim == 1):
                temp.clm[l,m] = self.clm[lm]
                temp.slm[l,m] = self.slm[lm]
            else:
                temp.clm[l,m,:] = self.clm[lm,:]
                temp.slm[l,m,:] = self.slm[lm,:]
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- return the expanded harmonics object
        return temp
    def index(self, indice, date=True):
        """
        Subset a harmonics object to specific index

        Inputs: indice in matrix to subset
        Options: harmonics objects contain date information
        Returns: new harmonics object for the single epoch
        """
        #-- output harmonics object
        temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
        #-- subset output harmonics
        temp.clm = self.clm[:,:,indice].copy()
        temp.slm = self.slm[:,:,indice].copy()
        #-- subset output dates
        if date:
            temp.time = self.time[indice].copy()
            temp.month = self.month[indice].copy()
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- subset filenames (if the object tracks input files)
        if getattr(self, 'filename'):
            temp.filename = self.filename[indice]
        return temp
def subset(self, months):
"""
Subset a harmonics object to specific GRACE/GRACE-FO months
Inputs: GRACE/GRACE-FO months
"""
#-- check if months is an array or a single value
months = np.atleast_1d(months)
#-- number of months
n = len(months)
#-- check that all months are available
months_check = list(set(months) - set(self.month))
if months_check:
m = ','.join(['{0:03d}'.format(m) for m in months_check])
raise IOError('GRACE/GRACE-FO months {0} not Found'.format(m))
#-- indices to sort data objects
months_list = [i for i,m in enumerate(self.month) if m in months]
#-- output harmonics object
temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
#-- create output harmonics
temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,n))
temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,n))
temp.time = np.zeros((n))
temp.month = np.zeros((n),dtype=np.int)
temp.filename = []
#-- for each indice
for t,i in enumerate(months_list):
temp.clm[:,:,t] = self.clm[:,:,i].copy()
temp.slm[:,:,t] = self.slm[:,:,i].copy()
temp.time[t] = self.time[i].copy()
temp.month[t] = self.month[i].copy()
if getattr(self, 'filename'):
temp.filename.append(self.filename[i])
#-- assign ndim and shape attributes
temp.update_dimensions()
#-- remove singleton dimensions if importing a single value
return temp.squeeze()
def truncate(self, lmax, lmin=0, mmax=None):
"""
Truncate or expand a harmonics object to a new degree and order
Inputs: lmax maximum degree of spherical harmonics
Options: lmin minimum degree of spherical harmonics
mmax maximum order of spherical harmonics
"""
#-- output harmonics object
mmax = np.copy(lmax) if (mmax is None) else mmax
#-- copy prior harmonics object
temp = self.copy()
#-- set new degree and order
self.lmax = np.copy(lmax)
self.mmax = np.copy(mmax) if mmax else np.copy(lmax)
#-- truncation levels
l1 = self.lmax+1 if (temp.lmax > self.lmax) else temp.lmax+1
m1 = self.mmax+1 if (temp.mmax > self.mmax) else temp.mmax+1
#-- create output harmonics
if (temp.ndim == 3):
#-- number of months
n = temp.clm.shape[-1]
self.clm = np.zeros((self.lmax+1,self.mmax+1,n))
self.slm = np.zeros((self.lmax+1,self.mmax+1,n))
self.clm[lmin:l1,:m1,:] = temp.clm[lmin:l1,:m1,:].copy()
self.slm[lmin:l1,:m1,:] = temp.slm[lmin:l1,:m1,:].copy()
else:
self.clm = np.zeros((self.lmax+1,self.mmax+1))
self.slm = np.zeros((self.lmax+1,self.mmax+1))
self.clm[lmin:l1,:m1] = temp.clm[lmin:l1,:m1].copy()
self.slm[lmin:l1,:m1] = temp.slm[lmin:l1,:m1].copy()
#-- reassign ndim and shape attributes
self.update_dimensions()
#-- return the truncated or expanded harmonics object
return self
    def mean(self, apply=False):
        """
        Compute mean gravitational field and remove from data if specified

        Options: apply to remove the mean field from the input harmonics
        Returns: harmonics object with the temporal mean of each coefficient
        """
        temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
        #-- allocate for mean field
        temp.clm = np.zeros((temp.lmax+1,temp.mmax+1))
        temp.slm = np.zeros((temp.lmax+1,temp.mmax+1))
        #-- Computes the mean for each spherical harmonic degree and order
        for m in range(0,temp.mmax+1):#-- MMAX+1 to include l
            for l in range(m,temp.lmax+1):#-- LMAX+1 to include LMAX
                #-- calculate mean static field (mean over the time axis;
                #-- assumes self.clm/slm are 3-d temporal fields)
                temp.clm[l,m] = np.mean(self.clm[l,m,:])
                temp.slm[l,m] = np.mean(self.slm[l,m,:])
                #-- calculating the time-variable gravity field by removing
                #-- the static component of the gravitational field
                if apply:
                    self.clm[l,m,:] -= temp.clm[l,m]
                    self.slm[l,m,:] -= temp.slm[l,m]
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- return the mean field
        return temp
def scale(self, var):
"""
Multiply a harmonics object by a constant
Inputs: scalar value to which the harmonics object will be multiplied
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
#-- multiply by a single constant or a time-variable scalar
if (np.ndim(var) == 0):
temp.clm = var*self.clm
temp.slm = var*self.slm
elif (np.ndim(var) == 1) and (self.ndim == 2):
temp.clm = np.zeros((temp.lmax+1,temp.mmax+1,len(var)))
temp.slm = np.zeros((temp.lmax+1,temp.mmax+1,len(var)))
for i,v in enumerate(var):
temp.clm[:,:,i] = v*self.clm
temp.slm[:,:,i] = v*self.slm
elif (np.ndim(var) == 1) and (self.ndim == 3):
for i,v in enumerate(var):
temp.clm[:,:,i] = v*self.clm[:,:,i]
temp.slm[:,:,i] = v*self.slm[:,:,i]
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
def power(self, power):
"""
Raise a harmonics object to a power
Inputs: power to which the harmonics object will be raised
"""
#-- reassign shape and ndim attributes
self.update_dimensions()
temp = harmonics(lmax=self.lmax, mmax=self.mmax)
temp.time = np.copy(self.time)
temp.month = np.copy(self.month)
for key in ['clm','slm']:
val = getattr(self, key)
setattr(temp, key, np.power(val,power))
#-- assign ndim and shape attributes
temp.update_dimensions()
return temp
    def convolve(self, var):
        """
        Convolve spherical harmonics with a degree-dependent array

        Inputs: degree dependent array for convolution (indexed by degree l)
        Returns: self with coefficients multiplied in-place
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        #-- check if a single field or a temporal field
        if (self.ndim == 2):
            for l in range(0,self.lmax+1):#-- LMAX+1 to include LMAX
                self.clm[l,:] *= var[l]
                self.slm[l,:] *= var[l]
        else:
            #-- apply the degree-dependent factor to each epoch
            for i,t in enumerate(self.time):
                for l in range(0,self.lmax+1):#-- LMAX+1 to include LMAX
                    self.clm[l,:,i] *= var[l]
                    self.slm[l,:,i] *= var[l]
        #-- return the convolved field
        return self
    def destripe(self, **kwargs):
        """
        Filters spherical harmonic coefficients for correlated "striping" errors

        Options: keyword arguments for destripe_harmonics
        Returns: new harmonics object with the filtered coefficients
        """
        #-- reassign shape and ndim attributes
        self.update_dimensions()
        temp = harmonics(lmax=np.copy(self.lmax),mmax=np.copy(self.mmax))
        temp.time = np.copy(self.time)
        temp.month = np.copy(self.month)
        #-- check if a single field or a temporal field
        if (self.ndim == 2):
            Ylms = destripe_harmonics(self.clm, self.slm,
                LMIN=1, LMAX=self.lmax, MMAX=self.mmax, **kwargs)
            temp.clm = Ylms['clm'].copy()
            temp.slm = Ylms['slm'].copy()
        else:
            n = self.shape[-1]
            temp.clm = np.zeros((self.lmax+1,self.mmax+1,n))
            temp.slm = np.zeros((self.lmax+1,self.mmax+1,n))
            #-- destripe each epoch independently
            for i in range(n):
                Ylms = destripe_harmonics(self.clm[:,:,i], self.slm[:,:,i],
                    LMIN=1, LMAX=self.lmax, MMAX=self.mmax, **kwargs)
                temp.clm[:,:,i] = Ylms['clm'].copy()
                temp.slm[:,:,i] = Ylms['slm'].copy()
        #-- assign ndim and shape attributes
        temp.update_dimensions()
        #-- return the destriped field
        return temp | 0.708313 | 0.578924 |
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
NUM_CLASSES = 1000
def dense_block(x, blocks, name):
  """A dense block of sequentially-applied conv blocks.

  Args:
    x: input tensor.
    blocks: integer, number of conv blocks to stack.
    name: string, prefix for the block labels.

  Returns:
    Output tensor for the block.
  """
  for index in range(1, blocks + 1):
    x = conv_block(x, 32, name='{}_block{}'.format(name, index))
  return x
def transition_block(x, reduction, name):
  """A transition block: BN + ReLU + 1x1 conv + 2x2 average pooling.

  Args:
    x: input tensor.
    reduction: float, compression rate for the 1x1 convolution.
    name: string, prefix for the layer labels.

  Returns:
    Output tensor for the block.
  """
  channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  y = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name=name + '_bn')(x)
  y = layers.Activation('relu', name=name + '_relu')(y)
  #-- compress the channel count by the reduction factor
  filters = int(backend.int_shape(y)[channel_axis] * reduction)
  y = layers.Conv2D(filters, 1, use_bias=False, name=name + '_conv')(y)
  return layers.AveragePooling2D(2, strides=2, name=name + '_pool')(y)
def conv_block(x, growth_rate, name):
  """DenseNet building block: 1x1 bottleneck then 3x3 conv, concat with input.

  Args:
    x: input tensor.
    growth_rate: number of feature maps produced by the 3x3 convolution.
    name: prefix for the layer names.

  Returns:
    Input concatenated with the new `growth_rate` feature maps.
  """
  channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  y = layers.BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                                name=name + '_0_bn')(x)
  y = layers.Activation('relu', name=name + '_0_relu')(y)
  y = layers.Conv2D(4 * growth_rate, 1, use_bias=False,
                    name=name + '_1_conv')(y)
  y = layers.BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                                name=name + '_1_bn')(y)
  y = layers.Activation('relu', name=name + '_1_relu')(y)
  y = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False,
                    name=name + '_2_conv')(y)
  return layers.Concatenate(axis=channel_axis, name=name + '_concat')([x, y])
def DenseNet121(input_shape=None):
  """Build a DenseNet-121 classifier (blocks 6-12-24-16, softmax head)."""
  # Validate/derive the input shape for the current data format.
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=True)
  img_input = layers.Input(shape=input_shape)
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  # Stem: 7x7/2 conv then 3x3/2 max-pool.
  x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
  x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
          x)
  x = layers.Activation('relu', name='conv1/relu')(x)
  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
  x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
  # Four dense blocks separated by 0.5-reduction transition blocks.
  x = dense_block(x, 6, name='conv2')
  x = transition_block(x, 0.5, name='pool2')
  x = dense_block(x, 12, name='conv3')
  x = transition_block(x, 0.5, name='pool3')
  x = dense_block(x, 24, name='conv4')
  x = transition_block(x, 0.5, name='pool4')
  x = dense_block(x, 16, name='conv5')
  x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
  x = layers.Activation('relu', name='relu')(x)
  # Classifier head.
  x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
  imagenet_utils.validate_activation('softmax', None)
  x = layers.Dense(NUM_CLASSES, activation='softmax',
                   name='predictions')(x)
  # Create model.
  # NOTE(review): the text after "return model" below is a dataset-export
  # artifact (repo path + start of next row), not Python -- verify source.
  return model | tests/tensorflow/test_models/densenet.py | from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
# Number of ImageNet classes predicted by the final classifier layer.
NUM_CLASSES = 1000
def dense_block(x, blocks, name):
  """Stack `blocks` conv_blocks with growth rate 32, threading `x` through."""
  for idx in range(blocks):
    x = conv_block(x, 32, name=name + '_block' + str(idx + 1))
  return x
def transition_block(x, reduction, name):
  """Reduce channels by `reduction` via 1x1 conv and halve spatial size.

  The sequence is BatchNorm -> ReLU -> 1x1 Conv2D -> 2x2 AveragePooling.
  """
  axis = 3 if backend.image_data_format() == 'channels_last' else 1
  out = layers.BatchNormalization(axis=axis, epsilon=1.001e-5,
                                  name=name + '_bn')(x)
  out = layers.Activation('relu', name=name + '_relu')(out)
  n_filters = int(backend.int_shape(out)[axis] * reduction)
  out = layers.Conv2D(n_filters, 1, use_bias=False,
                      name=name + '_conv')(out)
  return layers.AveragePooling2D(2, strides=2, name=name + '_pool')(out)
def conv_block(x, growth_rate, name):
  """Bottleneck conv block; concatenates `growth_rate` new maps onto `x`."""
  axis = 3 if backend.image_data_format() == 'channels_last' else 1
  # 1x1 bottleneck producing 4 * growth_rate maps.
  out = layers.BatchNormalization(axis=axis, epsilon=1.001e-5,
                                  name=name + '_0_bn')(x)
  out = layers.Activation('relu', name=name + '_0_relu')(out)
  out = layers.Conv2D(4 * growth_rate, 1, use_bias=False,
                      name=name + '_1_conv')(out)
  # 3x3 convolution down to growth_rate maps.
  out = layers.BatchNormalization(axis=axis, epsilon=1.001e-5,
                                  name=name + '_1_bn')(out)
  out = layers.Activation('relu', name=name + '_1_relu')(out)
  out = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False,
                      name=name + '_2_conv')(out)
  return layers.Concatenate(axis=axis, name=name + '_concat')([x, out])
def DenseNet121(input_shape=None):
  """Build a DenseNet-121 classifier (blocks 6-12-24-16, softmax head)."""
  # Validate/derive the input shape for the current data format.
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=True)
  img_input = layers.Input(shape=input_shape)
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  # Stem: 7x7/2 conv then 3x3/2 max-pool.
  x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
  x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
          x)
  x = layers.Activation('relu', name='conv1/relu')(x)
  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
  x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
  # Four dense blocks separated by 0.5-reduction transition blocks.
  x = dense_block(x, 6, name='conv2')
  x = transition_block(x, 0.5, name='pool2')
  x = dense_block(x, 12, name='conv3')
  x = transition_block(x, 0.5, name='pool3')
  x = dense_block(x, 24, name='conv4')
  x = transition_block(x, 0.5, name='pool4')
  x = dense_block(x, 16, name='conv5')
  x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
  x = layers.Activation('relu', name='relu')(x)
  # Classifier head.
  x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
  imagenet_utils.validate_activation('softmax', None)
  x = layers.Dense(NUM_CLASSES, activation='softmax',
                   name='predictions')(x)
  # Create model.
  # NOTE(review): the "| 0.900915 | 0.397997" suffix below is a dataset-export
  # artifact fused onto the return line -- verify source file.
  return model | 0.900915 | 0.397997
import torch
import torch.nn as nn
from torch.nn import functional as F
def octonion_mul(O_1, O_2):
    """Octonion product O_1 * O_2.

    Each octonion is an 8-tuple (e0, e1, ..., e7) of components with e0 the
    real part; components may be scalars or broadcastable tensors (anything
    supporting +, -, *). Returns the 8 product components as a tuple, using
    the same sign table as the rest of this module.
    """
    a0, a1, a2, a3, a4, a5, a6, a7 = O_1
    b0, b1, b2, b3, b4, b5, b6, b7 = O_2
    # One output line per basis element; term order is kept fixed so the
    # floating-point result is bit-identical across calls.
    c0 = a0 * b0 - a1 * b1 - a2 * b2 - a3 * b3 - a4 * b4 - a5 * b5 - a6 * b6 - a7 * b7
    c1 = a0 * b1 + a1 * b0 + a2 * b3 - a3 * b2 + a4 * b5 - a5 * b4 - a6 * b7 + a7 * b6
    c2 = a0 * b2 - a1 * b3 + a2 * b0 + a3 * b1 + a4 * b6 + a5 * b7 - a6 * b4 - a7 * b5
    c3 = a0 * b3 + a1 * b2 - a2 * b1 + a3 * b0 + a4 * b7 - a5 * b6 + a6 * b5 - a7 * b4
    c4 = a0 * b4 - a1 * b5 - a2 * b6 - a3 * b7 + a4 * b0 + a5 * b1 + a6 * b2 + a7 * b3
    c5 = a0 * b5 + a1 * b4 - a2 * b7 + a3 * b6 - a4 * b1 + a5 * b0 - a6 * b3 + a7 * b2
    c6 = a0 * b6 + a1 * b7 + a2 * b4 - a3 * b5 - a4 * b2 + a5 * b3 + a6 * b0 - a7 * b1
    c7 = a0 * b7 - a1 * b6 + a2 * b5 + a3 * b4 - a4 * b3 - a5 * b2 + a6 * b1 + a7 * b0
    return c0, c1, c2, c3, c4, c5, c6, c7
def octonion_mul_norm(O_1, O_2):
    """Octonion multiplication of O_1 with a unit-normalized O_2.

    Normalizing O_2 (the relation octonion) to unit norm removes its scaling
    effect from the product. The norm is clamped away from zero, so an
    all-zero O_2 yields zeros instead of NaN -- fixing the floating-point
    hazard (0/0) acknowledged by the original implementation.

    :param O_1: 8-tuple of (batched) torch tensors, components e0..e7.
    :param O_2: 8-tuple of (batched) torch tensors, components e0..e7.
    :return: 8-tuple of tensors holding the product components e0..e7.
    """
    x0, x1, x2, x3, x4, x5, x6, x7 = O_1
    y0, y1, y2, y3, y4, y5, y6, y7 = O_2
    # Normalize the relation to eliminate the scaling effect; the clamp keeps
    # the denominator strictly positive to avoid NaN for zero-norm inputs.
    denominator = torch.sqrt(
        y0 ** 2 + y1 ** 2 + y2 ** 2 + y3 ** 2 + y4 ** 2 + y5 ** 2 + y6 ** 2 + y7 ** 2).clamp_min(1e-12)
    y0 = y0 / denominator
    y1 = y1 / denominator
    y2 = y2 / denominator
    y3 = y3 / denominator
    y4 = y4 / denominator
    y5 = y5 / denominator
    y6 = y6 / denominator
    y7 = y7 / denominator
    # Standard component-wise octonion product (same sign table as octonion_mul).
    x = x0 * y0 - x1 * y1 - x2 * y2 - x3 * y3 - x4 * y4 - x5 * y5 - x6 * y6 - x7 * y7
    e1 = x0 * y1 + x1 * y0 + x2 * y3 - x3 * y2 + x4 * y5 - x5 * y4 - x6 * y7 + x7 * y6
    e2 = x0 * y2 - x1 * y3 + x2 * y0 + x3 * y1 + x4 * y6 + x5 * y7 - x6 * y4 - x7 * y5
    e3 = x0 * y3 + x1 * y2 - x2 * y1 + x3 * y0 + x4 * y7 - x5 * y6 + x6 * y5 - x7 * y4
    e4 = x0 * y4 - x1 * y5 - x2 * y6 - x3 * y7 + x4 * y0 + x5 * y1 + x6 * y2 + x7 * y3
    e5 = x0 * y5 + x1 * y4 - x2 * y7 + x3 * y6 - x4 * y1 + x5 * y0 - x6 * y3 + x7 * y2
    e6 = x0 * y6 + x1 * y7 + x2 * y4 - x3 * y5 - x4 * y2 + x5 * y3 + x6 * y0 - x7 * y1
    e7 = x0 * y7 - x1 * y6 + x2 * y5 + x3 * y4 - x4 * y3 - x5 * y2 + x6 * y1 + x7 * y0
    return x, e1, e2, e3, e4, e5, e6, e7
class OMult(torch.nn.Module):
    """Octonion-valued multiplicative knowledge-graph embedding model (OMult).

    Entities and relations are embedded as octonions: 8 real-valued component
    embeddings each (e0 = real part, e1..e7 = imaginary parts). A triple
    (h, r, t) is scored by octonion-multiplying h with r and taking the inner
    product with t. With ``norm_flag=True`` the relation octonion is
    unit-normalized before multiplication; otherwise batch-norm + dropout
    regularization is applied around the multiplication instead.
    Scores are squashed through a sigmoid and trained with BCE loss.
    """
    def __init__(self,
                 num_entities, num_relations,
                 embedding_dim,
                 norm_flag=False, input_dropout=0.2, hidden_dropout=0.3
                 ):
        # norm_flag selects the unit-normalized multiplication path in the
        # forward passes (see flag_octonion_mul_norm below).
        super(OMult, self).__init__()
        self.name = 'OMult'
        self.embedding_dim = embedding_dim
        self.num_entities = num_entities
        self.num_relations = num_relations
        self.loss = nn.BCELoss()
        self.flag_octonion_mul_norm = norm_flag
        # Octonion embeddings of entities
        self.emb_ent_e0 = nn.Embedding(self.num_entities, self.embedding_dim) # real
        self.emb_ent_e1 = nn.Embedding(self.num_entities, self.embedding_dim) # e1
        self.emb_ent_e2 = nn.Embedding(self.num_entities, self.embedding_dim) # e2
        self.emb_ent_e3 = nn.Embedding(self.num_entities, self.embedding_dim) # e3
        self.emb_ent_e4 = nn.Embedding(self.num_entities, self.embedding_dim) # e4
        self.emb_ent_e5 = nn.Embedding(self.num_entities, self.embedding_dim) # e5
        self.emb_ent_e6 = nn.Embedding(self.num_entities, self.embedding_dim) # e6
        self.emb_ent_e7 = nn.Embedding(self.num_entities, self.embedding_dim) # e7
        # Octonion embeddings of relations
        self.emb_rel_e0 = nn.Embedding(self.num_relations, self.embedding_dim) # real
        self.emb_rel_e1 = nn.Embedding(self.num_relations, self.embedding_dim) # e1
        self.emb_rel_e2 = nn.Embedding(self.num_relations, self.embedding_dim) # e2
        self.emb_rel_e3 = nn.Embedding(self.num_relations, self.embedding_dim) # e3
        self.emb_rel_e4 = nn.Embedding(self.num_relations, self.embedding_dim) # e4
        self.emb_rel_e5 = nn.Embedding(self.num_relations, self.embedding_dim) # e5
        self.emb_rel_e6 = nn.Embedding(self.num_relations, self.embedding_dim) # e6
        self.emb_rel_e7 = nn.Embedding(self.num_relations, self.embedding_dim) # e7
        # Dropouts for octonion embeddings of ALL entities.
        self.input_dp_ent_e0 = nn.Dropout(input_dropout)
        self.input_dp_ent_e1 = nn.Dropout(input_dropout)
        self.input_dp_ent_e2 = nn.Dropout(input_dropout)
        self.input_dp_ent_e3 = nn.Dropout(input_dropout)
        self.input_dp_ent_e4 = nn.Dropout(input_dropout)
        self.input_dp_ent_e5 = nn.Dropout(input_dropout)
        self.input_dp_ent_e6 = nn.Dropout(input_dropout)
        self.input_dp_ent_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings of relations.
        self.input_dp_rel_e0 = nn.Dropout(input_dropout)
        self.input_dp_rel_e1 = nn.Dropout(input_dropout)
        self.input_dp_rel_e2 = nn.Dropout(input_dropout)
        self.input_dp_rel_e3 = nn.Dropout(input_dropout)
        self.input_dp_rel_e4 = nn.Dropout(input_dropout)
        self.input_dp_rel_e5 = nn.Dropout(input_dropout)
        self.input_dp_rel_e6 = nn.Dropout(input_dropout)
        self.input_dp_rel_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings obtained from octonion multiplication.
        self.hidden_dp_e0 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e1 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e2 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e3 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e4 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e5 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e6 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e7 = nn.Dropout(hidden_dropout)
        # Batch normalization for octonion embeddings of ALL entities.
        self.bn_ent_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e7 = nn.BatchNorm1d(self.embedding_dim)
        # Batch normalization for octonion embeddings of relations.
        self.bn_rel_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e7 = nn.BatchNorm1d(self.embedding_dim)
    def forward(self, h_idx, r_idx):
        """Score (h, r) against all entities; index batches are flattened first."""
        return self.forward_head_batch(h_idx.view(-1), r_idx.view(-1))
    def forward_head_batch(self, h_idx, r_idx):
        """Compute KvsAll scores for a batch of (head, relation) pairs.

        Given (h, r), scores are computed for every candidate tail entity,
        i.e. [score(h, r, x) for x in Entities]. For a batch of pairs the
        result has shape (batch size, |Entities|), passed through a sigmoid.
        """
        # (1)
        # (1.1) Octonion embeddings of head entities
        emb_head_e0 = self.emb_ent_e0(h_idx)
        emb_head_e1 = self.emb_ent_e1(h_idx)
        emb_head_e2 = self.emb_ent_e2(h_idx)
        emb_head_e3 = self.emb_ent_e3(h_idx)
        emb_head_e4 = self.emb_ent_e4(h_idx)
        emb_head_e5 = self.emb_ent_e5(h_idx)
        emb_head_e6 = self.emb_ent_e6(h_idx)
        emb_head_e7 = self.emb_ent_e7(h_idx)
        # (1.2) Octonion embeddings of relations
        emb_rel_e0 = self.emb_rel_e0(r_idx)
        emb_rel_e1 = self.emb_rel_e1(r_idx)
        emb_rel_e2 = self.emb_rel_e2(r_idx)
        emb_rel_e3 = self.emb_rel_e3(r_idx)
        emb_rel_e4 = self.emb_rel_e4(r_idx)
        emb_rel_e5 = self.emb_rel_e5(r_idx)
        emb_rel_e6 = self.emb_rel_e6(r_idx)
        emb_rel_e7 = self.emb_rel_e7(r_idx)
        if self.flag_octonion_mul_norm:
            # (2) Octonion multiplication of (1.1) and unit normalized (1.2).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (3) Inner product of (2) with ALL entities.
            e0_score = torch.mm(e0, self.emb_ent_e0.weight.transpose(1, 0))
            e1_score = torch.mm(e1, self.emb_ent_e1.weight.transpose(1, 0))
            e2_score = torch.mm(e2, self.emb_ent_e2.weight.transpose(1, 0))
            e3_score = torch.mm(e3, self.emb_ent_e3.weight.transpose(1, 0))
            e4_score = torch.mm(e4, self.emb_ent_e4.weight.transpose(1, 0))
            e5_score = torch.mm(e5, self.emb_ent_e5.weight.transpose(1, 0))
            e6_score = torch.mm(e6, self.emb_ent_e6.weight.transpose(1, 0))
            e7_score = torch.mm(e7, self.emb_ent_e7.weight.transpose(1, 0))
        else:
            # (2)
            # (2.1) Apply BN + Dropout on (1.2) relations.
            # (2.2) Apply octonion multiplication of (1.1) and (2.1).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)),
                     self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)),
                     self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)),
                     self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)),
                     self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)),
                     self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)),
                     self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)),
                     self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7))))
            # (3)
            # (3.1) Dropout on (2)-result of octonion multiplication.
            # (3.2) Apply BN + DP on ALL entities.
            # (3.3) Inner product
            e0_score = torch.mm(self.hidden_dp_e0(e0),
                                self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)).transpose(1, 0))
            e1_score = torch.mm(self.hidden_dp_e1(e1),
                                self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)).transpose(1, 0))
            e2_score = torch.mm(self.hidden_dp_e2(e2),
                                self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)).transpose(1, 0))
            e3_score = torch.mm(self.hidden_dp_e3(e3),
                                self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)).transpose(1, 0))
            e4_score = torch.mm(self.hidden_dp_e4(e4),
                                self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)).transpose(1, 0))
            e5_score = torch.mm(self.hidden_dp_e5(e5),
                                self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)).transpose(1, 0))
            e6_score = torch.mm(self.hidden_dp_e6(e6),
                                self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)).transpose(1, 0))
            e7_score = torch.mm(self.hidden_dp_e7(e7),
                                self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight)).transpose(1, 0))
        # Sum the componentwise inner products (real inner product of octonions).
        score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
        return torch.sigmoid(score)
    def forward_tail_batch(self, r_idx, e2_idx):
        """KvsAll in the reverse direction for (relation, tail) pairs.

        1) Octonion-multiply ALL entity embeddings with the relation r
           (broadcast over the batch).
        2) Take the inner product of (1) with the tail entity t.
        Yields one score per candidate head entity, passed through a sigmoid.
        """
        # (1)
        # (1.1) Octonion embeddings of relations
        emb_rel_e0 = self.emb_rel_e0(r_idx)
        emb_rel_e1 = self.emb_rel_e1(r_idx)
        emb_rel_e2 = self.emb_rel_e2(r_idx)
        emb_rel_e3 = self.emb_rel_e3(r_idx)
        emb_rel_e4 = self.emb_rel_e4(r_idx)
        emb_rel_e5 = self.emb_rel_e5(r_idx)
        emb_rel_e6 = self.emb_rel_e6(r_idx)
        emb_rel_e7 = self.emb_rel_e7(r_idx)
        # (1.2) Reshape octonion embeddings of tail entities.
        emb_tail_e0 = self.emb_ent_e0(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e1 = self.emb_ent_e1(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e2 = self.emb_ent_e2(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e3 = self.emb_ent_e3(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e4 = self.emb_ent_e4(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e5 = self.emb_ent_e5(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e6 = self.emb_ent_e6(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e7 = self.emb_ent_e7(e2_idx).view(-1, self.embedding_dim, 1)
        if self.flag_octonion_mul_norm:
            # (2) Re-embed and reshape (1.1)-relations to broadcast against
            # the full entity matrix in the multiplication below.
            emb_rel_e0 = self.emb_rel_e0(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e1 = self.emb_rel_e1(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e2 = self.emb_rel_e2(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e3 = self.emb_rel_e3(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e4 = self.emb_rel_e4(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e5 = self.emb_rel_e5(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e6 = self.emb_rel_e6(r_idx).view(-1, 1, self.embedding_dim)
            emb_rel_e7 = self.emb_rel_e7(r_idx).view(-1, 1, self.embedding_dim)
            # (3) Octonion multiplication of ALL entities and unit normalized (1.1).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
                O_1=(self.emb_ent_e0.weight, self.emb_ent_e1.weight,
                     self.emb_ent_e2.weight, self.emb_ent_e3.weight,
                     self.emb_ent_e4.weight, self.emb_ent_e5.weight,
                     self.emb_ent_e6.weight, self.emb_ent_e7.weight),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (4) Inner product of (3) with (1.2).
            e0_score = torch.matmul(e0, emb_tail_e0)
            e1_score = torch.matmul(e1, emb_tail_e1)
            e2_score = torch.matmul(e2, emb_tail_e2)
            e3_score = torch.matmul(e3, emb_tail_e3)
            e4_score = torch.matmul(e4, emb_tail_e4)
            e5_score = torch.matmul(e5, emb_tail_e5)
            e6_score = torch.matmul(e6, emb_tail_e6)
            e7_score = torch.matmul(e7, emb_tail_e7)
        else:
            # (2) BN + Dropout-Reshape (1.1)-relations
            emb_rel_e0 = self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)).view(-1, 1, self.embedding_dim)
            emb_rel_e1 = self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)).view(-1, 1, self.embedding_dim)
            emb_rel_e2 = self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)).view(-1, 1, self.embedding_dim)
            emb_rel_e3 = self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)).view(-1, 1, self.embedding_dim)
            emb_rel_e4 = self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)).view(-1, 1, self.embedding_dim)
            emb_rel_e5 = self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)).view(-1, 1, self.embedding_dim)
            emb_rel_e6 = self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)).view(-1, 1, self.embedding_dim)
            emb_rel_e7 = self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7)).view(-1, 1, self.embedding_dim)
            # (3)
            # (3.1) BN + Dropout on ALL entities.
            # (3.2) Octonion multiplication of (3.1) and (2).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
                O_1=(self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)),
                     self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)),
                     self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)),
                     self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)),
                     self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)),
                     self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)),
                     self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)),
                     self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight))),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (4)
            # (4.1) Dropout on (3).
            # (4.2) Inner product on (4.1) with (1.2).
            e0_score = torch.matmul(self.hidden_dp_e0(e0), emb_tail_e0)
            e1_score = torch.matmul(self.hidden_dp_e1(e1), emb_tail_e1)
            e2_score = torch.matmul(self.hidden_dp_e2(e2), emb_tail_e2)
            e3_score = torch.matmul(self.hidden_dp_e3(e3), emb_tail_e3)
            e4_score = torch.matmul(self.hidden_dp_e4(e4), emb_tail_e4)
            e5_score = torch.matmul(self.hidden_dp_e5(e5), emb_tail_e5)
            e6_score = torch.matmul(self.hidden_dp_e6(e6), emb_tail_e6)
            e7_score = torch.matmul(self.hidden_dp_e7(e7), emb_tail_e7)
        score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
        # Drop the trailing singleton dimension introduced by the matmul.
        score = score.squeeze()
        return torch.sigmoid(score)
    def forward_head_and_loss(self, h_idx, r_idx, targets):
        """BCE loss of the KvsAll head-batch scores against `targets`."""
        return self.loss(self.forward_head_batch(h_idx=h_idx, r_idx=r_idx), targets)
    def forward_tail_and_loss(self, r_idx, e2_idx, targets):
        """BCE loss of the KvsAll tail-batch scores against `targets`."""
        return self.loss(self.forward_tail_batch(r_idx=r_idx, e2_idx=e2_idx), targets)
    def init(self):
        """Xavier-initialize all entity and relation component embeddings."""
        nn.init.xavier_normal_(self.emb_ent_e0.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e1.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e2.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e3.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e4.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e5.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e6.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e7.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e0.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e1.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e2.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e3.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e4.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e5.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e6.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e7.weight.data)
    def get_embeddings(self):
        """Return (entity, relation) embeddings with the 8 octonion components
        concatenated along dim 1, i.e. shape (*, 8 * embedding_dim)."""
        entity_emb = torch.cat((
            self.emb_ent_e0.weight.data, self.emb_ent_e1.weight.data,
            self.emb_ent_e2.weight.data, self.emb_ent_e3.weight.data,
            self.emb_ent_e4.weight.data, self.emb_ent_e5.weight.data,
            self.emb_ent_e6.weight.data, self.emb_ent_e7.weight.data), 1)
        rel_emb = torch.cat((
            self.emb_rel_e0.weight.data, self.emb_rel_e1.weight.data,
            self.emb_rel_e2.weight.data, self.emb_rel_e3.weight.data,
            self.emb_rel_e4.weight.data, self.emb_rel_e5.weight.data,
            self.emb_rel_e6.weight.data, self.emb_rel_e7.weight.data), 1)
        return entity_emb, rel_emb
class ConvO(nn.Module):
    def __init__(self,
                 num_entities, num_relations,
                 embedding_dim,
                 kernel_size=3, num_of_output_channels=16, feature_map_dropout=0.3,
                 norm_flag=False, input_dropout=0.2, hidden_dropout=0.3):
        """Octonion model with a convolutional residual branch over the
        stacked head/relation component embeddings (ConvO).

        kernel_size / num_of_output_channels / feature_map_dropout configure
        the convolution; the remaining arguments mirror OMult.
        """
        super(ConvO, self).__init__()
        self.name = 'ConvO'
        self.embedding_dim = embedding_dim
        self.num_entities = num_entities
        self.num_relations = num_relations
        self.loss = nn.BCELoss()
        self.flag_octonion_mul_norm = norm_flag
        # Octonion embeddings of entities
        self.emb_ent_e0 = nn.Embedding(self.num_entities, self.embedding_dim) # real
        self.emb_ent_e1 = nn.Embedding(self.num_entities, self.embedding_dim) # e1
        self.emb_ent_e2 = nn.Embedding(self.num_entities, self.embedding_dim) # e2
        self.emb_ent_e3 = nn.Embedding(self.num_entities, self.embedding_dim) # e3
        self.emb_ent_e4 = nn.Embedding(self.num_entities, self.embedding_dim) # e4
        self.emb_ent_e5 = nn.Embedding(self.num_entities, self.embedding_dim) # e5
        self.emb_ent_e6 = nn.Embedding(self.num_entities, self.embedding_dim) # e6
        self.emb_ent_e7 = nn.Embedding(self.num_entities, self.embedding_dim) # e7
        # Octonion embeddings of relations
        self.emb_rel_e0 = nn.Embedding(self.num_relations, self.embedding_dim) # real
        self.emb_rel_e1 = nn.Embedding(self.num_relations, self.embedding_dim) # e1
        self.emb_rel_e2 = nn.Embedding(self.num_relations, self.embedding_dim) # e2
        self.emb_rel_e3 = nn.Embedding(self.num_relations, self.embedding_dim) # e3
        self.emb_rel_e4 = nn.Embedding(self.num_relations, self.embedding_dim) # e4
        self.emb_rel_e5 = nn.Embedding(self.num_relations, self.embedding_dim) # e5
        self.emb_rel_e6 = nn.Embedding(self.num_relations, self.embedding_dim) # e6
        self.emb_rel_e7 = nn.Embedding(self.num_relations, self.embedding_dim) # e7
        # Dropouts for octonion embeddings of ALL entities.
        self.input_dp_ent_e0 = nn.Dropout(input_dropout)
        self.input_dp_ent_e1 = nn.Dropout(input_dropout)
        self.input_dp_ent_e2 = nn.Dropout(input_dropout)
        self.input_dp_ent_e3 = nn.Dropout(input_dropout)
        self.input_dp_ent_e4 = nn.Dropout(input_dropout)
        self.input_dp_ent_e5 = nn.Dropout(input_dropout)
        self.input_dp_ent_e6 = nn.Dropout(input_dropout)
        self.input_dp_ent_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings of relations.
        self.input_dp_rel_e0 = nn.Dropout(input_dropout)
        self.input_dp_rel_e1 = nn.Dropout(input_dropout)
        self.input_dp_rel_e2 = nn.Dropout(input_dropout)
        self.input_dp_rel_e3 = nn.Dropout(input_dropout)
        self.input_dp_rel_e4 = nn.Dropout(input_dropout)
        self.input_dp_rel_e5 = nn.Dropout(input_dropout)
        self.input_dp_rel_e6 = nn.Dropout(input_dropout)
        self.input_dp_rel_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings obtained from octonion multiplication.
        self.hidden_dp_e0 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e1 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e2 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e3 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e4 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e5 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e6 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e7 = nn.Dropout(hidden_dropout)
        # Batch normalization for octonion embeddings of ALL entities.
        self.bn_ent_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e7 = nn.BatchNorm1d(self.embedding_dim)
        # Batch normalization for octonion embeddings of relations.
        self.bn_rel_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e7 = nn.BatchNorm1d(self.embedding_dim)
        # Convolution over the (2 octonions x 8 components) stacked embeddings.
        self.kernel_size = kernel_size
        self.num_of_output_channels = num_of_output_channels
        self.feature_map_dropout = nn.Dropout2d(feature_map_dropout)
        # NOTE(review): nn.Conv1d is constructed with a 2D kernel_size tuple and
        # later applied to a 4D input (residual_convolution builds a
        # (batch, 1, 16, dim) map; bn_conv1 is BatchNorm2d) -- confirm whether
        # nn.Conv2d was intended, or whether this relies on Conv1d accepting
        # the tuple unchecked.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=self.num_of_output_channels,
                               kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True)
        self.fc_num_input = self.embedding_dim * 16 * self.num_of_output_channels # 16 = two octonions x 8 components each
        self.fc1 = nn.Linear(self.fc_num_input, self.embedding_dim * 8) # Hard compression.
        self.bn_conv1 = nn.BatchNorm2d(self.num_of_output_channels)
        self.bn_conv2 = nn.BatchNorm1d(self.embedding_dim * 8)
def forward(self, h_idx, r_idx):
return self.forward_head_batch(h_idx.view(-1), r_idx.view(-1))
def residual_convolution(self, O_1, O_2):
emb_ent_e0, emb_ent_e1, emb_ent_e2, emb_ent_e3, emb_ent_e4, emb_ent_e5, emb_ent_e6, emb_ent_e7 = O_1
emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7 = O_2
x = torch.cat([emb_ent_e0.view(-1, 1, 1, self.embedding_dim),
emb_ent_e1.view(-1, 1, 1, self.embedding_dim),
emb_ent_e2.view(-1, 1, 1, self.embedding_dim),
emb_ent_e3.view(-1, 1, 1, self.embedding_dim),
emb_ent_e4.view(-1, 1, 1, self.embedding_dim),
emb_ent_e5.view(-1, 1, 1, self.embedding_dim),
emb_ent_e6.view(-1, 1, 1, self.embedding_dim),
emb_ent_e7.view(-1, 1, 1, self.embedding_dim), # entities
emb_rel_e0.view(-1, 1, 1, self.embedding_dim),
emb_rel_e1.view(-1, 1, 1, self.embedding_dim),
emb_rel_e2.view(-1, 1, 1, self.embedding_dim),
emb_rel_e3.view(-1, 1, 1, self.embedding_dim),
emb_rel_e4.view(-1, 1, 1, self.embedding_dim),
emb_rel_e5.view(-1, 1, 1, self.embedding_dim),
emb_rel_e6.view(-1, 1, 1, self.embedding_dim),
emb_rel_e7.view(-1, 1, 1, self.embedding_dim), ], 2)
x = self.conv1(x)
x = self.bn_conv1(x)
x = F.relu(x)
x = self.feature_map_dropout(x)
x = x.view(x.shape[0], -1) # reshape for NN.
x = self.fc1(x)
x = self.bn_conv2(x)
x = F.relu(x)
return torch.chunk(x, 8, dim=1)
    def forward_head_batch(self, h_idx, r_idx):
        """KvsAll scoring for (head, relation) pairs with a convolutional gate.

        The octonion product h * r is modulated (Hadamard product) by the
        output of residual_convolution(h, r) before the inner product with
        every candidate tail entity. Returns sigmoid scores of shape
        (batch size, |Entities|).
        """
        # (1)
        # (1.1) Octonion embeddings of head entities
        emb_head_e0 = self.emb_ent_e0(h_idx)
        emb_head_e1 = self.emb_ent_e1(h_idx)
        emb_head_e2 = self.emb_ent_e2(h_idx)
        emb_head_e3 = self.emb_ent_e3(h_idx)
        emb_head_e4 = self.emb_ent_e4(h_idx)
        emb_head_e5 = self.emb_ent_e5(h_idx)
        emb_head_e6 = self.emb_ent_e6(h_idx)
        emb_head_e7 = self.emb_ent_e7(h_idx)
        # (1.2) Octonion embeddings of relations
        emb_rel_e0 = self.emb_rel_e0(r_idx)
        emb_rel_e1 = self.emb_rel_e1(r_idx)
        emb_rel_e2 = self.emb_rel_e2(r_idx)
        emb_rel_e3 = self.emb_rel_e3(r_idx)
        emb_rel_e4 = self.emb_rel_e4(r_idx)
        emb_rel_e5 = self.emb_rel_e5(r_idx)
        emb_rel_e6 = self.emb_rel_e6(r_idx)
        emb_rel_e7 = self.emb_rel_e7(r_idx)
        # (2) Apply convolution operation on (1.1) and (1.2).
        O_3 = self.residual_convolution(O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                                             emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                                        O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                                             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
        conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
        if self.flag_octonion_mul_norm:
            # (3) Octonion multiplication of (1.1) and unit normalized (1.2).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (4)
            # (4.1) Hadamard product of (2) with (3).
            # (4.2) Inner product of (4.1) with ALL entities.
            e0_score = torch.mm(conv_e0 * e0, self.emb_ent_e0.weight.transpose(1, 0))
            e1_score = torch.mm(conv_e1 * e1, self.emb_ent_e1.weight.transpose(1, 0))
            e2_score = torch.mm(conv_e2 * e2, self.emb_ent_e2.weight.transpose(1, 0))
            e3_score = torch.mm(conv_e3 * e3, self.emb_ent_e3.weight.transpose(1, 0))
            e4_score = torch.mm(conv_e4 * e4, self.emb_ent_e4.weight.transpose(1, 0))
            e5_score = torch.mm(conv_e5 * e5, self.emb_ent_e5.weight.transpose(1, 0))
            e6_score = torch.mm(conv_e6 * e6, self.emb_ent_e6.weight.transpose(1, 0))
            e7_score = torch.mm(conv_e7 * e7, self.emb_ent_e7.weight.transpose(1, 0))
        else:
            # (3)
            # (3.1) Apply BN + Dropout on (1.2)-relations.
            # (3.2) Apply octonion multiplication on (1.1) and (3.1).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)),
                     self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)),
                     self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)),
                     self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)),
                     self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)),
                     self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)),
                     self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)),
                     self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7))))
            # (4)
            # (4.1) Hadamard product of (2) with (3).
            # (4.2) Dropout on (4.1).
            # (4.3) Apply BN + DP on ALL entities.
            # (4.4) Inner product
            e0_score = torch.mm(self.hidden_dp_e0(conv_e0 * e0),
                                self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)).transpose(1, 0))
            e1_score = torch.mm(self.hidden_dp_e1(conv_e1 * e1),
                                self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)).transpose(1, 0))
            e2_score = torch.mm(self.hidden_dp_e2(conv_e2 * e2),
                                self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)).transpose(1, 0))
            e3_score = torch.mm(self.hidden_dp_e3(conv_e3 * e3),
                                self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)).transpose(1, 0))
            e4_score = torch.mm(self.hidden_dp_e4(conv_e4 * e4),
                                self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)).transpose(1, 0))
            e5_score = torch.mm(self.hidden_dp_e5(conv_e5 * e5),
                                self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)).transpose(1, 0))
            e6_score = torch.mm(self.hidden_dp_e6(conv_e6 * e6),
                                self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)).transpose(1, 0))
            e7_score = torch.mm(self.hidden_dp_e7(conv_e7 * e7),
                                self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight)).transpose(1, 0))
        # Sum the componentwise inner products (real inner product of octonions).
        score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
        return torch.sigmoid(score)
def forward_tail_batch(self, r_idx, e2_idx):
# (1)
# (1.1) Octonion embeddings of relations
emb_rel_e0 = self.emb_rel_e0(r_idx)
emb_rel_e1 = self.emb_rel_e1(r_idx)
emb_rel_e2 = self.emb_rel_e2(r_idx)
emb_rel_e3 = self.emb_rel_e3(r_idx)
emb_rel_e4 = self.emb_rel_e4(r_idx)
emb_rel_e5 = self.emb_rel_e5(r_idx)
emb_rel_e6 = self.emb_rel_e6(r_idx)
emb_rel_e7 = self.emb_rel_e7(r_idx)
# (1.2) Octonion embeddings of head entities
emb_tail_e0 = self.emb_ent_e0(e2_idx)
emb_tail_e1 = self.emb_ent_e1(e2_idx)
emb_tail_e2 = self.emb_ent_e2(e2_idx)
emb_tail_e3 = self.emb_ent_e3(e2_idx)
emb_tail_e4 = self.emb_ent_e4(e2_idx)
emb_tail_e5 = self.emb_ent_e5(e2_idx)
emb_tail_e6 = self.emb_ent_e6(e2_idx)
emb_tail_e7 = self.emb_ent_e7(e2_idx)
# (2) Apply convolution operation on (1.1) and (1.2). conv(r,t) instead of conv(t,r) or conv(h,r).
O_3 = self.residual_convolution(O_1=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7),
O_2=(emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7))
conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
# (3)
# (3.1) Reshape (1.2) tail entities.
emb_tail_e0 = emb_tail_e0.view(-1, self.embedding_dim, 1)
emb_tail_e1 = emb_tail_e1.view(-1, self.embedding_dim, 1)
emb_tail_e2 = emb_tail_e2.view(-1, self.embedding_dim, 1)
emb_tail_e3 = emb_tail_e3.view(-1, self.embedding_dim, 1)
emb_tail_e4 = emb_tail_e4.view(-1, self.embedding_dim, 1)
emb_tail_e5 = emb_tail_e5.view(-1, self.embedding_dim, 1)
emb_tail_e6 = emb_tail_e6.view(-1, self.embedding_dim, 1)
emb_tail_e7 = emb_tail_e7.view(-1, self.embedding_dim, 1)
# (3.2) Reshape (2) output of convolution.
conv_e0 = conv_e0.view(-1, 1, self.embedding_dim)
conv_e1 = conv_e1.view(-1, 1, self.embedding_dim)
conv_e2 = conv_e2.view(-1, 1, self.embedding_dim)
conv_e3 = conv_e3.view(-1, 1, self.embedding_dim)
conv_e4 = conv_e4.view(-1, 1, self.embedding_dim)
conv_e5 = conv_e5.view(-1, 1, self.embedding_dim)
conv_e6 = conv_e6.view(-1, 1, self.embedding_dim)
conv_e7 = conv_e7.view(-1, 1, self.embedding_dim)
if self.flag_octonion_mul_norm:
# (4) Reshape (1.1)-relations.
emb_rel_e0 = self.emb_rel_e0(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e1 = self.emb_rel_e1(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e2 = self.emb_rel_e2(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e3 = self.emb_rel_e3(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e4 = self.emb_rel_e4(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e5 = self.emb_rel_e5(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e6 = self.emb_rel_e6(r_idx).view(-1, 1, self.embedding_dim)
emb_rel_e7 = self.emb_rel_e7(r_idx).view(-1, 1, self.embedding_dim)
# (5) Octonion multiplication of ALL entities and unit normalized (4.1).
e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
O_1=(self.emb_ent_e0.weight, self.emb_ent_e1.weight,
self.emb_ent_e2.weight, self.emb_ent_e3.weight,
self.emb_ent_e4.weight, self.emb_ent_e5.weight,
self.emb_ent_e6.weight, self.emb_ent_e7.weight),
O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
# (4) Inner product of (3) with (1.2).
e0_score = torch.matmul(conv_e0 * e0, emb_tail_e0)
e1_score = torch.matmul(conv_e1 * e1, emb_tail_e1)
e2_score = torch.matmul(conv_e2 * e2, emb_tail_e2)
e3_score = torch.matmul(conv_e3 * e3, emb_tail_e3)
e4_score = torch.matmul(conv_e4 * e4, emb_tail_e4)
e5_score = torch.matmul(conv_e5 * e5, emb_tail_e5)
e6_score = torch.matmul(conv_e6 * e6, emb_tail_e6)
e7_score = torch.matmul(conv_e7 * e7, emb_tail_e7)
else:
# (4) BN + Dropout-Reshape (1.1)-relations
emb_rel_e0 = self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)).view(-1, 1, self.embedding_dim)
emb_rel_e1 = self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)).view(-1, 1, self.embedding_dim)
emb_rel_e2 = self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)).view(-1, 1, self.embedding_dim)
emb_rel_e3 = self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)).view(-1, 1, self.embedding_dim)
emb_rel_e4 = self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)).view(-1, 1, self.embedding_dim)
emb_rel_e5 = self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)).view(-1, 1, self.embedding_dim)
emb_rel_e6 = self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)).view(-1, 1, self.embedding_dim)
emb_rel_e7 = self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7)).view(-1, 1, self.embedding_dim)
# (5)
# (5.1) BN + Dropout on ALL entities.
# (5.2) Quaternion multiplication of (5.1) and (4).
e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
O_1=(self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)),
self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)),
self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)),
self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)),
self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)),
self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)),
self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)),
self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight))),
O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
# (6)
# (6.1) Hadamard product of (3.2) and (5).
# (6.2) Dropout on (6.1).
# (6.2) Inner product on (5.1) with (3.1).
e0_score = torch.matmul(self.hidden_dp_e0(conv_e0 * e0), emb_tail_e0)
e1_score = torch.matmul(self.hidden_dp_e1(conv_e1 * e1), emb_tail_e1)
e2_score = torch.matmul(self.hidden_dp_e2(conv_e2 * e2), emb_tail_e2)
e3_score = torch.matmul(self.hidden_dp_e3(conv_e3 * e3), emb_tail_e3)
e4_score = torch.matmul(self.hidden_dp_e4(conv_e4 * e4), emb_tail_e4)
e5_score = torch.matmul(self.hidden_dp_e5(conv_e5 * e5), emb_tail_e5)
e6_score = torch.matmul(self.hidden_dp_e6(conv_e6 * e6), emb_tail_e6)
e7_score = torch.matmul(self.hidden_dp_e7(conv_e7 * e7), emb_tail_e7)
score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
score = score.squeeze()
return torch.sigmoid(score)
def forward_head_and_loss(self, h_idx, r_idx, targets):
return self.loss(self.forward_head_batch(h_idx=h_idx, r_idx=r_idx), targets)
def forward_tail_and_loss(self, r_idx, e2_idx, targets):
return self.loss(self.forward_tail_batch(r_idx=r_idx, e2_idx=e2_idx), targets)
def init(self):
nn.init.xavier_normal_(self.emb_ent_e0.weight.data)
nn.init.xavier_normal_(self.emb_ent_e1.weight.data)
nn.init.xavier_normal_(self.emb_ent_e2.weight.data)
nn.init.xavier_normal_(self.emb_ent_e3.weight.data)
nn.init.xavier_normal_(self.emb_ent_e4.weight.data)
nn.init.xavier_normal_(self.emb_ent_e5.weight.data)
nn.init.xavier_normal_(self.emb_ent_e6.weight.data)
nn.init.xavier_normal_(self.emb_ent_e7.weight.data)
nn.init.xavier_normal_(self.emb_rel_e0.weight.data)
nn.init.xavier_normal_(self.emb_rel_e1.weight.data)
nn.init.xavier_normal_(self.emb_rel_e2.weight.data)
nn.init.xavier_normal_(self.emb_rel_e3.weight.data)
nn.init.xavier_normal_(self.emb_rel_e4.weight.data)
nn.init.xavier_normal_(self.emb_rel_e5.weight.data)
nn.init.xavier_normal_(self.emb_rel_e6.weight.data)
nn.init.xavier_normal_(self.emb_rel_e7.weight.data)
def get_embeddings(self):
entity_emb = torch.cat((
self.emb_ent_e0.weight.data, self.emb_ent_e1.weight.data,
self.emb_ent_e2.weight.data, self.emb_ent_e3.weight.data,
self.emb_ent_e4.weight.data, self.emb_ent_e5.weight.data,
self.emb_ent_e6.weight.data, self.emb_ent_e7.weight.data), 1)
rel_emb = torch.cat((
self.emb_rel_e0.weight.data, self.emb_rel_e1.weight.data,
self.emb_rel_e2.weight.data, self.emb_rel_e3.weight.data,
self.emb_rel_e4.weight.data, self.emb_rel_e5.weight.data,
self.emb_rel_e6.weight.data, self.emb_rel_e7.weight.data), 1)
return entity_emb, rel_emb | toolbox/nn/OctonionE.py | import torch
import torch.nn as nn
from torch.nn import functional as F
def octonion_mul(O_1, O_2):
    """Cayley (octonion) product of two octonions.

    Each argument is an 8-tuple of like-shaped tensors (or scalars): index 0
    holds the real component, indices 1..7 the imaginary components e1..e7.
    The product is returned as the matching 8-tuple; term order inside each
    component is kept fixed so floating-point results are reproducible.
    """
    a0, a1, a2, a3, a4, a5, a6, a7 = O_1
    b0, b1, b2, b3, b4, b5, b6, b7 = O_2
    c0 = a0 * b0 - a1 * b1 - a2 * b2 - a3 * b3 - a4 * b4 - a5 * b5 - a6 * b6 - a7 * b7
    c1 = a0 * b1 + a1 * b0 + a2 * b3 - a3 * b2 + a4 * b5 - a5 * b4 - a6 * b7 + a7 * b6
    c2 = a0 * b2 - a1 * b3 + a2 * b0 + a3 * b1 + a4 * b6 + a5 * b7 - a6 * b4 - a7 * b5
    c3 = a0 * b3 + a1 * b2 - a2 * b1 + a3 * b0 + a4 * b7 - a5 * b6 + a6 * b5 - a7 * b4
    c4 = a0 * b4 - a1 * b5 - a2 * b6 - a3 * b7 + a4 * b0 + a5 * b1 + a6 * b2 + a7 * b3
    c5 = a0 * b5 + a1 * b4 - a2 * b7 + a3 * b6 - a4 * b1 + a5 * b0 - a6 * b3 + a7 * b2
    c6 = a0 * b6 + a1 * b7 + a2 * b4 - a3 * b5 - a4 * b2 + a5 * b3 + a6 * b0 - a7 * b1
    c7 = a0 * b7 - a1 * b6 + a2 * b5 + a3 * b4 - a4 * b3 - a5 * b2 + a6 * b1 + a7 * b0
    return c0, c1, c2, c3, c4, c5, c6, c7
def octonion_mul_norm(O_1, O_2):
    """Octonion product of O_1 with a unit-normalised O_2.

    O_2 is divided by its octonion (Euclidean) norm before multiplying, which
    removes its scaling effect.  NOTE: the denominator is not clamped, so a
    (numerically) zero O_2 yields NaNs due to floating point.
    """
    a0, a1, a2, a3, a4, a5, a6, a7 = O_1
    r0, r1, r2, r3, r4, r5, r6, r7 = O_2
    norm = torch.sqrt(r0 ** 2 + r1 ** 2 + r2 ** 2 + r3 ** 2 +
                      r4 ** 2 + r5 ** 2 + r6 ** 2 + r7 ** 2)
    b0, b1, b2, b3, b4, b5, b6, b7 = (
        r0 / norm, r1 / norm, r2 / norm, r3 / norm,
        r4 / norm, r5 / norm, r6 / norm, r7 / norm)
    # Same Cayley multiplication table (and term order) as octonion_mul.
    c0 = a0 * b0 - a1 * b1 - a2 * b2 - a3 * b3 - a4 * b4 - a5 * b5 - a6 * b6 - a7 * b7
    c1 = a0 * b1 + a1 * b0 + a2 * b3 - a3 * b2 + a4 * b5 - a5 * b4 - a6 * b7 + a7 * b6
    c2 = a0 * b2 - a1 * b3 + a2 * b0 + a3 * b1 + a4 * b6 + a5 * b7 - a6 * b4 - a7 * b5
    c3 = a0 * b3 + a1 * b2 - a2 * b1 + a3 * b0 + a4 * b7 - a5 * b6 + a6 * b5 - a7 * b4
    c4 = a0 * b4 - a1 * b5 - a2 * b6 - a3 * b7 + a4 * b0 + a5 * b1 + a6 * b2 + a7 * b3
    c5 = a0 * b5 + a1 * b4 - a2 * b7 + a3 * b6 - a4 * b1 + a5 * b0 - a6 * b3 + a7 * b2
    c6 = a0 * b6 + a1 * b7 + a2 * b4 - a3 * b5 - a4 * b2 + a5 * b3 + a6 * b0 - a7 * b1
    c7 = a0 * b7 - a1 * b6 + a2 * b5 + a3 * b4 - a4 * b3 - a5 * b2 + a6 * b1 + a7 * b0
    return c0, c1, c2, c3, c4, c5, c6, c7
class OMult(torch.nn.Module):
    """OMult: knowledge-graph embedding model based on octonion multiplication.

    Entities and relations are embedded as octonions, stored as 8 separate
    real-valued component embeddings (e0 = real part, e1..e7 = imaginary
    parts).  A triple (h, r, t) is scored by the inner product of the octonion
    product h*r with t; in KvsAll mode the score is computed against every
    entity at once.  With ``norm_flag=True`` the relation octonion is
    unit-normalised (octonion_mul_norm) and the BN/dropout regularisation
    path is skipped.
    """

    def __init__(self,
                 num_entities, num_relations,
                 embedding_dim,
                 norm_flag=False, input_dropout=0.2, hidden_dropout=0.3
                 ):
        super(OMult, self).__init__()
        self.name = 'OMult'
        self.embedding_dim = embedding_dim
        self.num_entities = num_entities
        self.num_relations = num_relations
        self.loss = nn.BCELoss()
        self.flag_octonion_mul_norm = norm_flag
        # Octonion embeddings of entities
        self.emb_ent_e0 = nn.Embedding(self.num_entities, self.embedding_dim)  # real
        self.emb_ent_e1 = nn.Embedding(self.num_entities, self.embedding_dim)  # e1
        self.emb_ent_e2 = nn.Embedding(self.num_entities, self.embedding_dim)  # e2
        self.emb_ent_e3 = nn.Embedding(self.num_entities, self.embedding_dim)  # e3
        self.emb_ent_e4 = nn.Embedding(self.num_entities, self.embedding_dim)  # e4
        self.emb_ent_e5 = nn.Embedding(self.num_entities, self.embedding_dim)  # e5
        self.emb_ent_e6 = nn.Embedding(self.num_entities, self.embedding_dim)  # e6
        self.emb_ent_e7 = nn.Embedding(self.num_entities, self.embedding_dim)  # e7
        # Octonion embeddings of relations
        self.emb_rel_e0 = nn.Embedding(self.num_relations, self.embedding_dim)  # real
        self.emb_rel_e1 = nn.Embedding(self.num_relations, self.embedding_dim)  # e1
        self.emb_rel_e2 = nn.Embedding(self.num_relations, self.embedding_dim)  # e2
        self.emb_rel_e3 = nn.Embedding(self.num_relations, self.embedding_dim)  # e3
        self.emb_rel_e4 = nn.Embedding(self.num_relations, self.embedding_dim)  # e4
        self.emb_rel_e5 = nn.Embedding(self.num_relations, self.embedding_dim)  # e5
        self.emb_rel_e6 = nn.Embedding(self.num_relations, self.embedding_dim)  # e6
        self.emb_rel_e7 = nn.Embedding(self.num_relations, self.embedding_dim)  # e7
        # Dropouts for octonion embeddings of ALL entities.
        self.input_dp_ent_e0 = nn.Dropout(input_dropout)
        self.input_dp_ent_e1 = nn.Dropout(input_dropout)
        self.input_dp_ent_e2 = nn.Dropout(input_dropout)
        self.input_dp_ent_e3 = nn.Dropout(input_dropout)
        self.input_dp_ent_e4 = nn.Dropout(input_dropout)
        self.input_dp_ent_e5 = nn.Dropout(input_dropout)
        self.input_dp_ent_e6 = nn.Dropout(input_dropout)
        self.input_dp_ent_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings of relations.
        self.input_dp_rel_e0 = nn.Dropout(input_dropout)
        self.input_dp_rel_e1 = nn.Dropout(input_dropout)
        self.input_dp_rel_e2 = nn.Dropout(input_dropout)
        self.input_dp_rel_e3 = nn.Dropout(input_dropout)
        self.input_dp_rel_e4 = nn.Dropout(input_dropout)
        self.input_dp_rel_e5 = nn.Dropout(input_dropout)
        self.input_dp_rel_e6 = nn.Dropout(input_dropout)
        self.input_dp_rel_e7 = nn.Dropout(input_dropout)
        # Dropouts for octonion embeddings obtained from octonion multiplication.
        self.hidden_dp_e0 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e1 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e2 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e3 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e4 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e5 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e6 = nn.Dropout(hidden_dropout)
        self.hidden_dp_e7 = nn.Dropout(hidden_dropout)
        # Batch normalization for octonion embeddings of ALL entities.
        self.bn_ent_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_ent_e7 = nn.BatchNorm1d(self.embedding_dim)
        # Batch normalization for octonion embeddings of relations.
        self.bn_rel_e0 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e1 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e2 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e3 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e4 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e5 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e6 = nn.BatchNorm1d(self.embedding_dim)
        self.bn_rel_e7 = nn.BatchNorm1d(self.embedding_dim)

    def forward(self, h_idx, r_idx):
        """Flatten index tensors and delegate to KvsAll head scoring."""
        return self.forward_head_batch(h_idx.view(-1), r_idx.view(-1))

    def forward_head_batch(self, h_idx, r_idx):
        """
        Given head entities and relations (h, r), compute scores for all
        possible triples, i.e. [score(h, r, x) | x in Entities].

        :return: sigmoid scores of shape (batch, num_entities)
        """
        # (1)
        # (1.1) Octonion embeddings of head entities
        emb_head_e0 = self.emb_ent_e0(h_idx)
        emb_head_e1 = self.emb_ent_e1(h_idx)
        emb_head_e2 = self.emb_ent_e2(h_idx)
        emb_head_e3 = self.emb_ent_e3(h_idx)
        emb_head_e4 = self.emb_ent_e4(h_idx)
        emb_head_e5 = self.emb_ent_e5(h_idx)
        emb_head_e6 = self.emb_ent_e6(h_idx)
        emb_head_e7 = self.emb_ent_e7(h_idx)
        # (1.2) Octonion embeddings of relations
        emb_rel_e0 = self.emb_rel_e0(r_idx)
        emb_rel_e1 = self.emb_rel_e1(r_idx)
        emb_rel_e2 = self.emb_rel_e2(r_idx)
        emb_rel_e3 = self.emb_rel_e3(r_idx)
        emb_rel_e4 = self.emb_rel_e4(r_idx)
        emb_rel_e5 = self.emb_rel_e5(r_idx)
        emb_rel_e6 = self.emb_rel_e6(r_idx)
        emb_rel_e7 = self.emb_rel_e7(r_idx)
        if self.flag_octonion_mul_norm:
            # (2) Octonion multiplication of (1.1) and unit normalized (1.2).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (3) Inner product of (2) with ALL entities.
            e0_score = torch.mm(e0, self.emb_ent_e0.weight.transpose(1, 0))
            e1_score = torch.mm(e1, self.emb_ent_e1.weight.transpose(1, 0))
            e2_score = torch.mm(e2, self.emb_ent_e2.weight.transpose(1, 0))
            e3_score = torch.mm(e3, self.emb_ent_e3.weight.transpose(1, 0))
            e4_score = torch.mm(e4, self.emb_ent_e4.weight.transpose(1, 0))
            e5_score = torch.mm(e5, self.emb_ent_e5.weight.transpose(1, 0))
            e6_score = torch.mm(e6, self.emb_ent_e6.weight.transpose(1, 0))
            e7_score = torch.mm(e7, self.emb_ent_e7.weight.transpose(1, 0))
        else:
            # (2)
            # (2.1) Apply BN + Dropout on (1.2) relations.
            # (2.2) Apply octonion multiplication of (1.1) and (2.1).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
                O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
                     emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
                O_2=(self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)),
                     self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)),
                     self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)),
                     self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)),
                     self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)),
                     self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)),
                     self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)),
                     self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7))))
            # (3)
            # (3.1) Dropout on (2)-result of octonion multiplication.
            # (3.2) Apply BN + DP on ALL entities.
            # (3.3) Inner product
            e0_score = torch.mm(self.hidden_dp_e0(e0),
                                self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)).transpose(1, 0))
            e1_score = torch.mm(self.hidden_dp_e1(e1),
                                self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)).transpose(1, 0))
            e2_score = torch.mm(self.hidden_dp_e2(e2),
                                self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)).transpose(1, 0))
            e3_score = torch.mm(self.hidden_dp_e3(e3),
                                self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)).transpose(1, 0))
            e4_score = torch.mm(self.hidden_dp_e4(e4),
                                self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)).transpose(1, 0))
            e5_score = torch.mm(self.hidden_dp_e5(e5),
                                self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)).transpose(1, 0))
            e6_score = torch.mm(self.hidden_dp_e6(e6),
                                self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)).transpose(1, 0))
            e7_score = torch.mm(self.hidden_dp_e7(e7),
                                self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight)).transpose(1, 0))
        score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
        return torch.sigmoid(score)

    def forward_tail_batch(self, r_idx, e2_idx):
        """
        KvsAll scoring in the reverse direction: given (r, t), score every
        possible head entity via the octonion product of ALL entities with r,
        followed by an inner product with t.

        :return: sigmoid scores of shape (batch, num_entities)
        """
        # (1)
        # (1.1) Octonion embeddings of relations
        emb_rel_e0 = self.emb_rel_e0(r_idx)
        emb_rel_e1 = self.emb_rel_e1(r_idx)
        emb_rel_e2 = self.emb_rel_e2(r_idx)
        emb_rel_e3 = self.emb_rel_e3(r_idx)
        emb_rel_e4 = self.emb_rel_e4(r_idx)
        emb_rel_e5 = self.emb_rel_e5(r_idx)
        emb_rel_e6 = self.emb_rel_e6(r_idx)
        emb_rel_e7 = self.emb_rel_e7(r_idx)
        # (1.2) Reshape octonion embeddings of tail entities into column vectors.
        emb_tail_e0 = self.emb_ent_e0(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e1 = self.emb_ent_e1(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e2 = self.emb_ent_e2(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e3 = self.emb_ent_e3(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e4 = self.emb_ent_e4(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e5 = self.emb_ent_e5(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e6 = self.emb_ent_e6(e2_idx).view(-1, self.embedding_dim, 1)
        emb_tail_e7 = self.emb_ent_e7(e2_idx).view(-1, self.embedding_dim, 1)
        if self.flag_octonion_mul_norm:
            # (2) Reshape (1.1)-relations.  Reuse the embeddings already fetched
            # in (1.1); re-embedding r_idx would be redundant work.
            emb_rel_e0 = emb_rel_e0.view(-1, 1, self.embedding_dim)
            emb_rel_e1 = emb_rel_e1.view(-1, 1, self.embedding_dim)
            emb_rel_e2 = emb_rel_e2.view(-1, 1, self.embedding_dim)
            emb_rel_e3 = emb_rel_e3.view(-1, 1, self.embedding_dim)
            emb_rel_e4 = emb_rel_e4.view(-1, 1, self.embedding_dim)
            emb_rel_e5 = emb_rel_e5.view(-1, 1, self.embedding_dim)
            emb_rel_e6 = emb_rel_e6.view(-1, 1, self.embedding_dim)
            emb_rel_e7 = emb_rel_e7.view(-1, 1, self.embedding_dim)
            # (3) Octonion multiplication of ALL entities and unit normalized (1.1).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul_norm(
                O_1=(self.emb_ent_e0.weight, self.emb_ent_e1.weight,
                     self.emb_ent_e2.weight, self.emb_ent_e3.weight,
                     self.emb_ent_e4.weight, self.emb_ent_e5.weight,
                     self.emb_ent_e6.weight, self.emb_ent_e7.weight),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (4) Inner product of (3) with (1.2).
            e0_score = torch.matmul(e0, emb_tail_e0)
            e1_score = torch.matmul(e1, emb_tail_e1)
            e2_score = torch.matmul(e2, emb_tail_e2)
            e3_score = torch.matmul(e3, emb_tail_e3)
            e4_score = torch.matmul(e4, emb_tail_e4)
            e5_score = torch.matmul(e5, emb_tail_e5)
            e6_score = torch.matmul(e6, emb_tail_e6)
            e7_score = torch.matmul(e7, emb_tail_e7)
        else:
            # (2) BN + Dropout on (1.1)-relations, then reshape into row vectors.
            emb_rel_e0 = self.input_dp_rel_e0(self.bn_rel_e0(emb_rel_e0)).view(-1, 1, self.embedding_dim)
            emb_rel_e1 = self.input_dp_rel_e1(self.bn_rel_e1(emb_rel_e1)).view(-1, 1, self.embedding_dim)
            emb_rel_e2 = self.input_dp_rel_e2(self.bn_rel_e2(emb_rel_e2)).view(-1, 1, self.embedding_dim)
            emb_rel_e3 = self.input_dp_rel_e3(self.bn_rel_e3(emb_rel_e3)).view(-1, 1, self.embedding_dim)
            emb_rel_e4 = self.input_dp_rel_e4(self.bn_rel_e4(emb_rel_e4)).view(-1, 1, self.embedding_dim)
            emb_rel_e5 = self.input_dp_rel_e5(self.bn_rel_e5(emb_rel_e5)).view(-1, 1, self.embedding_dim)
            emb_rel_e6 = self.input_dp_rel_e6(self.bn_rel_e6(emb_rel_e6)).view(-1, 1, self.embedding_dim)
            emb_rel_e7 = self.input_dp_rel_e7(self.bn_rel_e7(emb_rel_e7)).view(-1, 1, self.embedding_dim)
            # (3)
            # (3.1) BN + Dropout on ALL entities.
            # (3.2) Octonion multiplication of (3.1) and (2).
            e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
                O_1=(self.input_dp_ent_e0(self.bn_ent_e0(self.emb_ent_e0.weight)),
                     self.input_dp_ent_e1(self.bn_ent_e1(self.emb_ent_e1.weight)),
                     self.input_dp_ent_e2(self.bn_ent_e2(self.emb_ent_e2.weight)),
                     self.input_dp_ent_e3(self.bn_ent_e3(self.emb_ent_e3.weight)),
                     self.input_dp_ent_e4(self.bn_ent_e4(self.emb_ent_e4.weight)),
                     self.input_dp_ent_e5(self.bn_ent_e5(self.emb_ent_e5.weight)),
                     self.input_dp_ent_e6(self.bn_ent_e6(self.emb_ent_e6.weight)),
                     self.input_dp_ent_e7(self.bn_ent_e7(self.emb_ent_e7.weight))),
                O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
                     emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
            # (4)
            # (4.1) Dropout on (3).
            # (4.2) Inner product of (4.1) with (1.2).
            e0_score = torch.matmul(self.hidden_dp_e0(e0), emb_tail_e0)
            e1_score = torch.matmul(self.hidden_dp_e1(e1), emb_tail_e1)
            e2_score = torch.matmul(self.hidden_dp_e2(e2), emb_tail_e2)
            e3_score = torch.matmul(self.hidden_dp_e3(e3), emb_tail_e3)
            e4_score = torch.matmul(self.hidden_dp_e4(e4), emb_tail_e4)
            e5_score = torch.matmul(self.hidden_dp_e5(e5), emb_tail_e5)
            e6_score = torch.matmul(self.hidden_dp_e6(e6), emb_tail_e6)
            e7_score = torch.matmul(self.hidden_dp_e7(e7), emb_tail_e7)
        score = e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
        # Squeeze only the trailing singleton; a bare squeeze() would also drop
        # the batch dimension when the batch size is 1.
        score = score.squeeze(-1)
        return torch.sigmoid(score)

    def forward_head_and_loss(self, h_idx, r_idx, targets):
        """Score (h, r) against all entities and return the loss w.r.t. targets."""
        return self.loss(self.forward_head_batch(h_idx=h_idx, r_idx=r_idx), targets)

    def forward_tail_and_loss(self, r_idx, e2_idx, targets):
        """Score (r, t) against all entities and return the loss w.r.t. targets."""
        return self.loss(self.forward_tail_batch(r_idx=r_idx, e2_idx=e2_idx), targets)

    def init(self):
        """Xavier-normal initialisation of every octonion component embedding."""
        nn.init.xavier_normal_(self.emb_ent_e0.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e1.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e2.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e3.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e4.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e5.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e6.weight.data)
        nn.init.xavier_normal_(self.emb_ent_e7.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e0.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e1.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e2.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e3.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e4.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e5.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e6.weight.data)
        nn.init.xavier_normal_(self.emb_rel_e7.weight.data)

    def get_embeddings(self):
        """Return (entity, relation) matrices with the 8 components concatenated per row."""
        entity_emb = torch.cat((
            self.emb_ent_e0.weight.data, self.emb_ent_e1.weight.data,
            self.emb_ent_e2.weight.data, self.emb_ent_e3.weight.data,
            self.emb_ent_e4.weight.data, self.emb_ent_e5.weight.data,
            self.emb_ent_e6.weight.data, self.emb_ent_e7.weight.data), 1)
        rel_emb = torch.cat((
            self.emb_rel_e0.weight.data, self.emb_rel_e1.weight.data,
            self.emb_rel_e2.weight.data, self.emb_rel_e3.weight.data,
            self.emb_rel_e4.weight.data, self.emb_rel_e5.weight.data,
            self.emb_rel_e6.weight.data, self.emb_rel_e7.weight.data), 1)
        return entity_emb, rel_emb
class ConvO(nn.Module):
def __init__(self,
num_entities, num_relations,
embedding_dim,
kernel_size=3, num_of_output_channels=16, feature_map_dropout=0.3,
norm_flag=False, input_dropout=0.2, hidden_dropout=0.3):
super(ConvO, self).__init__()
self.name = 'ConvO'
self.embedding_dim = embedding_dim
self.num_entities = num_entities
self.num_relations = num_relations
self.loss = nn.BCELoss()
self.flag_octonion_mul_norm = norm_flag
# Octonion embeddings of entities
self.emb_ent_e0 = nn.Embedding(self.num_entities, self.embedding_dim) # real
self.emb_ent_e1 = nn.Embedding(self.num_entities, self.embedding_dim) # e1
self.emb_ent_e2 = nn.Embedding(self.num_entities, self.embedding_dim) # e2
self.emb_ent_e3 = nn.Embedding(self.num_entities, self.embedding_dim) # e3
self.emb_ent_e4 = nn.Embedding(self.num_entities, self.embedding_dim) # e3
self.emb_ent_e5 = nn.Embedding(self.num_entities, self.embedding_dim) # e4
self.emb_ent_e6 = nn.Embedding(self.num_entities, self.embedding_dim) # e6
self.emb_ent_e7 = nn.Embedding(self.num_entities, self.embedding_dim) # e7
# Octonion embeddings of relations
self.emb_rel_e0 = nn.Embedding(self.num_relations, self.embedding_dim) # real
self.emb_rel_e1 = nn.Embedding(self.num_relations, self.embedding_dim) # e1
self.emb_rel_e2 = nn.Embedding(self.num_relations, self.embedding_dim) # e2
self.emb_rel_e3 = nn.Embedding(self.num_relations, self.embedding_dim) # e3
self.emb_rel_e4 = nn.Embedding(self.num_relations, self.embedding_dim) # e4
self.emb_rel_e5 = nn.Embedding(self.num_relations, self.embedding_dim) # e5
self.emb_rel_e6 = nn.Embedding(self.num_relations, self.embedding_dim) # e6
self.emb_rel_e7 = nn.Embedding(self.num_relations, self.embedding_dim) # e7
# Dropouts for octonion embeddings of ALL entities.
self.input_dp_ent_e0 = nn.Dropout(input_dropout)
self.input_dp_ent_e1 = nn.Dropout(input_dropout)
self.input_dp_ent_e2 = nn.Dropout(input_dropout)
self.input_dp_ent_e3 = nn.Dropout(input_dropout)
self.input_dp_ent_e4 = nn.Dropout(input_dropout)
self.input_dp_ent_e5 = nn.Dropout(input_dropout)
self.input_dp_ent_e6 = nn.Dropout(input_dropout)
self.input_dp_ent_e7 = nn.Dropout(input_dropout)
# Dropouts for octonion embeddings of relations.
self.input_dp_rel_e0 = nn.Dropout(input_dropout)
self.input_dp_rel_e1 = nn.Dropout(input_dropout)
self.input_dp_rel_e2 = nn.Dropout(input_dropout)
self.input_dp_rel_e3 = nn.Dropout(input_dropout)
self.input_dp_rel_e4 = nn.Dropout(input_dropout)
self.input_dp_rel_e5 = nn.Dropout(input_dropout)
self.input_dp_rel_e6 = nn.Dropout(input_dropout)
self.input_dp_rel_e7 = nn.Dropout(input_dropout)
# Dropouts for octonion embeddings obtained from octonion multiplication.
self.hidden_dp_e0 = nn.Dropout(hidden_dropout)
self.hidden_dp_e1 = nn.Dropout(hidden_dropout)
self.hidden_dp_e2 = nn.Dropout(hidden_dropout)
self.hidden_dp_e3 = nn.Dropout(hidden_dropout)
self.hidden_dp_e4 = nn.Dropout(hidden_dropout)
self.hidden_dp_e5 = nn.Dropout(hidden_dropout)
self.hidden_dp_e6 = nn.Dropout(hidden_dropout)
self.hidden_dp_e7 = nn.Dropout(hidden_dropout)
# Batch normalization for octonion embeddings of ALL entities.
self.bn_ent_e0 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e1 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e2 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e3 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e4 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e5 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e6 = nn.BatchNorm1d(self.embedding_dim)
self.bn_ent_e7 = nn.BatchNorm1d(self.embedding_dim)
# Batch normalization for octonion embeddings of relations.
self.bn_rel_e0 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e1 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e2 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e3 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e4 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e5 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e6 = nn.BatchNorm1d(self.embedding_dim)
self.bn_rel_e7 = nn.BatchNorm1d(self.embedding_dim)
# Convolution
self.kernel_size = kernel_size
self.num_of_output_channels = num_of_output_channels
self.feature_map_dropout = nn.Dropout2d(feature_map_dropout)
self.conv1 = nn.Conv1d(in_channels=1, out_channels=self.num_of_output_channels,
kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True)
self.fc_num_input = self.embedding_dim * 16 * self.num_of_output_channels # 8 because of 8 real values in 2 quaternions
self.fc1 = nn.Linear(self.fc_num_input, self.embedding_dim * 8) # Hard compression.
self.bn_conv1 = nn.BatchNorm2d(self.num_of_output_channels)
self.bn_conv2 = nn.BatchNorm1d(self.embedding_dim * 8)
def forward(self, h_idx, r_idx):
return self.forward_head_batch(h_idx.view(-1), r_idx.view(-1))
def residual_convolution(self, O_1, O_2):
emb_ent_e0, emb_ent_e1, emb_ent_e2, emb_ent_e3, emb_ent_e4, emb_ent_e5, emb_ent_e6, emb_ent_e7 = O_1
emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7 = O_2
x = torch.cat([emb_ent_e0.view(-1, 1, 1, self.embedding_dim),
emb_ent_e1.view(-1, 1, 1, self.embedding_dim),
emb_ent_e2.view(-1, 1, 1, self.embedding_dim),
emb_ent_e3.view(-1, 1, 1, self.embedding_dim),
emb_ent_e4.view(-1, 1, 1, self.embedding_dim),
emb_ent_e5.view(-1, 1, 1, self.embedding_dim),
emb_ent_e6.view(-1, 1, 1, self.embedding_dim),
emb_ent_e7.view(-1, 1, 1, self.embedding_dim), # entities
emb_rel_e0.view(-1, 1, 1, self.embedding_dim),
emb_rel_e1.view(-1, 1, 1, self.embedding_dim),
emb_rel_e2.view(-1, 1, 1, self.embedding_dim),
emb_rel_e3.view(-1, 1, 1, self.embedding_dim),
emb_rel_e4.view(-1, 1, 1, self.embedding_dim),
emb_rel_e5.view(-1, 1, 1, self.embedding_dim),
emb_rel_e6.view(-1, 1, 1, self.embedding_dim),
emb_rel_e7.view(-1, 1, 1, self.embedding_dim), ], 2)
x = self.conv1(x)
x = self.bn_conv1(x)
x = F.relu(x)
x = self.feature_map_dropout(x)
x = x.view(x.shape[0], -1) # reshape for NN.
x = self.fc1(x)
x = self.bn_conv2(x)
x = F.relu(x)
return torch.chunk(x, 8, dim=1)
def forward_head_batch(self, h_idx, r_idx):
    """
    Score every entity as the missing tail for a batch of (head, relation) pairs.

    h_idx, r_idx : 1-D LongTensors of head-entity / relation indices.
    Returns a (batch, num_entities) tensor of sigmoid scores.
    """
    # (1) Look up the eight octonion components of heads and relations.
    head = tuple(getattr(self, 'emb_ent_e' + str(i))(h_idx) for i in range(8))
    rel = tuple(getattr(self, 'emb_rel_e' + str(i))(r_idx) for i in range(8))
    # (2) Convolutional features over the stacked components.
    conv = self.residual_convolution(O_1=head, O_2=rel)
    if self.flag_octonion_mul_norm:
        # (3) Octonion product of heads with unit-normalised relations.
        mul = octonion_mul_norm(O_1=head, O_2=rel)
        # (4) Gate each product component with its conv feature, then take the
        #     inner product with the matching component of ALL entities.
        score = None
        for i in range(8):
            all_entities = getattr(self, 'emb_ent_e' + str(i)).weight
            part = torch.mm(conv[i] * mul[i], all_entities.transpose(1, 0))
            score = part if score is None else score + part
    else:
        # (3) BatchNorm + dropout on relation components, then octonion product.
        normed_rel = tuple(
            getattr(self, 'input_dp_rel_e' + str(i))(
                getattr(self, 'bn_rel_e' + str(i))(rel[i]))
            for i in range(8))
        mul = octonion_mul(O_1=head, O_2=normed_rel)
        # (4) Gate with conv features, apply hidden dropout, then score against
        #     BN + dropout regularised entity matrices.
        score = None
        for i in range(8):
            gated = getattr(self, 'hidden_dp_e' + str(i))(conv[i] * mul[i])
            all_entities = getattr(self, 'input_dp_ent_e' + str(i))(
                getattr(self, 'bn_ent_e' + str(i))(
                    getattr(self, 'emb_ent_e' + str(i)).weight))
            part = torch.mm(gated, all_entities.transpose(1, 0))
            score = part if score is None else score + part
    return torch.sigmoid(score)
def forward_tail_batch(self, r_idx, e2_idx):
    """
    Score every entity as the missing head for a batch of (relation, tail) pairs.

    r_idx, e2_idx : 1-D LongTensors of relation / tail-entity indices.
    Returns a (batch, num_entities) tensor of sigmoid scores.
    """
    # (1) Component look-ups for relations and tail entities.
    rel = tuple(getattr(self, 'emb_rel_e' + str(i))(r_idx) for i in range(8))
    tail = tuple(getattr(self, 'emb_ent_e' + str(i))(e2_idx) for i in range(8))
    # (2) conv(r, t) instead of conv(t, r) or conv(h, r).
    conv = self.residual_convolution(O_1=rel, O_2=tail)
    # (3) Tails as column vectors and conv outputs as row vectors, so a batched
    #     matmul later contracts each pair to a scalar per component.
    tail_col = [t.view(-1, self.embedding_dim, 1) for t in tail]
    conv_row = [c.view(-1, 1, self.embedding_dim) for c in conv]
    if self.flag_octonion_mul_norm:
        # (4) Relations as row vectors.
        rel_row = tuple(r.view(-1, 1, self.embedding_dim) for r in rel)
        # (5) Octonion product of ALL entities with unit-normalised relations.
        ent_weights = tuple(getattr(self, 'emb_ent_e' + str(i)).weight for i in range(8))
        mul = octonion_mul_norm(O_1=ent_weights, O_2=rel_row)
        # (6) Gate with conv features and contract against the tail components.
        score = None
        for i in range(8):
            part = torch.matmul(conv_row[i] * mul[i], tail_col[i])
            score = part if score is None else score + part
    else:
        # (4) BatchNorm + dropout on relation components, reshaped to rows.
        rel_row = tuple(
            getattr(self, 'input_dp_rel_e' + str(i))(
                getattr(self, 'bn_rel_e' + str(i))(rel[i])).view(-1, 1, self.embedding_dim)
            for i in range(8))
        # (5) Octonion product of BN + dropout regularised entity matrices with (4).
        ent_reg = tuple(
            getattr(self, 'input_dp_ent_e' + str(i))(
                getattr(self, 'bn_ent_e' + str(i))(
                    getattr(self, 'emb_ent_e' + str(i)).weight))
            for i in range(8))
        mul = octonion_mul(O_1=ent_reg, O_2=rel_row)
        # (6) Gate with conv features, apply hidden dropout, contract against tails.
        score = None
        for i in range(8):
            gated = getattr(self, 'hidden_dp_e' + str(i))(conv_row[i] * mul[i])
            part = torch.matmul(gated, tail_col[i])
            score = part if score is None else score + part
    return torch.sigmoid(score.squeeze())
def forward_head_and_loss(self, h_idx, r_idx, targets):
    """Convenience wrapper: head-batch predictions fed straight into the loss."""
    predictions = self.forward_head_batch(h_idx=h_idx, r_idx=r_idx)
    return self.loss(predictions, targets)
def forward_tail_and_loss(self, r_idx, e2_idx, targets):
    """Convenience wrapper: tail-batch predictions fed straight into the loss."""
    predictions = self.forward_tail_batch(r_idx=r_idx, e2_idx=e2_idx)
    return self.loss(predictions, targets)
def init(self):
    """Xavier-initialise all sixteen component embedding matrices
    (eight entity components first, then eight relation components)."""
    for prefix in ('emb_ent_e', 'emb_rel_e'):
        for component in range(8):
            embedding = getattr(self, prefix + str(component))
            nn.init.xavier_normal_(embedding.weight.data)
def get_embeddings(self):
    """Return (entity_matrix, relation_matrix) with the eight octonion
    components concatenated along dim 1, i.e. shape (n, embedding_dim * 8)."""
    entity_emb = torch.cat(
        [getattr(self, 'emb_ent_e' + str(i)).weight.data for i in range(8)], 1)
    rel_emb = torch.cat(
        [getattr(self, 'emb_rel_e' + str(i)).weight.data for i in range(8)], 1)
    return entity_emb, rel_emb
from django.db import models
from django.conf import settings
from djnfusion import server, key
from packages.models import InfusionsoftTag, PackagePurchase
class UserProfile(models.Model):
    """Public, user-editable profile; exactly one per user, all fields optional."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    archetype = models.CharField(max_length=255, blank=True,
                                 verbose_name=u'My Archetype')
    anthem = models.TextField(blank=True, verbose_name=u'My Anthem')
    about = models.TextField(blank=True, verbose_name=u'I am...')
    support = models.TextField(blank=True,
                               verbose_name=u'I need support in achieving:')
    def __unicode__(self):
        # Python 2 style representation (this codebase predates __str__).
        return self.user.username
class UserPrivateProfile(models.Model):
    '''
    Private profile for users. Other users are not able to view this unless
    they are staff (see can_view).
    '''
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                related_name='private_profile')
    dream = models.TextField(blank=True,
                             verbose_name=u'My BIG WHY is:')
    def __unicode__(self):
        return self.user.username
    def can_view(self, user):
        '''
        Return True when ``user`` may see this profile: only the owner
        and staff members are allowed.
        '''
        return user == self.user or user.is_staff
class FacebookProfile(models.Model):
    """Stores the Facebook account linked to a user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    # Facebook user id, kept as text; empty when no account is linked.
    fb_uid = models.TextField(blank=True)
    def __unicode__(self):
        return self.user.username
class InfusionsoftProfile(models.Model):
    """Links a local user to an Infusionsoft CRM contact and mirrors its tags."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    # Infusionsoft contact id, stored as text; empty until the first sync.
    remote_id = models.TextField(blank=True)
    tags = models.ManyToManyField(InfusionsoftTag,
                                  related_name="infusionsoft_profiles", blank=True, null=True)
    def __unicode__(self):
        return self.user.username
    @property
    def get_remote_id(self):
        # Lazily syncs with the remote server on first access; returns None
        # when the sync did not yield an id.
        if not self.remote_id:
            self.update_profile()
        return self.remote_id if self.remote_id else None
    def update_tags(self):
        """
        Update this profile's tags from the Infusionsoft server: expire the
        package purchases of removed tags and create purchases for new tags.
        Only runs when the Infusionsoft integration is configured.
        """
        if settings.DJNFUSION_COMPANY and settings.DJNFUSION_API_KEY:
            # Get Infusionsoft's tags from their server and find the instances in our db.
            tags = InfusionsoftTag.objects.by_user(self.user)
            # Active purchases tied to the tags currently on this profile.
            # The tag-id list comprehension is evaluated eagerly here, so the
            # filter keeps the pre-sync tag set even after removals below.
            active_purchases = PackagePurchase.objects.filter(user__id=self.user_id,
                package__infusionsoftpackage__tag_id__in=[tag.id for tag in self.tags.all()], status=1)  # 1 == Active
            for tag in self.tags.all():
                # Loop through the profile's current tags.
                if tag not in tags:
                    # Tag was removed on Infusionsoft: drop it locally.
                    self.tags.remove(tag)
                    # Set past purchases of this tag to expired.
                    expired = active_purchases.filter(package__infusionsoftpackage__tag_id=tag.id)
                    for purchase in expired:
                        purchase.status = 2  # 2 == Expired
                        purchase.save()
            for tag in tags:
                # Loop through the server-side tags.
                if tag not in self.tags.all():
                    # Tag exists remotely but not locally: add it.
                    self.tags.add(tag)
                    # Create a new package purchase for the tag's Infusionsoft package.
                    # NOTE(review): `tag.infusionsoftpackage` is passed as `package`
                    # while the filters above traverse `package__infusionsoftpackage`;
                    # confirm the related object really is the Package instance.
                    PackagePurchase.objects.create(
                        user=self.user, package=tag.infusionsoftpackage, status=1)  # 1 == Active
        # Model.save() returns None, so callers always receive None here.
        return self.save()
    def update_profile(self, referral_code=None):
        """
        Refresh remote_id from the Infusionsoft server, optionally pushing a
        referral code to the remote contact.
        """
        provider_data = self._get_provider_data(referral_code=referral_code)
        if len(provider_data):
            self.remote_id = provider_data["Id"]
        return self.save()
    def _get_provider_data(self, referral_code=None):
        """
        Look up (or create) this user's contact on Infusionsoft by e-mail and
        return a dict holding its "Id".
        """
        results = server.DataService.findByField(key, "Contact",
            10, 0, "email", self.user.email,
            ["Id", ]);
        if not len(results):
            # TODO: FIX THIS java.lang.String ERROR
            # No contact yet: create one from the local user's details.
            remote_id = server.ContactService.add(key, {
                "Email": self.user.email,
                "FirstName": self.user.first_name,
                "LastName": self.user.last_name})
            infusionsoft_id = {"Id": remote_id}
        else:
            infusionsoft_id = results[0]
        # In case a referral code was passed, we need to update Infusionsoft.
        if referral_code:
            server.ContactService.update(key, infusionsoft_id["Id"],
                {"ReferralCode": referral_code})
        return infusionsoft_id
class InstructorProfile(models.Model):
    """Marker profile: its existence flags a user as an instructor."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    def __unicode__(self):
        return self.user.username
class NotificationProfile(models.Model):
    """Per-user opt-in flags for each notification category (all on by default)."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    new_community_post = models.BooleanField(default=True)
    new_lesson = models.BooleanField(default=True)
    new_course = models.BooleanField(default=True)
    news = models.BooleanField(default=True)
from allauth.account.signals import user_logged_in
from django.dispatch import receiver
@receiver(user_logged_in)
def infusionsoft_sync_user(sender, **kwargs):
    # On every login, lazily create the user's Infusionsoft profile and resync
    # it; a no-op when the Infusionsoft integration is not configured.
    if settings.DJNFUSION_COMPANY:
        user = kwargs['user']
        profile = InfusionsoftProfile.objects.get_or_create(user=user)[0]
        profile.update_profile()
        profile.update_tags() | dtf/profiles/models.py | from django.db import models
from django.conf import settings
from djnfusion import server, key
from packages.models import InfusionsoftTag, PackagePurchase
class UserProfile(models.Model):
    """Public profile attached one-to-one to a user; every field is optional."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    archetype = models.CharField(max_length=255, blank=True,
                                 verbose_name=u'My Archetype')
    anthem = models.TextField(blank=True, verbose_name=u'My Anthem')
    about = models.TextField(blank=True, verbose_name=u'I am...')
    support = models.TextField(blank=True,
                               verbose_name=u'I need support in achieving:')
    def __unicode__(self):
        # Python 2 era string representation.
        return self.user.username
class UserPrivateProfile(models.Model):
    '''
    Private profile for users. Other users are not able to view this unless
    they are staff; access control lives in can_view.
    '''
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                related_name='private_profile')
    dream = models.TextField(blank=True,
                             verbose_name=u'My BIG WHY is:')
    def __unicode__(self):
        return self.user.username
    def can_view(self, user):
        '''
        True only for the profile owner and staff members.
        '''
        return user == self.user or user.is_staff
class FacebookProfile(models.Model):
    """Holds the Facebook uid associated with a user account."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    # Facebook user id stored as text; blank when unlinked.
    fb_uid = models.TextField(blank=True)
    def __unicode__(self):
        return self.user.username
class InfusionsoftProfile(models.Model):
    """Ties a local user to an Infusionsoft contact and mirrors the remote tags."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    # Remote contact id as text; empty before the first successful sync.
    remote_id = models.TextField(blank=True)
    tags = models.ManyToManyField(InfusionsoftTag,
                                  related_name="infusionsoft_profiles", blank=True, null=True)
    def __unicode__(self):
        return self.user.username
    @property
    def get_remote_id(self):
        # First access triggers a remote sync; returns None when still unknown.
        if not self.remote_id:
            self.update_profile()
        return self.remote_id if self.remote_id else None
    def update_tags(self):
        """
        Pull the tag list from Infusionsoft and reconcile it locally:
        removed tags expire their package purchases, new tags create one.
        Does nothing unless the integration is fully configured.
        """
        if settings.DJNFUSION_COMPANY and settings.DJNFUSION_API_KEY:
            # Fetch the server-side tags and resolve them to local instances.
            tags = InfusionsoftTag.objects.by_user(self.user)
            # Active purchases for the tags currently attached to the profile;
            # the id list is built eagerly, before any removal below.
            active_purchases = PackagePurchase.objects.filter(user__id=self.user_id,
                package__infusionsoftpackage__tag_id__in=[tag.id for tag in self.tags.all()], status=1)  # 1 == Active
            for tag in self.tags.all():
                # Walk the profile's current tags.
                if tag not in tags:
                    # Removed remotely: drop locally and expire its purchases.
                    self.tags.remove(tag)
                    expired = active_purchases.filter(package__infusionsoftpackage__tag_id=tag.id)
                    for purchase in expired:
                        purchase.status = 2  # 2 == Expired
                        purchase.save()
            for tag in tags:
                # Walk the server-side tags.
                if tag not in self.tags.all():
                    # New remotely: attach locally and open a purchase.
                    self.tags.add(tag)
                    # NOTE(review): confirm `tag.infusionsoftpackage` is the
                    # Package instance expected by PackagePurchase.package.
                    PackagePurchase.objects.create(
                        user=self.user, package=tag.infusionsoftpackage, status=1)  # 1 == Active
        # save() returns None, so this method effectively returns None.
        return self.save()
    def update_profile(self, referral_code=None):
        """
        Refresh remote_id from Infusionsoft; optionally forward a referral code.
        """
        provider_data = self._get_provider_data(referral_code=referral_code)
        if len(provider_data):
            self.remote_id = provider_data["Id"]
        return self.save()
    def _get_provider_data(self, referral_code=None):
        """
        Find this user's contact on Infusionsoft by e-mail, creating it when
        missing, and return a dict with its "Id".
        """
        results = server.DataService.findByField(key, "Contact",
            10, 0, "email", self.user.email,
            ["Id", ]);
        if not len(results):
            # TODO: FIX THIS java.lang.String ERROR
            # Contact does not exist yet: create it from the local user.
            remote_id = server.ContactService.add(key, {
                "Email": self.user.email,
                "FirstName": self.user.first_name,
                "LastName": self.user.last_name})
            infusionsoft_id = {"Id": remote_id}
        else:
            infusionsoft_id = results[0]
        # Push the referral code remotely when one was provided.
        if referral_code:
            server.ContactService.update(key, infusionsoft_id["Id"],
                {"ReferralCode": referral_code})
        return infusionsoft_id
class InstructorProfile(models.Model):
    """Existence of this record marks the linked user as an instructor."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    def __unicode__(self):
        return self.user.username
class NotificationProfile(models.Model):
    """Notification opt-in switches per user; everything enabled by default."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    new_community_post = models.BooleanField(default=True)
    new_lesson = models.BooleanField(default=True)
    new_course = models.BooleanField(default=True)
    news = models.BooleanField(default=True)
from allauth.account.signals import user_logged_in
from django.dispatch import receiver
@receiver(user_logged_in)
def infusionsoft_sync_user(sender, **kwargs):
    # Login signal hook: ensure the Infusionsoft profile exists and resync it.
    # Skipped entirely when the integration is not configured.
    if settings.DJNFUSION_COMPANY:
        user = kwargs['user']
        profile = InfusionsoftProfile.objects.get_or_create(user=user)[0]
        profile.update_profile()
        profile.update_tags() | 0.357904 | 0.111749
from lightbulb.core.utils.httphandler import HTTPHandler
from lightbulb.core.utils.common import findlibrary
import json
# Module metadata consumed by the lightbulb core: author/description shown in
# listings, and `options` as (NAME, default, required, description) tuples.
META = {
    'author': '<NAME>, <NAME>',
    'description': 'Identifies a WAF filter using a distinguish tree',
    'type': 'Distinguisher',
    'options': [
        ('FILE', None, True, 'File containing a distinguish tree'),
        ('URL', "http://127.0.0.1", True, 'The target URL'),
        ('REQUEST_TYPE', "GET", True, 'The HTTP request type (GET/POST)'),
        ('PARAM', "input", True, 'The request parameter'),
        ('BLOCK', None, False, 'The response string that indicates that the WAF blocks the request'),
        ('BYPASS', None, False, 'The response string that indicates that the WAF allows the request'),
        ('PROXY_SCHEME', None, False, 'The proxy scheme (e.g. http, https)'),
        ('PROXY_HOST', None, False, 'The proxy host'),
        ('PROXY_PORT', None, False, 'The proxy port'),
        ('PROXY_USERNAME', None, False, 'The proxy username'),
        ('PROXY_PASSWORD', None, False, 'The proxy password'),
        ('USER_AGENT', "Mozilla/5.0", True, 'The request user agent'),
        ('REFERER', "http://google.com", True, 'The request referrer'),
        ('PRELOAD', False, True, 'Preload the input filter'),
    ],
    'comments': ['Sample comment 1', 'Sample comment 2']
}
class Handler(HTTPHandler):
    """HTTP handler configured from the module's option dictionary."""

    def setup(self, configuration):
        """Copy every request option onto the handler; each option key maps
        one-to-one to an attribute name via lower-casing."""
        for option in ('URL', 'REQUEST_TYPE', 'PARAM', 'BLOCK', 'BYPASS',
                       'PROXY_SCHEME', 'PROXY_HOST', 'PROXY_PORT',
                       'PROXY_USERNAME', 'PROXY_PASSWORD',
                       'USER_AGENT', 'REFERER'):
            setattr(self, option.lower(), configuration[option])
class Module():
    """Identifies a WAF by walking a decision tree of probe strings."""
    def __init__(self, configuration):
        # The decision tree (loaded from FILE) and the configured HTTP handler.
        self.distinguisher = None
        self.loadfile(findlibrary(configuration['FILE']))
        self.httphandler = Handler(configuration)
        # Result name and the number of membership queries performed so far.
        self.name = None
        self.queries = 0
    def loadfile(self, input_filename):
        """Loads a distinguish tree (JSON) from a custom location"""
        with open(input_filename, 'r') as input_file:
            self.distinguisher = json.load(input_file)
    def algorithm(self, check):
        """
        This function distinguishes a WAF using
        the current distinguish tree and a function
        that performs membership queries.
        Args:
            check (func): The membership query function
        Returns:
            str: The identified WAF name
        """
        pos = self.distinguisher
        while True:
            # Probe the node's STRING; the stringified answer picks the branch.
            pos = pos["RESULT"][str(check(pos["STRING"]))]
            if not isinstance(pos, dict):
                # Leaves are plain strings: the WAF name.
                return pos
    def learn(self):
        def check(string):
            """
            This function performs a membership query.
            Args:
                string (str): The examined string
            Returns:
                str: A string with values either 'true' or 'false'
            """
            self.queries = self.queries + 1
            return self.httphandler.query(string)
        self.name = self.algorithm(check)
    def stats(self):
        # (label, value) pairs reported by the framework.
        return [("Membership Queries",self.queries)]
    def getresult(self):
        return "Waf", self.name | lightbulb/modules/distinguish_waf.py | from lightbulb.core.utils.httphandler import HTTPHandler
from lightbulb.core.utils.common import findlibrary
import json
# Module metadata read by the lightbulb core; `options` entries are
# (NAME, default, required, description) tuples.
META = {
    'author': '<NAME>, <NAME>',
    'description': 'Identifies a WAF filter using a distinguish tree',
    'type': 'Distinguisher',
    'options': [
        ('FILE', None, True, 'File containing a distinguish tree'),
        ('URL', "http://127.0.0.1", True, 'The target URL'),
        ('REQUEST_TYPE', "GET", True, 'The HTTP request type (GET/POST)'),
        ('PARAM', "input", True, 'The request parameter'),
        ('BLOCK', None, False, 'The response string that indicates that the WAF blocks the request'),
        ('BYPASS', None, False, 'The response string that indicates that the WAF allows the request'),
        ('PROXY_SCHEME', None, False, 'The proxy scheme (e.g. http, https)'),
        ('PROXY_HOST', None, False, 'The proxy host'),
        ('PROXY_PORT', None, False, 'The proxy port'),
        ('PROXY_USERNAME', None, False, 'The proxy username'),
        ('PROXY_PASSWORD', None, False, 'The proxy password'),
        ('USER_AGENT', "Mozilla/5.0", True, 'The request user agent'),
        ('REFERER', "http://google.com", True, 'The request referrer'),
        ('PRELOAD', False, True, 'Preload the input filter'),
    ],
    'comments': ['Sample comment 1', 'Sample comment 2']
}
class Handler(HTTPHandler):
    """HTTP handler whose request parameters come from the module options."""
    def setup(self, configuration):
        # Mirror each option value onto a lowercase attribute of the handler.
        self.url = configuration['URL']
        self.request_type = configuration['REQUEST_TYPE']
        self.param = configuration['PARAM']
        self.block = configuration['BLOCK']
        self.bypass = configuration['BYPASS']
        self.proxy_scheme = configuration['PROXY_SCHEME']
        self.proxy_host = configuration['PROXY_HOST']
        self.proxy_port = configuration['PROXY_PORT']
        self.proxy_username = configuration['PROXY_USERNAME']
        self.proxy_password = configuration['PROXY_PASSWORD']
        self.user_agent = configuration['USER_AGENT']
        self.referer = configuration['REFERER']
class Module():
    """Distinguishes which WAF protects a target by walking a decision tree
    of probe strings, one membership query per tree node."""

    def __init__(self, configuration):
        """Load the distinguish tree and build the configured HTTP handler."""
        self.distinguisher = None
        self.loadfile(findlibrary(configuration['FILE']))
        self.httphandler = Handler(configuration)
        self.name = None
        self.queries = 0

    def loadfile(self, input_filename):
        """Read a distinguish tree (JSON) from the given path."""
        with open(input_filename, 'r') as handle:
            self.distinguisher = json.load(handle)

    def algorithm(self, check):
        """
        Walk the distinguish tree: each node's STRING is probed through
        ``check`` and the stringified answer selects the next branch.

        Args:
            check (func): The membership query function
        Returns:
            str: The identified WAF name (a leaf of the tree)
        """
        node = self.distinguisher
        while True:
            node = node["RESULT"][str(check(node["STRING"]))]
            if not isinstance(node, dict):
                break
        return node

    def learn(self):
        """Identify the WAF, counting the membership queries performed."""
        def probe(candidate):
            # One membership query against the target; bump the counter.
            self.queries = self.queries + 1
            return self.httphandler.query(candidate)
        self.name = self.algorithm(probe)

    def stats(self):
        """Return (label, value) statistics for the framework report."""
        return [("Membership Queries", self.queries)]

    def getresult(self):
        """Return the result pair displayed by the framework."""
        return "Waf", self.name
import multiprocessing
import argparse
import os
import itertools
import shutil
from tqdm.auto import tqdm
from skimage import io
def crop_image(img, path):
    """
    Crop one 5000x5000 image and its ground truth into 500x500 tiles and
    save every tile.

    img : string, image file name to crop
    path : string, path to the directory containing the images.
           At this path there should be two sub-directories "images/" and "gt/".
    """
    res_x, res_y = 500, 500
    n_x, n_y = 5000 // res_x, 5000 // res_y
    # Load the image and its ground truth.
    im = io.imread(path + "images/" + img)
    im_gt = io.imread(path + "gt/" + img)
    stem = img.split('.')[0]
    # Save each (y, x) tile of both arrays.
    for x in range(n_x):
        for y in range(n_y):
            window = (slice(y * res_y, (y + 1) * res_y),
                      slice(x * res_x, (x + 1) * res_x))
            io.imsave(f"{path}/img_crop/{stem}_{y}_{x}.png", im[window], check_contrast=False)
            io.imsave(f"{path}/gt_crop/{stem}_{y}_{x}.png", im_gt[window], check_contrast=False)
    return f"Done cropping image {img}."
def multiprocess_crop(path):
    """
    Crop every image of the dataset in parallel.

    path : string, path to the directory containing the images.
           At this path there should be two sub-directories "images/" and "gt/".
    """
    PROCESSES = 8
    print('Creating pool with %d processes' % PROCESSES)
    with multiprocessing.Pool(PROCESSES) as pool:
        # Find the images to crop and check that they all have a ground truth.
        dict_dataset = {x: os.listdir(path + x) for x in ["images/", "gt/"]}
        # Compare as sets: os.listdir order is platform-dependent, so a plain
        # list comparison could fail even when the two directories match.
        assert set(dict_dataset["gt/"]) == set(dict_dataset["images/"]), \
            "There are some images without ground truth or ground truth without images in the dataset."
        # Create the output folders for the cropped tiles.
        os.makedirs(path + "img_crop/", exist_ok=True)
        os.makedirs(path + "gt_crop/", exist_ok=True)
        # Run the cropping jobs asynchronously and wait for each result.
        TASKS = [[img, path] for img in dict_dataset["gt/"]]
        print(f"There are {len(TASKS)} images to crop.")
        results = [pool.apply_async(crop_image, t) for t in TASKS]
        for r in results:
            print('\t', r.get())
    print()
def split_train_test(path):
    """
    Split the cropped dataset into a training and a test set.

    path : string, path to the directory containing the cropped images.
           At this path there should be two sub-directories "img_crop/" and "gt_crop/".
    """
    # Make sure the four destination folders exist.
    for sub in ("img_crop_train/", "img_crop_test/",
                "gt_crop_train/", "gt_crop_test/"):
        if not os.path.isdir(path + sub):
            os.makedirs(path + sub)
    lo_files = os.listdir(path + 'img_crop/')
    # Tiles from source images <city>1 .. <city>5 go to the test set.
    lo_names = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
    lo_idx = ['1', '2', '3', '4', '5']
    all_test_files = [''.join(x) for x in itertools.product(lo_names, lo_idx)]
    for file in tqdm(lo_files):
        split = 'test' if file.split('_')[0].endswith(tuple(all_test_files)) else 'train'
        shutil.copyfile(path + 'gt_crop/' + file, path + 'gt_crop_' + split + '/' + file)
        shutil.copyfile(path + 'img_crop/' + file, path + 'img_crop_' + split + '/' + file)
if __name__ == '__main__':
    # example of path : C:/Users/tangu/Documents/MVA/S2/1.SatelliteImage/Project/data/AerialImageDataset/train/
    # freeze_support() keeps multiprocessing working in frozen Windows builds.
    multiprocessing.freeze_support()
    parser = argparse.ArgumentParser(description='Crop a dataset of images and ground truth.')
    parser.add_argument('path', metavar='dataset_path', type=str,
                        help='path to the folder containing the dataset')
    parser.add_argument('--no_crop', action="store_true",
                        help='whether to crop the images in the dataset')
    parser.add_argument('--no_split', action="store_true",
                        help='whether to split the dataset into train and test')
    args = parser.parse_args()
    if not args.no_crop:
        print("---\nCropping Images\n---")
        multiprocess_crop(args.path)
    if not args.no_split:
        print("---\nSplitting into train/test\n---")
        split_train_test(args.path)
print("Done !") | scripts/preprocess_im_sat_data.py | import multiprocessing
import argparse
import os
import itertools
import shutil
from tqdm.auto import tqdm
from skimage import io
def crop_image(img, path):
    """
    Crop the image and the corresponding ground truth, and save all sub-images.

    img : string, image to crop
    path : string, path to directory containing images.
           At this path there should be two sub-directories "images/" and "gt/"
    """
    # Tile size and the resulting grid dimensions for a 5000x5000 input.
    res_x = 500
    res_y = 500
    n_x = 5000 // res_x
    n_y = 5000 // res_y
    # Load image and ground truth.
    image_path = path + "images/" + img
    im = io.imread(image_path)
    gt_path = path + "gt/" + img
    im_gt = io.imread(gt_path)
    # Crop: write each (y, x) tile of the image and of the ground truth.
    for x in range(n_x):
        for y in range(n_y):
            io.imsave(f"{path}/img_crop/{img.split('.')[0]}_{y}_{x}.png", im[y*res_y : (y+1)*res_y, x*res_x:(x+1)*res_x], check_contrast=False)
            io.imsave(f"{path}/gt_crop/{img.split('.')[0]}_{y}_{x}.png", im_gt[y*res_y : (y+1)*res_y, x*res_x:(x+1)*res_x], check_contrast=False)
    return f"Done cropping image {img}."
def multiprocess_crop(path):
"""
Multiprocessing function that allows to crop all images in a parallel way
path : string, path to directory containing images.
At this path their should be two sub directories "images/" and "gt/"
"""
PROCESSES = 8
print('Creating pool with %d processes' % PROCESSES)
with multiprocessing.Pool(PROCESSES) as pool:
# Find path for images to crop and check that they all have ground truth
dict_dataset = {x: os.listdir(path + x) for x in ["images/", "gt/"]}
assert dict_dataset["gt/"] == dict_dataset["images/"], "There are some images without ground truth or ground truth without images in the dataset."
# Create folder for croped images
if not os.path.isdir(path + "img_crop/"):
os.makedirs(path + "img_crop/")
if not os.path.isdir(path + "gt_crop/"):
os.makedirs(path + "gt_crop/")
# Run Cropping
TASKS = [[img, path] for img in dict_dataset["gt/"]]
print(f"There are {len(TASKS)} images to crop.")
results = [pool.apply_async(crop_image, t) for t in TASKS]
for r in results:
print('\t', r.get())
print()
def split_train_test(path):
"""
Split the cropped dataset into a training and a test set
path : string, path to directory containing cropped images.
At this path their should be two sub directories "img_crop/" and "gt_crop/"
"""
if not os.path.isdir(path + "img_crop_train/"):
os.makedirs(path + "img_crop_train/")
if not os.path.isdir(path + "img_crop_test/"):
os.makedirs(path + "img_crop_test/")
if not os.path.isdir(path + "gt_crop_train/"):
os.makedirs(path + "gt_crop_train/")
if not os.path.isdir(path + "gt_crop_test/"):
os.makedirs(path + "gt_crop_test/")
lo_files = os.listdir(path + 'img_crop/')
lo_names = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
lo_idx = ['1', '2', '3', '4', '5']
all_test_files = [''.join(x) for x in itertools.product(lo_names, lo_idx)]
for file in tqdm(lo_files):
if file.split('_')[0].endswith(tuple(all_test_files)):
shutil.copyfile(path + 'gt_crop/' + file, path + 'gt_crop_test/' + file)
shutil.copyfile(path + 'img_crop/' + file, path + 'img_crop_test/' + file)
else:
shutil.copyfile(path + 'gt_crop/' + file, path + 'gt_crop_train/' + file)
shutil.copyfile(path + 'img_crop/' + file, path + 'img_crop_train/' + file)
if __name__ == '__main__':
# example of path : C:/Users/tangu/Documents/MVA/S2/1.SatelliteImage/Project/data/AerialImageDataset/train/
multiprocessing.freeze_support()
parser = argparse.ArgumentParser(description='Crop a dataset of images and ground truth.')
parser.add_argument('path', metavar='dataset_path', type=str,
help='path to the folder conatining the dataset')
parser.add_argument('--no_crop', action="store_true",
help='whether to crop the images in the dataset')
parser.add_argument('--no_split', action="store_true",
help='whether to split the dataset into train and test')
args = parser.parse_args()
if not args.no_crop:
print("---\nCropping Images\n---")
multiprocess_crop(args.path)
if not args.no_split:
print("---\nSplitting into train/test\n---")
split_train_test(args.path)
print("Done !") | 0.373419 | 0.329419 |
from ..map_resource.utility import Utility
from .. import tools
import pandas as pd
settings = {
'app_id': 'F8aPRXcW3MmyUvQ8Z3J9',
'app_code' : 'IVp1_zoGHdLdz0GvD_Eqsw',
'map_tile_base_url': 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/',
'json_tile_base_url': 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?'
}
util = Utility(settings)
def test_get_tile():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/enterprise-map-tile/topics/key-concepts.html
"""
assert util.get_tile(52.525439, 13.38727, 12) == [2200, 1343]
def test_get_quadkeys():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/traffic/common/map_tile/topics/quadkeys.html
"""
assert util.get_quadkeys(35210, 21493, 16) == "1202102332221212"
def test_get_map_tile_resource():
assert util.get_map_tile_resource((33.670156, -84.325984),"latlon", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
assert util.get_map_tile_resource((4354, 6562),"colrow", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
def test_get_traffic_json_resource():
assert util.get_traffic_json_resource((34.9237, -82.4383), "latlon", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
assert util.get_traffic_json_resource((4440, 6493), "colrow", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
def test_get_area_tile_matrix():
df1 = pd.DataFrame([[(4350, 6557),(4351, 6557),(4352, 6557)],
[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df2 = pd.DataFrame([[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df3 = pd.DataFrame([[(4351, 6557),(4352, 6557)],
[(4351, 6558),(4352, 6558)],
[(4351, 6559),(4352, 6559)]])
assert df1.equals(util.get_area_tile_matrix([(33.766764, -84.409533), (33.740003, -84.368978)], 14))
assert df2.equals(util.get_area_tile_matrix([(33.741455, -84.397218), (33.744203, -84.369581)], 14)) # asymmetrical case 1
assert df3.equals(util.get_area_tile_matrix([(33.728999, -84.395856), (33.775902, -84.363917)], 14)) # asymmetrical case 2
def test_get_area_tile_matrix_url():
df = tools.load_data_object("test_data/get_area_tile_matrix_url() for map_tile.pkl")
cor1 = (33.766764, -84.409533)
cor2 = (33.740003, -84.368978)
info = util.get_area_tile_matrix([cor1, cor2], 14)
matrix = util.get_area_tile_matrix_url("map_tile", [cor1, cor2], 14)
assert df.equals(matrix)
def test_get_distance():
assert util.get_distance((33.70524,-84.40353), (33.71337,-84.39347)) == 1297.72758534478
def test_read_geojson_polygon():
assert util.read_geojson_polygon('{ "type": "FeatureCollection", "features": [ { "type": "Feature", "geometry": { "type": "Polygon", "coordinates": [ [ [ -84.39285278320312, 33.76266589608855 ], [ -84.3738842010498, 33.770015152780125 ], [ -84.3610954284668, 33.7613101391079 ], [ -84.37019348144531, 33.74468253332004 ], [ -84.38830375671387, 33.751391054166746 ], [ -84.39705848693848, 33.758384485188 ], [ -84.39285278320312, 33.76266589608855 ] ] ] }, "properties": {} } ] }') == [[33.76266589608855,-84.39285278320312],[33.770015152780125,-84.3738842010498],[33.7613101391079,-84.3610954284668],[33.74468253332004,-84.37019348144531],[33.751391054166746,-84.38830375671387],[33.758384485188,-84.39705848693848],[33.76266589608855,-84.39285278320312]] | streettraffic/tests/test_utility.py | from ..map_resource.utility import Utility
from .. import tools
import pandas as pd
settings = {
'app_id': 'F8aPRXcW3MmyUvQ8Z3J9',
'app_code' : 'IVp1_zoGHdLdz0GvD_Eqsw',
'map_tile_base_url': 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/',
'json_tile_base_url': 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?'
}
util = Utility(settings)
def test_get_tile():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/enterprise-map-tile/topics/key-concepts.html
"""
assert util.get_tile(52.525439, 13.38727, 12) == [2200, 1343]
def test_get_quadkeys():
"""
The official example provided by HERE
https://developer.here.com/rest-apis/documentation/traffic/common/map_tile/topics/quadkeys.html
"""
assert util.get_quadkeys(35210, 21493, 16) == "1202102332221212"
def test_get_map_tile_resource():
assert util.get_map_tile_resource((33.670156, -84.325984),"latlon", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
assert util.get_map_tile_resource((4354, 6562),"colrow", 14, 512) == \
'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
def test_get_traffic_json_resource():
assert util.get_traffic_json_resource((34.9237, -82.4383), "latlon", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
assert util.get_traffic_json_resource((4440, 6493), "colrow", 14) == \
'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
def test_get_area_tile_matrix():
df1 = pd.DataFrame([[(4350, 6557),(4351, 6557),(4352, 6557)],
[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df2 = pd.DataFrame([[(4350, 6558),(4351, 6558),(4352, 6558)],
[(4350, 6559),(4351, 6559),(4352, 6559)]])
df3 = pd.DataFrame([[(4351, 6557),(4352, 6557)],
[(4351, 6558),(4352, 6558)],
[(4351, 6559),(4352, 6559)]])
assert df1.equals(util.get_area_tile_matrix([(33.766764, -84.409533), (33.740003, -84.368978)], 14))
assert df2.equals(util.get_area_tile_matrix([(33.741455, -84.397218), (33.744203, -84.369581)], 14)) # asymmetrical case 1
assert df3.equals(util.get_area_tile_matrix([(33.728999, -84.395856), (33.775902, -84.363917)], 14)) # asymmetrical case 2
def test_get_area_tile_matrix_url():
df = tools.load_data_object("test_data/get_area_tile_matrix_url() for map_tile.pkl")
cor1 = (33.766764, -84.409533)
cor2 = (33.740003, -84.368978)
info = util.get_area_tile_matrix([cor1, cor2], 14)
matrix = util.get_area_tile_matrix_url("map_tile", [cor1, cor2], 14)
assert df.equals(matrix)
def test_get_distance():
assert util.get_distance((33.70524,-84.40353), (33.71337,-84.39347)) == 1297.72758534478
def test_read_geojson_polygon():
assert util.read_geojson_polygon('{ "type": "FeatureCollection", "features": [ { "type": "Feature", "geometry": { "type": "Polygon", "coordinates": [ [ [ -84.39285278320312, 33.76266589608855 ], [ -84.3738842010498, 33.770015152780125 ], [ -84.3610954284668, 33.7613101391079 ], [ -84.37019348144531, 33.74468253332004 ], [ -84.38830375671387, 33.751391054166746 ], [ -84.39705848693848, 33.758384485188 ], [ -84.39285278320312, 33.76266589608855 ] ] ] }, "properties": {} } ] }') == [[33.76266589608855,-84.39285278320312],[33.770015152780125,-84.3738842010498],[33.7613101391079,-84.3610954284668],[33.74468253332004,-84.37019348144531],[33.751391054166746,-84.38830375671387],[33.758384485188,-84.39705848693848],[33.76266589608855,-84.39285278320312]] | 0.502441 | 0.405979 |
from msg import ReqMsg
from cmds import Cmd
class Window(Cmd):
def __init__(self, session):
self._session = session
# Function: window_get_buffer
# Parameters Window: window
# Returns Buffer
# Recieves channel id False
# Can fail True
def get_buffer(self, window):
return self.send_sync(ReqMsg('window_get_buffer', *[window]))
# Function: window_get_cursor
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_cursor(self, window):
return self.send_sync(ReqMsg('window_get_cursor', *[window]))
# Function: window_set_cursor
# Parameters Window: window, ArrayOf(Integer, 2): pos
# Returns void
# Recieves channel id False
# Can fail True
def set_cursor(self, window, pos):
return self.send_sync(ReqMsg('window_set_cursor', *[window, pos]))
# Function: window_get_height
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_height(self, window):
return self.send_sync(ReqMsg('window_get_height', *[window]))
# Function: window_set_height
# Parameters Window: window, Integer: height
# Returns void
# Recieves channel id False
# Can fail True
def set_height(self, window, height):
return self.send_sync(ReqMsg('window_set_height', *[window, height]))
# Function: window_get_width
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_width(self, window):
return self.send_sync(ReqMsg('window_get_width', *[window]))
# Function: window_set_width
# Parameters Window: window, Integer: width
# Returns void
# Recieves channel id False
# Can fail True
def set_width(self, window, width):
return self.send_sync(ReqMsg('window_set_width', *[window, width]))
# Function: window_get_var
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, window, name):
return self.send_sync(ReqMsg('window_get_var', *[window, name]))
# Function: window_set_var
# Parameters Window: window, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, window, name, value):
return self.send_sync(ReqMsg('window_set_var', *[window, name, value]))
# Function: window_get_option
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, window, name):
return self.send_sync(ReqMsg('window_get_option', *[window, name]))
# Function: window_set_option
# Parameters Window: window, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, window, name, value):
return self.send_sync(ReqMsg('window_set_option', *[window, name, value]))
# Function: window_get_position
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_position(self, window):
return self.send_sync(ReqMsg('window_get_position', *[window]))
# Function: window_get_tabpage
# Parameters Window: window
# Returns Tabpage
# Recieves channel id False
# Can fail True
def get_tabpage(self, window):
return self.send_sync(ReqMsg('window_get_tabpage', *[window]))
# Function: window_is_valid
# Parameters Window: window
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, window):
return self.send_sync(ReqMsg('window_is_valid', *[window]))
class Buffer(Cmd):
def __init__(self, session):
self._session = session
# Function: buffer_line_count
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def line_count(self, buffer):
return self.send_sync(ReqMsg('buffer_line_count', *[buffer]))
# Function: buffer_get_line
# Parameters Buffer: buffer, Integer: index
# Returns String
# Recieves channel id False
# Can fail True
def get_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_get_line', *[buffer, index]))
# Function: buffer_set_line
# Parameters Buffer: buffer, Integer: index, String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_line(self, buffer, index, line):
return self.send_sync(ReqMsg('buffer_set_line', *[buffer, index, line]))
# Function: buffer_del_line
# Parameters Buffer: buffer, Integer: index
# Returns void
# Recieves channel id False
# Can fail True
def del_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_del_line', *[buffer, index]))
# Function: buffer_get_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail True
def get_line_slice(self, buffer, start, end, include_start, include_end):
return self.send_sync(ReqMsg('buffer_get_line_slice', *[buffer, start, end, include_start, include_end]))
# Function: buffer_set_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end, ArrayOf(String): replacement
# Returns void
# Recieves channel id False
# Can fail True
def set_line_slice(self, buffer, start, end, include_start, include_end, replacement):
return self.send_sync(ReqMsg('buffer_set_line_slice', *[buffer, start, end, include_start, include_end, replacement]))
# Function: buffer_get_var
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_var', *[buffer, name]))
# Function: buffer_set_var
# Parameters Buffer: buffer, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_var', *[buffer, name, value]))
# Function: buffer_get_option
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_option', *[buffer, name]))
# Function: buffer_set_option
# Parameters Buffer: buffer, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_option', *[buffer, name, value]))
# Function: buffer_get_number
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def get_number(self, buffer):
return self.send_sync(ReqMsg('buffer_get_number', *[buffer]))
# Function: buffer_get_name
# Parameters Buffer: buffer
# Returns String
# Recieves channel id False
# Can fail True
def get_name(self, buffer):
return self.send_sync(ReqMsg('buffer_get_name', *[buffer]))
# Function: buffer_set_name
# Parameters Buffer: buffer, String: name
# Returns void
# Recieves channel id False
# Can fail True
def set_name(self, buffer, name):
return self.send_sync(ReqMsg('buffer_set_name', *[buffer, name]))
# Function: buffer_is_valid
# Parameters Buffer: buffer
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, buffer):
return self.send_sync(ReqMsg('buffer_is_valid', *[buffer]))
# Function: buffer_insert
# Parameters Buffer: buffer, Integer: lnum, ArrayOf(String): lines
# Returns void
# Recieves channel id False
# Can fail True
def insert(self, buffer, lnum, lines):
return self.send_sync(ReqMsg('buffer_insert', *[buffer, lnum, lines]))
# Function: buffer_get_mark
# Parameters Buffer: buffer, String: name
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_mark(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_mark', *[buffer, name]))
class Tabpage(Cmd):
def __init__(self, session):
self._session = session
# Function: tabpage_get_windows
# Parameters Tabpage: tabpage
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail True
def get_windows(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_windows', *[tabpage]))
# Function: tabpage_get_var
# Parameters Tabpage: tabpage, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, tabpage, name):
return self.send_sync(ReqMsg('tabpage_get_var', *[tabpage, name]))
# Function: tabpage_set_var
# Parameters Tabpage: tabpage, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, tabpage, name, value):
return self.send_sync(ReqMsg('tabpage_set_var', *[tabpage, name, value]))
# Function: tabpage_get_window
# Parameters Tabpage: tabpage
# Returns Window
# Recieves channel id False
# Can fail True
def get_window(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_window', *[tabpage]))
# Function: tabpage_is_valid
# Parameters Tabpage: tabpage
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, tabpage):
return self.send_sync(ReqMsg('tabpage_is_valid', *[tabpage]))
class Vim(Cmd):
def __init__(self, session):
self._session = session
# Function: vim_push_keys
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def push_keys(self, v_str):
return self.send_sync(ReqMsg('vim_push_keys', *[v_str]))
# Function: vim_command
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail True
def command(self, v_str):
return self.send_sync(ReqMsg('vim_command', *[v_str]))
# Function: vim_feedkeys
# Parameters String: keys, String: mode
# Returns void
# Recieves channel id False
# Can fail False
def feedkeys(self, keys, mode):
return self.send_sync(ReqMsg('vim_feedkeys', *[keys, mode]))
# Function: vim_replace_termcodes
# Parameters String: str, Boolean: from_part, Boolean: do_lt, Boolean: special
# Returns String
# Recieves channel id False
# Can fail False
def replace_termcodes(self, v_str, from_part, do_lt, special):
return self.send_sync(ReqMsg('vim_replace_termcodes', *[v_str, from_part, do_lt, special]))
# Function: vim_eval
# Parameters String: str
# Returns Object
# Recieves channel id False
# Can fail True
def eval(self, v_str):
return self.send_sync(ReqMsg('vim_eval', *[v_str]))
# Function: vim_strwidth
# Parameters String: str
# Returns Integer
# Recieves channel id False
# Can fail True
def strwidth(self, v_str):
return self.send_sync(ReqMsg('vim_strwidth', *[v_str]))
# Function: vim_list_runtime_paths
# Parameters
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail False
def list_runtime_paths(self, ):
return self.send_sync(ReqMsg('vim_list_runtime_paths', *[]))
# Function: vim_change_directory
# Parameters String: dir
# Returns void
# Recieves channel id False
# Can fail True
def change_directory(self, v_dir):
return self.send_sync(ReqMsg('vim_change_directory', *[v_dir]))
# Function: vim_get_current_line
# Parameters
# Returns String
# Recieves channel id False
# Can fail True
def get_current_line(self, ):
return self.send_sync(ReqMsg('vim_get_current_line', *[]))
# Function: vim_set_current_line
# Parameters String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_current_line(self, line):
return self.send_sync(ReqMsg('vim_set_current_line', *[line]))
# Function: vim_del_current_line
# Parameters
# Returns void
# Recieves channel id False
# Can fail True
def del_current_line(self, ):
return self.send_sync(ReqMsg('vim_del_current_line', *[]))
# Function: vim_get_var
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, name):
return self.send_sync(ReqMsg('vim_get_var', *[name]))
# Function: vim_set_var
# Parameters String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, name, value):
return self.send_sync(ReqMsg('vim_set_var', *[name, value]))
# Function: vim_get_vvar
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_vvar(self, name):
return self.send_sync(ReqMsg('vim_get_vvar', *[name]))
# Function: vim_get_option
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, name):
return self.send_sync(ReqMsg('vim_get_option', *[name]))
# Function: vim_set_option
# Parameters String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, name, value):
return self.send_sync(ReqMsg('vim_set_option', *[name, value]))
# Function: vim_out_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def out_write(self, v_str):
return self.send_sync(ReqMsg('vim_out_write', *[v_str]))
# Function: vim_err_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def err_write(self, v_str):
return self.send_sync(ReqMsg('vim_err_write', *[v_str]))
# Function: vim_report_error
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def report_error(self, v_str):
return self.send_sync(ReqMsg('vim_report_error', *[v_str]))
# Function: vim_get_buffers
# Parameters
# Returns ArrayOf(Buffer)
# Recieves channel id False
# Can fail False
def get_buffers(self, ):
return self.send_sync(ReqMsg('vim_get_buffers', *[]))
# Function: vim_get_current_buffer
# Parameters
# Returns Buffer
# Recieves channel id False
# Can fail False
def get_current_buffer(self, ):
return self.send_sync(ReqMsg('vim_get_current_buffer', *[]))
# Function: vim_set_current_buffer
# Parameters Buffer: buffer
# Returns void
# Recieves channel id False
# Can fail True
def set_current_buffer(self, buffer):
return self.send_sync(ReqMsg('vim_set_current_buffer', *[buffer]))
# Function: vim_get_windows
# Parameters
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail False
def get_windows(self, ):
return self.send_sync(ReqMsg('vim_get_windows', *[]))
# Function: vim_get_current_window
# Parameters
# Returns Window
# Recieves channel id False
# Can fail False
def get_current_window(self, ):
return self.send_sync(ReqMsg('vim_get_current_window', *[]))
# Function: vim_set_current_window
# Parameters Window: window
# Returns void
# Recieves channel id False
# Can fail True
def set_current_window(self, window):
return self.send_sync(ReqMsg('vim_set_current_window', *[window]))
# Function: vim_get_tabpages
# Parameters
# Returns ArrayOf(Tabpage)
# Recieves channel id False
# Can fail False
def get_tabpages(self, ):
return self.send_sync(ReqMsg('vim_get_tabpages', *[]))
# Function: vim_get_current_tabpage
# Parameters
# Returns Tabpage
# Recieves channel id False
# Can fail False
def get_current_tabpage(self, ):
return self.send_sync(ReqMsg('vim_get_current_tabpage', *[]))
# Function: vim_set_current_tabpage
# Parameters Tabpage: tabpage
# Returns void
# Recieves channel id False
# Can fail True
def set_current_tabpage(self, tabpage):
return self.send_sync(ReqMsg('vim_set_current_tabpage', *[tabpage]))
# Function: vim_subscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def subscribe(self, event):
return self.send_sync(ReqMsg('vim_subscribe', *[event]))
# Function: vim_unsubscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def unsubscribe(self, event):
return self.send_sync(ReqMsg('vim_unsubscribe', *[event]))
# Function: vim_register_provider
# Parameters String: feature
# Returns void
# Recieves channel id True
# Can fail True
def register_provider(self, feature):
return self.send_sync(ReqMsg('vim_register_provider', *[feature]))
# Function: vim_get_api_info
# Parameters
# Returns Array
# Recieves channel id True
# Can fail False
def get_api_info(self, ):
return self.send_sync(ReqMsg('vim_get_api_info', *[]))
function_classes = {
'window': Window,
'buffer': Buffer,
'tabpage': Tabpage,
'vim': Vim,
} | client/nvim_funcs.py | from msg import ReqMsg
from cmds import Cmd
class Window(Cmd):
def __init__(self, session):
self._session = session
# Function: window_get_buffer
# Parameters Window: window
# Returns Buffer
# Recieves channel id False
# Can fail True
def get_buffer(self, window):
return self.send_sync(ReqMsg('window_get_buffer', *[window]))
# Function: window_get_cursor
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_cursor(self, window):
return self.send_sync(ReqMsg('window_get_cursor', *[window]))
# Function: window_set_cursor
# Parameters Window: window, ArrayOf(Integer, 2): pos
# Returns void
# Recieves channel id False
# Can fail True
def set_cursor(self, window, pos):
return self.send_sync(ReqMsg('window_set_cursor', *[window, pos]))
# Function: window_get_height
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_height(self, window):
return self.send_sync(ReqMsg('window_get_height', *[window]))
# Function: window_set_height
# Parameters Window: window, Integer: height
# Returns void
# Recieves channel id False
# Can fail True
def set_height(self, window, height):
return self.send_sync(ReqMsg('window_set_height', *[window, height]))
# Function: window_get_width
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_width(self, window):
return self.send_sync(ReqMsg('window_get_width', *[window]))
# Function: window_set_width
# Parameters Window: window, Integer: width
# Returns void
# Recieves channel id False
# Can fail True
def set_width(self, window, width):
return self.send_sync(ReqMsg('window_set_width', *[window, width]))
# Function: window_get_var
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, window, name):
return self.send_sync(ReqMsg('window_get_var', *[window, name]))
# Function: window_set_var
# Parameters Window: window, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, window, name, value):
return self.send_sync(ReqMsg('window_set_var', *[window, name, value]))
# Function: window_get_option
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, window, name):
return self.send_sync(ReqMsg('window_get_option', *[window, name]))
# Function: window_set_option
# Parameters Window: window, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, window, name, value):
return self.send_sync(ReqMsg('window_set_option', *[window, name, value]))
# Function: window_get_position
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_position(self, window):
return self.send_sync(ReqMsg('window_get_position', *[window]))
# Function: window_get_tabpage
# Parameters Window: window
# Returns Tabpage
# Recieves channel id False
# Can fail True
def get_tabpage(self, window):
return self.send_sync(ReqMsg('window_get_tabpage', *[window]))
# Function: window_is_valid
# Parameters Window: window
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, window):
return self.send_sync(ReqMsg('window_is_valid', *[window]))
class Buffer(Cmd):
def __init__(self, session):
self._session = session
# Function: buffer_line_count
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def line_count(self, buffer):
return self.send_sync(ReqMsg('buffer_line_count', *[buffer]))
# Function: buffer_get_line
# Parameters Buffer: buffer, Integer: index
# Returns String
# Recieves channel id False
# Can fail True
def get_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_get_line', *[buffer, index]))
# Function: buffer_set_line
# Parameters Buffer: buffer, Integer: index, String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_line(self, buffer, index, line):
return self.send_sync(ReqMsg('buffer_set_line', *[buffer, index, line]))
# Function: buffer_del_line
# Parameters Buffer: buffer, Integer: index
# Returns void
# Recieves channel id False
# Can fail True
def del_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_del_line', *[buffer, index]))
# Function: buffer_get_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail True
def get_line_slice(self, buffer, start, end, include_start, include_end):
return self.send_sync(ReqMsg('buffer_get_line_slice', *[buffer, start, end, include_start, include_end]))
# Function: buffer_set_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end, ArrayOf(String): replacement
# Returns void
# Recieves channel id False
# Can fail True
def set_line_slice(self, buffer, start, end, include_start, include_end, replacement):
return self.send_sync(ReqMsg('buffer_set_line_slice', *[buffer, start, end, include_start, include_end, replacement]))
# Function: buffer_get_var
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_var', *[buffer, name]))
# Function: buffer_set_var
# Parameters Buffer: buffer, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_var', *[buffer, name, value]))
# Function: buffer_get_option
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_option', *[buffer, name]))
# Function: buffer_set_option
# Parameters Buffer: buffer, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_option', *[buffer, name, value]))
# Function: buffer_get_number
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def get_number(self, buffer):
return self.send_sync(ReqMsg('buffer_get_number', *[buffer]))
# Function: buffer_get_name
# Parameters Buffer: buffer
# Returns String
# Recieves channel id False
# Can fail True
def get_name(self, buffer):
return self.send_sync(ReqMsg('buffer_get_name', *[buffer]))
# Function: buffer_set_name
# Parameters Buffer: buffer, String: name
# Returns void
# Recieves channel id False
# Can fail True
def set_name(self, buffer, name):
return self.send_sync(ReqMsg('buffer_set_name', *[buffer, name]))
# Function: buffer_is_valid
# Parameters Buffer: buffer
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, buffer):
return self.send_sync(ReqMsg('buffer_is_valid', *[buffer]))
# Function: buffer_insert
# Parameters Buffer: buffer, Integer: lnum, ArrayOf(String): lines
# Returns void
# Recieves channel id False
# Can fail True
def insert(self, buffer, lnum, lines):
return self.send_sync(ReqMsg('buffer_insert', *[buffer, lnum, lines]))
# Function: buffer_get_mark
# Parameters Buffer: buffer, String: name
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_mark(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_mark', *[buffer, name]))
class Tabpage(Cmd):
def __init__(self, session):
self._session = session
# Function: tabpage_get_windows
# Parameters Tabpage: tabpage
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail True
def get_windows(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_windows', *[tabpage]))
# Function: tabpage_get_var
# Parameters Tabpage: tabpage, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, tabpage, name):
return self.send_sync(ReqMsg('tabpage_get_var', *[tabpage, name]))
# Function: tabpage_set_var
# Parameters Tabpage: tabpage, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, tabpage, name, value):
return self.send_sync(ReqMsg('tabpage_set_var', *[tabpage, name, value]))
# Function: tabpage_get_window
# Parameters Tabpage: tabpage
# Returns Window
# Recieves channel id False
# Can fail True
def get_window(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_window', *[tabpage]))
# Function: tabpage_is_valid
# Parameters Tabpage: tabpage
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, tabpage):
return self.send_sync(ReqMsg('tabpage_is_valid', *[tabpage]))
class Vim(Cmd):
def __init__(self, session):
self._session = session
# Function: vim_push_keys
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def push_keys(self, v_str):
return self.send_sync(ReqMsg('vim_push_keys', *[v_str]))
# Function: vim_command
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail True
def command(self, v_str):
return self.send_sync(ReqMsg('vim_command', *[v_str]))
# Function: vim_feedkeys
# Parameters String: keys, String: mode
# Returns void
# Recieves channel id False
# Can fail False
def feedkeys(self, keys, mode):
return self.send_sync(ReqMsg('vim_feedkeys', *[keys, mode]))
# Function: vim_replace_termcodes
# Parameters String: str, Boolean: from_part, Boolean: do_lt, Boolean: special
# Returns String
# Recieves channel id False
# Can fail False
def replace_termcodes(self, v_str, from_part, do_lt, special):
return self.send_sync(ReqMsg('vim_replace_termcodes', *[v_str, from_part, do_lt, special]))
# Function: vim_eval
# Parameters String: str
# Returns Object
# Recieves channel id False
# Can fail True
def eval(self, v_str):
return self.send_sync(ReqMsg('vim_eval', *[v_str]))
# Function: vim_strwidth
# Parameters String: str
# Returns Integer
# Recieves channel id False
# Can fail True
def strwidth(self, v_str):
return self.send_sync(ReqMsg('vim_strwidth', *[v_str]))
# Function: vim_list_runtime_paths
# Parameters
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail False
def list_runtime_paths(self, ):
return self.send_sync(ReqMsg('vim_list_runtime_paths', *[]))
# Function: vim_change_directory
# Parameters String: dir
# Returns void
# Recieves channel id False
# Can fail True
def change_directory(self, v_dir):
return self.send_sync(ReqMsg('vim_change_directory', *[v_dir]))
# Function: vim_get_current_line
# Parameters
# Returns String
# Recieves channel id False
# Can fail True
def get_current_line(self, ):
return self.send_sync(ReqMsg('vim_get_current_line', *[]))
# Function: vim_set_current_line
# Parameters String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_current_line(self, line):
return self.send_sync(ReqMsg('vim_set_current_line', *[line]))
# Function: vim_del_current_line
# Parameters
# Returns void
# Recieves channel id False
# Can fail True
def del_current_line(self, ):
return self.send_sync(ReqMsg('vim_del_current_line', *[]))
# Function: vim_get_var
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, name):
return self.send_sync(ReqMsg('vim_get_var', *[name]))
# Function: vim_set_var
# Parameters String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, name, value):
return self.send_sync(ReqMsg('vim_set_var', *[name, value]))
# Function: vim_get_vvar
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_vvar(self, name):
return self.send_sync(ReqMsg('vim_get_vvar', *[name]))
# Function: vim_get_option
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, name):
return self.send_sync(ReqMsg('vim_get_option', *[name]))
# Function: vim_set_option
# Parameters String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, name, value):
return self.send_sync(ReqMsg('vim_set_option', *[name, value]))
# Function: vim_out_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def out_write(self, v_str):
return self.send_sync(ReqMsg('vim_out_write', *[v_str]))
# Function: vim_err_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def err_write(self, v_str):
return self.send_sync(ReqMsg('vim_err_write', *[v_str]))
# Function: vim_report_error
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def report_error(self, v_str):
return self.send_sync(ReqMsg('vim_report_error', *[v_str]))
# Function: vim_get_buffers
# Parameters
# Returns ArrayOf(Buffer)
# Recieves channel id False
# Can fail False
def get_buffers(self, ):
return self.send_sync(ReqMsg('vim_get_buffers', *[]))
# Function: vim_get_current_buffer
# Parameters
# Returns Buffer
# Recieves channel id False
# Can fail False
def get_current_buffer(self, ):
return self.send_sync(ReqMsg('vim_get_current_buffer', *[]))
# Function: vim_set_current_buffer
# Parameters Buffer: buffer
# Returns void
# Recieves channel id False
# Can fail True
def set_current_buffer(self, buffer):
return self.send_sync(ReqMsg('vim_set_current_buffer', *[buffer]))
# Function: vim_get_windows
# Parameters
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail False
def get_windows(self, ):
return self.send_sync(ReqMsg('vim_get_windows', *[]))
# Function: vim_get_current_window
# Parameters
# Returns Window
# Recieves channel id False
# Can fail False
def get_current_window(self, ):
return self.send_sync(ReqMsg('vim_get_current_window', *[]))
# Function: vim_set_current_window
# Parameters Window: window
# Returns void
# Recieves channel id False
# Can fail True
def set_current_window(self, window):
return self.send_sync(ReqMsg('vim_set_current_window', *[window]))
# Function: vim_get_tabpages
# Parameters
# Returns ArrayOf(Tabpage)
# Recieves channel id False
# Can fail False
def get_tabpages(self, ):
return self.send_sync(ReqMsg('vim_get_tabpages', *[]))
# Function: vim_get_current_tabpage
# Parameters
# Returns Tabpage
# Recieves channel id False
# Can fail False
def get_current_tabpage(self, ):
return self.send_sync(ReqMsg('vim_get_current_tabpage', *[]))
# Function: vim_set_current_tabpage
# Parameters Tabpage: tabpage
# Returns void
# Recieves channel id False
# Can fail True
def set_current_tabpage(self, tabpage):
return self.send_sync(ReqMsg('vim_set_current_tabpage', *[tabpage]))
# Function: vim_subscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def subscribe(self, event):
return self.send_sync(ReqMsg('vim_subscribe', *[event]))
# Function: vim_unsubscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def unsubscribe(self, event):
return self.send_sync(ReqMsg('vim_unsubscribe', *[event]))
# Function: vim_register_provider
# Parameters String: feature
# Returns void
# Recieves channel id True
# Can fail True
def register_provider(self, feature):
return self.send_sync(ReqMsg('vim_register_provider', *[feature]))
# Function: vim_get_api_info
# Parameters
# Returns Array
# Recieves channel id True
# Can fail False
def get_api_info(self, ):
return self.send_sync(ReqMsg('vim_get_api_info', *[]))
function_classes = {
'window': Window,
'buffer': Buffer,
'tabpage': Tabpage,
'vim': Vim,
} | 0.849222 | 0.086516 |
import logging
import time
import warnings
import numpy as np
import xgboost
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.base import clone
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.metrics import mean_squared_error
from explorer import Explorer
from util import RandomFloat
from util import RandomInt
warnings.filterwarnings("ignore")
plt3d = mplot3d
seed = 42
np.random.seed(seed)
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def mean_absolute_percentage_error(y_true, y_pred):
"""
compute mean absolute percentage error
:param y_true:
:param y_pred:
:return:
"""
y_true, y_pred = np.array(y_true), np.array(y_pred)
for i, y in enumerate(y_true):
if y == 0:
y_true[i] = 1
y_pred[i] = 1
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def plot1D(X, y, plot_observed_data=False, plot_predictions=False, model=None, n_test=500, ):
plt.figure(figsize=(20, 10))
if plot_observed_data:
plt.plot(X, y, 'kx')
if plot_predictions:
Xtest = np.linspace(-0.05, 1.05, n_test).reshape(-1, 1) # test inputs
# compute predictive mean and variance
mean, sd = model.predict(Xtest)
plt.plot(Xtest, mean, 'r', lw=2) # plot the mean
plt.fill_between(Xtest.flatten(), # plot the two-sigma uncertainty about the mean
(mean - 2.0 * sd),
(mean + 2.0 * sd),
color='C0', alpha=0.3)
plt.xlabel("feature values")
plt.ylabel("target values")
plt.xlim(-0.05, 1.05)
def plot2D(X, y, model, estimator, eval_func, input_domain):
fig = plt.figure(figsize=plt.figaspect(0.5))
n = 30
min_X, max_X = input_domain
x1 = np.outer(np.linspace(min_X, max_X, n), np.ones(n))
x2 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).T
z = eval_func({"x1": x1.flatten(), "x2": x2.flatten()}).reshape(n, n)
cmap = 'Spectral'
ax = fig.add_subplot(1, 5, 1, projection='3d')
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("true plot")
z = model.predict(np.array([x1, x2]).T.reshape(-1, 2))[0].reshape(n, n).T
ax = fig.add_subplot(1, 5, 2, projection='3d')
# ax.scatter(X.T[0], X.T[1], y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("Bayesian + GP")
xgb_adv = clone(estimator)
xgb_adv.fit(X, y)
z = xgb_adv.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 3, projection='3d')
# ax.scatter(X.T[0], X.T[1], y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("Bayesian + XGB")
rand_X = np.random.uniform(min_X, max_X, X.shape)
rand_y = eval_func({"x%d" % i: _x for i, _x in enumerate(rand_X.T)})
gpr_rand = GaussianProcessRegressor(RBF(2), alpha=0.01)
gpr_rand.fit(rand_X, rand_y)
z = gpr_rand.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 4, projection='3d')
# ax.scatter(rand_X.T[0], rand_X.T[1], rand_y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("uniform random + GP")
xgb_rand = clone(estimator)
xgb_rand.fit(rand_X, rand_y)
z = xgb_rand.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 5, projection='3d')
# ax.scatter(rand_X.T[0], rand_X.T[1], rand_y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("uniform random + XGB")
def eval_accuracy(X, y, model, estimator, eval_func, input_domain):
n = 30
min_X, max_X, = input_domain
test_x1 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).flatten()
test_x2 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).T.flatten()
test_X = np.array([test_x1, test_x2]).T.reshape(-1, 2)
test_y = eval_func({"x1": test_x1, "x2": test_x2})
pred_gpr = model.predict(np.array([test_x1, test_x2]).T.reshape(-1, 2))[0]
print("Error using %d explored data with GPR MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], mean_squared_error(test_y, pred_gpr),
mean_absolute_percentage_error(test_y, pred_gpr)))
model_best = clone(estimator)
model_best.fit(X, y)
pred_best = model_best.predict(test_X)
print("Error using %d explored data with XGB MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], mean_squared_error(test_y, pred_best),
mean_absolute_percentage_error(test_y, pred_best)))
gpr_err = []
xgb_err = []
for i in range(5):
rand_X = np.random.uniform(min_X, max_X, X.shape)
rand_y = eval_func({"x%d" % i: _x for i, _x in enumerate(rand_X.T)})
model_rand = GaussianProcessRegressor(RBF(2), alpha=0.01)
model_rand.fit(rand_X, rand_y)
pred_rand = model_rand.predict(test_X)
gpr_err.append([mean_squared_error(test_y, pred_rand), mean_absolute_percentage_error(test_y, pred_rand)])
model_rand = xgboost.XGBRegressor()
model_rand.fit(rand_X, rand_y)
pred_rand = model_rand.predict(test_X)
xgb_err.append([mean_squared_error(test_y, pred_rand), mean_absolute_percentage_error(test_y, pred_rand)])
print("Error using %d uniform sampled data with GPR MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], np.mean(gpr_err, axis=0)[0], np.mean(gpr_err, axis=0)[1]))
print("Error using %d uniform sampled data with XGB MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], np.mean(xgb_err, axis=0)[0], np.mean(xgb_err, axis=0)[1]))
def test_eval(param_dict):
"""
input_domain = [-2, 2]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict])
return np.cos(X[0].T ** 2 + X[1].T ** 2)
def rastrigin_function(param_dict):
"""
input_domain = [-5.12, 5.12]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).T.reshape(-1, 2)
return 10 * len(param_dict) + np.sum(np.square(X) - 10 * np.cos(2 * np.pi * X), axis=1).flatten()
def rosenbrock_function(param_dict):
"""
input_domain = [-2, 2]
:param param_dict:
:return:
"""
a, b = 1, 100
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return (a - X[0].flatten()) ** 2 + b * (X[1] - X[0] ** 2) ** 2 + np.random.normal(0, 0.2, len(X[0]))
def himmelblau_function(param_dict):
"""
input_domain = [-5, 5]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return (X[0] ** 2 + X[1] + 11) ** 2 + (X[0] + X[1] ** 2 - 7) ** 2 + np.random.normal(0, 0.2, len(X[0]))
def styblinski_tang_function(param_dict):
"""
input_domain = [-4, 4]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1).T
return 0.5 * np.sum(X ** 4 - 16 * X ** 2 + 5 * X, axis=1) + np.random.normal(0, 0.2, X.shape[0])
def eggholder_function(param_dict):
"""
input_domain = [-212, 212]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return -(X[1] + 47) * np.sin(np.sqrt(np.abs(X[0] / 2.0 + (X[1] + 47)))) - X[0] * np.sin(
np.sqrt(np.abs(X[0] - (X[1] + 47)))) + np.random.normal(0, 0.2, len(X[0]))
if __name__ == '__main__':
file_id = time.time()
step = 30
n = 200
eval_func = styblinski_tang_function
input_domain_1 = [10, 100]
input_domain_2 = [10, 100]
xgb = xgboost.XGBRegressor(verbosity=0)
init_df = None
explorer = Explorer(
{
'concurrency': RandomInt(input_domain_1[0], input_domain_1[1]),
'message': RandomInt(input_domain_2[0], input_domain_2[1]),
},
#path="data/out_%d.csv" % file_id
path = "data/out_1606153147.csv"
)
for i in range(0, n, step):
init_df = explorer.explore(step, eval_func, init_n=5)
X, y = init_df.iloc[:, :-1].values, init_df.iloc[:, -1].values
print('Learned points : ')
print(X)
print("Number of data points : %d" % X.shape[0])
#eval_accuracy(X, y, explorer.gpr, xgb, eval_func, input_domain)
#plot2D(X, y, explorer.gpr, xgb, eval_func, input_domain)
#plt.show() | examples.py | import logging
import time
import warnings
import numpy as np
import xgboost
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.base import clone
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.metrics import mean_squared_error
from explorer import Explorer
from util import RandomFloat
from util import RandomInt
warnings.filterwarnings("ignore")
plt3d = mplot3d
seed = 42
np.random.seed(seed)
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def mean_absolute_percentage_error(y_true, y_pred):
"""
compute mean absolute percentage error
:param y_true:
:param y_pred:
:return:
"""
y_true, y_pred = np.array(y_true), np.array(y_pred)
for i, y in enumerate(y_true):
if y == 0:
y_true[i] = 1
y_pred[i] = 1
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def plot1D(X, y, plot_observed_data=False, plot_predictions=False, model=None, n_test=500, ):
plt.figure(figsize=(20, 10))
if plot_observed_data:
plt.plot(X, y, 'kx')
if plot_predictions:
Xtest = np.linspace(-0.05, 1.05, n_test).reshape(-1, 1) # test inputs
# compute predictive mean and variance
mean, sd = model.predict(Xtest)
plt.plot(Xtest, mean, 'r', lw=2) # plot the mean
plt.fill_between(Xtest.flatten(), # plot the two-sigma uncertainty about the mean
(mean - 2.0 * sd),
(mean + 2.0 * sd),
color='C0', alpha=0.3)
plt.xlabel("feature values")
plt.ylabel("target values")
plt.xlim(-0.05, 1.05)
def plot2D(X, y, model, estimator, eval_func, input_domain):
fig = plt.figure(figsize=plt.figaspect(0.5))
n = 30
min_X, max_X = input_domain
x1 = np.outer(np.linspace(min_X, max_X, n), np.ones(n))
x2 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).T
z = eval_func({"x1": x1.flatten(), "x2": x2.flatten()}).reshape(n, n)
cmap = 'Spectral'
ax = fig.add_subplot(1, 5, 1, projection='3d')
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("true plot")
z = model.predict(np.array([x1, x2]).T.reshape(-1, 2))[0].reshape(n, n).T
ax = fig.add_subplot(1, 5, 2, projection='3d')
# ax.scatter(X.T[0], X.T[1], y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("Bayesian + GP")
xgb_adv = clone(estimator)
xgb_adv.fit(X, y)
z = xgb_adv.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 3, projection='3d')
# ax.scatter(X.T[0], X.T[1], y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("Bayesian + XGB")
rand_X = np.random.uniform(min_X, max_X, X.shape)
rand_y = eval_func({"x%d" % i: _x for i, _x in enumerate(rand_X.T)})
gpr_rand = GaussianProcessRegressor(RBF(2), alpha=0.01)
gpr_rand.fit(rand_X, rand_y)
z = gpr_rand.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 4, projection='3d')
# ax.scatter(rand_X.T[0], rand_X.T[1], rand_y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("uniform random + GP")
xgb_rand = clone(estimator)
xgb_rand.fit(rand_X, rand_y)
z = xgb_rand.predict(np.array([x1, x2]).T.reshape(-1, 2)).reshape(n, n).T
ax = fig.add_subplot(1, 5, 5, projection='3d')
# ax.scatter(rand_X.T[0], rand_X.T[1], rand_y, marker="x")
ax.plot_surface(x1, x2, z, cmap=cmap, edgecolor='none')
ax.set_title("uniform random + XGB")
def eval_accuracy(X, y, model, estimator, eval_func, input_domain):
n = 30
min_X, max_X, = input_domain
test_x1 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).flatten()
test_x2 = np.outer(np.linspace(min_X, max_X, n), np.ones(n)).T.flatten()
test_X = np.array([test_x1, test_x2]).T.reshape(-1, 2)
test_y = eval_func({"x1": test_x1, "x2": test_x2})
pred_gpr = model.predict(np.array([test_x1, test_x2]).T.reshape(-1, 2))[0]
print("Error using %d explored data with GPR MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], mean_squared_error(test_y, pred_gpr),
mean_absolute_percentage_error(test_y, pred_gpr)))
model_best = clone(estimator)
model_best.fit(X, y)
pred_best = model_best.predict(test_X)
print("Error using %d explored data with XGB MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], mean_squared_error(test_y, pred_best),
mean_absolute_percentage_error(test_y, pred_best)))
gpr_err = []
xgb_err = []
for i in range(5):
rand_X = np.random.uniform(min_X, max_X, X.shape)
rand_y = eval_func({"x%d" % i: _x for i, _x in enumerate(rand_X.T)})
model_rand = GaussianProcessRegressor(RBF(2), alpha=0.01)
model_rand.fit(rand_X, rand_y)
pred_rand = model_rand.predict(test_X)
gpr_err.append([mean_squared_error(test_y, pred_rand), mean_absolute_percentage_error(test_y, pred_rand)])
model_rand = xgboost.XGBRegressor()
model_rand.fit(rand_X, rand_y)
pred_rand = model_rand.predict(test_X)
xgb_err.append([mean_squared_error(test_y, pred_rand), mean_absolute_percentage_error(test_y, pred_rand)])
print("Error using %d uniform sampled data with GPR MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], np.mean(gpr_err, axis=0)[0], np.mean(gpr_err, axis=0)[1]))
print("Error using %d uniform sampled data with XGB MSE : %.4f ,MAPE : %.4f" % (
X.shape[0], np.mean(xgb_err, axis=0)[0], np.mean(xgb_err, axis=0)[1]))
def test_eval(param_dict):
"""
input_domain = [-2, 2]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict])
return np.cos(X[0].T ** 2 + X[1].T ** 2)
def rastrigin_function(param_dict):
"""
input_domain = [-5.12, 5.12]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).T.reshape(-1, 2)
return 10 * len(param_dict) + np.sum(np.square(X) - 10 * np.cos(2 * np.pi * X), axis=1).flatten()
def rosenbrock_function(param_dict):
"""
input_domain = [-2, 2]
:param param_dict:
:return:
"""
a, b = 1, 100
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return (a - X[0].flatten()) ** 2 + b * (X[1] - X[0] ** 2) ** 2 + np.random.normal(0, 0.2, len(X[0]))
def himmelblau_function(param_dict):
"""
input_domain = [-5, 5]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return (X[0] ** 2 + X[1] + 11) ** 2 + (X[0] + X[1] ** 2 - 7) ** 2 + np.random.normal(0, 0.2, len(X[0]))
def styblinski_tang_function(param_dict):
"""
input_domain = [-4, 4]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1).T
return 0.5 * np.sum(X ** 4 - 16 * X ** 2 + 5 * X, axis=1) + np.random.normal(0, 0.2, X.shape[0])
def eggholder_function(param_dict):
"""
input_domain = [-212, 212]
:param param_dict:
:return:
"""
X = np.array([param_dict[params] for params in param_dict]).reshape(2, -1)
return -(X[1] + 47) * np.sin(np.sqrt(np.abs(X[0] / 2.0 + (X[1] + 47)))) - X[0] * np.sin(
np.sqrt(np.abs(X[0] - (X[1] + 47)))) + np.random.normal(0, 0.2, len(X[0]))
if __name__ == '__main__':
file_id = time.time()
step = 30
n = 200
eval_func = styblinski_tang_function
input_domain_1 = [10, 100]
input_domain_2 = [10, 100]
xgb = xgboost.XGBRegressor(verbosity=0)
init_df = None
explorer = Explorer(
{
'concurrency': RandomInt(input_domain_1[0], input_domain_1[1]),
'message': RandomInt(input_domain_2[0], input_domain_2[1]),
},
#path="data/out_%d.csv" % file_id
path = "data/out_1606153147.csv"
)
for i in range(0, n, step):
init_df = explorer.explore(step, eval_func, init_n=5)
X, y = init_df.iloc[:, :-1].values, init_df.iloc[:, -1].values
print('Learned points : ')
print(X)
print("Number of data points : %d" % X.shape[0])
#eval_accuracy(X, y, explorer.gpr, xgb, eval_func, input_domain)
#plot2D(X, y, explorer.gpr, xgb, eval_func, input_domain)
#plt.show() | 0.749179 | 0.563108 |
from pygmol.abc import Chemistry, PlasmaParameters, Equations
class DefaultChemistry(Chemistry):
"""Default concrete Chemistry subclass for testing."""
# species: e-, Ar, Ar+
species_ids = ["Ar", "Ar+"]
species_charges = [0, 1]
species_masses = [39.948, 39.948]
species_lj_sigma_coefficients = [0.542, 3.542]
species_surface_sticking_coefficients = [0.0, 1.0]
species_surface_return_matrix = [0, 1], [0, 0]
# reactions:
# 3: e- + Ar -> Ar + e-
# 7: e- + Ar -> Ar+ + e- + e-
# 48: e- + Ar+ -> Ar+ + e-
reactions_ids = [3, 7, 48]
reactions_strings = [
"e- + Ar -> Ar + e-",
"e- + Ar -> Ar+ + e- + e-",
"e- + Ar+ -> Ar+ + e-",
]
reactions_arrh_a = [2.66e-07, 3.09e-08, 1.61e-04]
reactions_arrh_b = [-1.28e-02, 4.46e-01, -1.22e00]
reactions_arrh_c = [3.15e00, 1.70e01, 3.82e-02]
reactions_el_energy_losses = [0.0, 15.875, 0.0]
reactions_elastic_flags = [True, False, True]
reactions_electron_stoich_lhs = [1, 1, 1]
reactions_electron_stoich_rhs = [1, 2, 1]
reactions_arbitrary_stoich_lhs = [0, 0, 0]
reactions_arbitrary_stoich_rhs = [0, 0, 0]
reactions_species_stoichiomatrix_lhs = [
[1, 0],
[1, 0],
[0, 1],
]
reactions_species_stoichiomatrix_rhs = [
[1, 0],
[0, 1],
[0, 1],
]
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultChemistryMinimal(Chemistry):
"""Default concrete Chemistry subclass for testing."""
# species: e-, Ar, Ar+
species_ids = ["Ar", "Ar+"]
species_surface_return_matrix = [0, 1], [0, 0]
# reactions:
# 3: e- + Ar -> Ar + e-
# 7: e- + Ar -> Ar+ + e- + e-
# 48: e- + Ar+ -> Ar+ + e-
reactions_ids = [3, 7, 48]
reactions_arrh_a = [2.66e-07, 3.09e-08, 1.61e-04]
reactions_arrh_b = [-1.28e-02, 4.46e-01, -1.22e00]
reactions_arrh_c = [3.15e00, 1.70e01, 3.82e-02]
reactions_el_energy_losses = [0.0, 15.875, 0.0]
reactions_elastic_flags = [True, False, True]
reactions_electron_stoich_lhs = [1, 1, 1]
reactions_electron_stoich_rhs = [1, 2, 1]
reactions_arbitrary_stoich_lhs = [0, 0, 0]
reactions_arbitrary_stoich_rhs = [0, 0, 0]
reactions_species_stoichiomatrix_lhs = [
[1, 0],
[1, 0],
[0, 1],
]
reactions_species_stoichiomatrix_rhs = [
[1, 0],
[0, 1],
[0, 1],
]
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsStat(PlasmaParameters):
"""Default concrete PlasmaParameters for testing with static power"""
radius = 1.0
length = 2.0
pressure = 3.0
power = 400.0
feeds = {"Ar": 5.0}
temp_e = 6.0
temp_n = 700.0
t_end = 8.0
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsDyn(PlasmaParameters):
"""Default concrete PlasmaParameters for testing with dynamic power"""
radius = 1.0
length = 2.0
pressure = 3.0
power = [0.0, 400.0]
t_power = [0.0, 1.0]
feeds = {"Ar": 5.0}
temp_e = 6.0
temp_n = 700.0
t_end = 8.0
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsMinimal(PlasmaParameters):
"""Default concrete PlasmaParameters for testing, implementing the
absolute minimum of the values
"""
radius, length, pressure, power = 1.0, 2.0, 2.0, 400.0
# noinspection PyAbstractClass
class MockEquations(Equations):
ode_system_rhs = None
final_solution_labels = None
def __init__(self, chemistry, plasma_params):
super().__init__(chemistry, plasma_params)
def get_final_solution_values(self, y):
pass
def get_y0_default(self, initial_densities=None):
pass | tests/resources.py | from pygmol.abc import Chemistry, PlasmaParameters, Equations
class DefaultChemistry(Chemistry):
"""Default concrete Chemistry subclass for testing."""
# species: e-, Ar, Ar+
species_ids = ["Ar", "Ar+"]
species_charges = [0, 1]
species_masses = [39.948, 39.948]
species_lj_sigma_coefficients = [0.542, 3.542]
species_surface_sticking_coefficients = [0.0, 1.0]
species_surface_return_matrix = [0, 1], [0, 0]
# reactions:
# 3: e- + Ar -> Ar + e-
# 7: e- + Ar -> Ar+ + e- + e-
# 48: e- + Ar+ -> Ar+ + e-
reactions_ids = [3, 7, 48]
reactions_strings = [
"e- + Ar -> Ar + e-",
"e- + Ar -> Ar+ + e- + e-",
"e- + Ar+ -> Ar+ + e-",
]
reactions_arrh_a = [2.66e-07, 3.09e-08, 1.61e-04]
reactions_arrh_b = [-1.28e-02, 4.46e-01, -1.22e00]
reactions_arrh_c = [3.15e00, 1.70e01, 3.82e-02]
reactions_el_energy_losses = [0.0, 15.875, 0.0]
reactions_elastic_flags = [True, False, True]
reactions_electron_stoich_lhs = [1, 1, 1]
reactions_electron_stoich_rhs = [1, 2, 1]
reactions_arbitrary_stoich_lhs = [0, 0, 0]
reactions_arbitrary_stoich_rhs = [0, 0, 0]
reactions_species_stoichiomatrix_lhs = [
[1, 0],
[1, 0],
[0, 1],
]
reactions_species_stoichiomatrix_rhs = [
[1, 0],
[0, 1],
[0, 1],
]
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultChemistryMinimal(Chemistry):
"""Default concrete Chemistry subclass for testing."""
# species: e-, Ar, Ar+
species_ids = ["Ar", "Ar+"]
species_surface_return_matrix = [0, 1], [0, 0]
# reactions:
# 3: e- + Ar -> Ar + e-
# 7: e- + Ar -> Ar+ + e- + e-
# 48: e- + Ar+ -> Ar+ + e-
reactions_ids = [3, 7, 48]
reactions_arrh_a = [2.66e-07, 3.09e-08, 1.61e-04]
reactions_arrh_b = [-1.28e-02, 4.46e-01, -1.22e00]
reactions_arrh_c = [3.15e00, 1.70e01, 3.82e-02]
reactions_el_energy_losses = [0.0, 15.875, 0.0]
reactions_elastic_flags = [True, False, True]
reactions_electron_stoich_lhs = [1, 1, 1]
reactions_electron_stoich_rhs = [1, 2, 1]
reactions_arbitrary_stoich_lhs = [0, 0, 0]
reactions_arbitrary_stoich_rhs = [0, 0, 0]
reactions_species_stoichiomatrix_lhs = [
[1, 0],
[1, 0],
[0, 1],
]
reactions_species_stoichiomatrix_rhs = [
[1, 0],
[0, 1],
[0, 1],
]
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsStat(PlasmaParameters):
"""Default concrete PlasmaParameters for testing with static power"""
radius = 1.0
length = 2.0
pressure = 3.0
power = 400.0
feeds = {"Ar": 5.0}
temp_e = 6.0
temp_n = 700.0
t_end = 8.0
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsDyn(PlasmaParameters):
"""Default concrete PlasmaParameters for testing with dynamic power"""
radius = 1.0
length = 2.0
pressure = 3.0
power = [0.0, 400.0]
t_power = [0.0, 1.0]
feeds = {"Ar": 5.0}
temp_e = 6.0
temp_n = 700.0
t_end = 8.0
def __init__(self, **kwargs):
for attr, val in kwargs.items():
setattr(self, attr, val)
super().__init__()
class DefaultParamsMinimal(PlasmaParameters):
"""Default concrete PlasmaParameters for testing, implementing the
absolute minimum of the values
"""
radius, length, pressure, power = 1.0, 2.0, 2.0, 400.0
# noinspection PyAbstractClass
class MockEquations(Equations):
    """Minimal concrete Equations stand-in used by the tests."""

    # Abstract members satisfied with inert placeholders.
    ode_system_rhs = None
    final_solution_labels = None

    def __init__(self, chemistry, plasma_params):
        super().__init__(chemistry, plasma_params)

    def get_final_solution_values(self, y):
        pass

    def get_y0_default(self, initial_densities=None):
        pass
import argparse
import os
import random
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
# Directory for product data
def transform_name(product_name):
    """Return the (for now unmodified) product name used as training text."""
    # IMPLEMENT
    return product_name
# Directory for product data
directory = r'/workspace/datasets/product_data/products/'

parser = argparse.ArgumentParser(description='Process some integers.')
general = parser.add_argument_group("general")
general.add_argument("--input", default=directory, help="The directory containing product data")
general.add_argument("--output", default="/workspace/datasets/fasttext/output.fasttext", help="the file to output to")
# Consuming all of the product data will take over an hour! But we still want to be able to obtain a representative sample.
general.add_argument("--sample_rate", default=0.05, type=float, help="The rate at which to sample input (default is 0.05)")
# Setting max_input is useful for quick iterations that don't require consuming all of the input or having a representative sample.
general.add_argument("--max_input", default=0, type=int, help="The maximum number of rows to process (0 means no maximum)")
# Setting min_product_names removes infrequent categories and makes the classifier's task easier.
general.add_argument("--min_product_names", default=5, type=int, help="The minimum number of products per category.")
# Setting max_product_names makes the category distribution more balanced.
general.add_argument("--max_product_names", default=50, type=int, help="The maximum number of products per category.")

args = parser.parse_args()
output_file = args.output
output_dir = Path(output_file).parent
# FIX: use makedirs(exist_ok=True) — os.mkdir fails when intermediate
# directories are missing, and `== False` is unidiomatic.
if not output_dir.is_dir():
    os.makedirs(output_dir, exist_ok=True)

if args.input:
    directory = args.input
min_product_names = args.min_product_names
max_product_names = args.max_product_names
sample_rate = args.sample_rate
# BUG FIX: max_input was read in the loop below but never assigned from args,
# which raised a NameError on the very first iteration.
max_input = args.max_input

# Track items per category so infrequent categories can be dropped and large
# ones capped (the previously unimplemented min/max filtering).
names_by_category = defaultdict(list)
total_input = 0
reached_max = False
for filename in os.listdir(directory):
    # Terminate early if max_input is specified and reached.
    if reached_max:
        break
    if not filename.endswith(".xml"):
        continue
    print("Processing %s" % filename)
    tree = ET.parse(os.path.join(directory, filename))
    root = tree.getroot()
    for child in root:
        # Terminate early if max_input is specified and reached.
        if max_input > 0 and total_input >= max_input:
            reached_max = True
            break
        if random.random() > sample_rate:
            continue
        name_node = child.find('name')
        category_path = child.find('categoryPath')
        # Check to make sure category name is valid
        if (name_node is not None and name_node.text is not None and
                category_path is not None and len(category_path) > 0 and
                category_path[len(category_path) - 1][0].text is not None):
            # Choose last element in categoryPath as the leaf categoryId
            cat = category_path[len(category_path) - 1][0].text
            # Replace newline chars with spaces so fastText doesn't complain
            name = name_node.text.replace('\n', ' ')
            names_by_category[cat].append(transform_name(name))
            total_input = total_input + 1

print("Writing results to %s" % output_file)
with open(output_file, 'w') as output:
    for cat, names in names_by_category.items():
        # Drop categories with too few examples (easier classification task).
        if len(names) < min_product_names:
            continue
        # Cap each category at max_product_names for a more balanced distribution.
        for name in names[:max_product_names]:
            output.write("__label__%s %s\n" % (cat, name))
# NOTE(review): this span is a near-verbatim duplicate of the script above —
# it appears to be the `parsed_code` column of a dataset dump, and its own
# `import argparse` line was fused into the previous record's last line. As
# written this copy would fail with NameError on `argparse` and on `random`
# (used below, never imported here). Kept token-for-token; consider
# deduplicating the dump rather than editing this copy.
import os
import xml.etree.ElementTree as ET
from pathlib import Path
# Directory for product data
def transform_name(product_name):
    # IMPLEMENT
    return product_name
directory = r'/workspace/datasets/product_data/products/'
parser = argparse.ArgumentParser(description='Process some integers.')
general = parser.add_argument_group("general")
general.add_argument("--input", default=directory, help="The directory containing product data")
general.add_argument("--output", default="/workspace/datasets/fasttext/output.fasttext", help="the file to output to")
# Consuming all of the product data will take over an hour! But we still want to be able to obtain a representative sample.
general.add_argument("--sample_rate", default=0.05, type=float, help="The rate at which to sample input (default is 0.05)")
# Setting max_input is useful for quick iterations that don't require consuming all of the input or having a representative sample.
general.add_argument("--max_input", default=0, type=int, help="The maximum number of rows to process (0 means no maximum)")
# Setting min_product_names removes infrequent categories and makes the classifier's task easier.
general.add_argument("--min_product_names", default=5, type=int, help="The minimum number of products per category.")
# Setting max_product_names makes the category distribution more balanced.
general.add_argument("--max_product_names", default=50, type=int, help="The maximum number of products per category.")
args = parser.parse_args()
output_file = args.output
path = Path(output_file)
output_dir = path.parent
if os.path.isdir(output_dir) == False:
    os.mkdir(output_dir)
if args.input:
    directory = args.input
# IMPLEMENT: Track the number of items in each category and only output if above the min and below the max
min_product_names = args.min_product_names
max_product_names = args.max_product_names
sample_rate = args.sample_rate
# NOTE(review): `max_input` is read in the loop below but never assigned from
# `args` — NameError at runtime in this copy as well.
total_input = 0
print("Writing results to %s" % output_file)
with open(output_file, 'w') as output:
    for filename in os.listdir(directory):
        # Terminate early if max_input is specified and reached.
        if max_input > 0 and total_input == max_input:
            break
        if filename.endswith(".xml"):
            print("Processing %s" % filename)
            f = os.path.join(directory, filename)
            tree = ET.parse(f)
            root = tree.getroot()
            for child in root:
                if random.random() > sample_rate:
                    continue
                # Check to make sure category name is valid
                if (child.find('name') is not None and child.find('name').text is not None and
                        child.find('categoryPath') is not None and len(child.find('categoryPath')) > 0 and
                        child.find('categoryPath')[len(child.find('categoryPath')) - 1][0].text is not None):
                    # Choose last element in categoryPath as the leaf categoryId
                    cat = child.find('categoryPath')[len(child.find('categoryPath')) - 1][0].text
                    # Replace newline chars with spaces so fastText doesn't complain
                    name = child.find('name').text.replace('\n', ' ')
                    output.write("__label__%s %s\n" % (cat, transform_name(name)))
                    total_input = total_input + 1
                    # Terminate early if max_input is specified and reached.
                    if total_input == max_input:
                        break
import numpy as np
from skimage import io, transform
def get_data(img_root, x_file, y_file):
    """
    Load the images listed in ``x_file`` (relative to ``img_root``) together
    with their labels from ``y_file``. Every image is resized to 300 x 300
    pixels.

    :param img_root: The directory where the images are located.
    :param x_file: The name of the file with the names of the images.
    :param y_file: The name of the file with the labels for the images.
    :returns: A numpy array with the image data and a numpy array
        with the labels.
    """
    image_names = np.loadtxt(x_file, dtype=str)
    all_labels = np.loadtxt(y_file)
    images = []
    labels = []
    for index, image_name in enumerate(image_names):
        # Paths are built by plain concatenation, so img_root is expected to
        # end with a separator.
        resized = transform.resize(io.imread(img_root + image_name), (300, 300))
        images.append(resized)
        labels.append(all_labels[index])
    return np.array(images), np.array(labels)
def get_test_data():
    """
    Load the test image data and labels; images are resized to
    300 x 300 pixels.

    :returns: A numpy array with the test image data and a numpy array
        with the test labels.
    """
    return get_data('testset/test/', 'testset/test-x', 'testset/test-y')
def get_train_data():
    """
    Retrieves the training image data and labels. Note that images
    retrieved are resized to be 300 x 300 pixels.

    :returns: A numpy array with the training image data and a numpy
        array with the training labels.
    """
    # BUG FIX: added the trailing '/' (cf. get_test_data). get_data joins
    # paths by plain string concatenation, so 'dataset/train' + name would
    # have produced 'dataset/trainNAME' instead of 'dataset/train/NAME'.
    return get_data('dataset/train/', 'dataset/train-x', 'dataset/train-y')
def print_baseline(labels):
    """
    Prints the accuracy if we just classified all images one label for 0 and 1.

    :param labels: The actual classification labels for the data set as a
        numpy array (or any iterable of 0.0/1.0 values).
    """
    total = len(labels)
    if total == 0:
        # Guard: the original divided by len(labels) and raised
        # ZeroDivisionError on an empty label set.
        print('Guess Zero: ', '0.0')
        print('Guess One: ', '0.0')
        return
    num_zeros = float(sum(1 for label in labels if label == 0.0))
    num_ones = float(sum(1 for label in labels if label == 1.0))
    print('Guess Zero: ', str(num_zeros / float(total)))
    print('Guess One: ', str(num_ones / float(total)))
# NOTE(review): this span duplicates the utils module above — it appears to be
# the `parsed_code` column of a dataset dump. Its `import numpy as np` line
# was fused into the previous record's last line, so `np` is unresolved in
# this copy as written, and the final line still carries the dump's trailing
# probability columns. Kept token-for-token; consider deduplicating the dump.
from skimage import io, transform
def get_data(img_root, x_file, y_file):
    """
    Retrieves the image data and labels from the directory
    and files as given by the associated parameters. Note that
    images retrieved are resized to be 300 x 300 pixels.
    :param img_root: The directory where the images are located.
    :param x_file: The name of the file with the names of the images.
    :param y_file: The name of the file with the labels for the images.
    :returns: A numpy array with the image data and a numpy array
        with the labels.
    """
    x_s = np.loadtxt(x_file, dtype=str)
    y_s = np.loadtxt(y_file)
    tempx = []
    tempy = []
    for i, xname in enumerate(x_s, 0):
        # Paths are built by plain concatenation: img_root must end with '/'.
        img_name = img_root + xname
        image = transform.resize(io.imread(img_name), (300, 300))
        tempx.append(image)
        tempy.append(y_s[i])
    return np.array(tempx), np.array(tempy)
def get_test_data():
    """
    Retrieves the test image data and labels. Note that images retrieved
    are resized to be 300 x 300 pixels.
    :returns: A numpy array with the test image data and a numpy array
        with the test labels.
    """
    return get_data('testset/test/', 'testset/test-x', 'testset/test-y')
def get_train_data():
    """
    Retrieves the training image data and labels. Note that images
    retrieved are resized to be 300 x 300 pixels.
    :returns: A numpy array with the training image data and a numpy
        array with the training labels.
    """
    # NOTE(review): unlike get_test_data, this img_root lacks a trailing '/',
    # so get_data would read paths like 'dataset/trainNAME' — confirm intended.
    return get_data('dataset/train', 'dataset/train-x', 'dataset/train-y')
def print_baseline(labels):
    """
    Prints the accuracy if we just classified all images one label for 0 and 1
    :param labels: The actual classification labels for the data set as a
        numpy array.
    """
    numzeros = 0.0
    numones = 0.0
    for i in labels:
        if i == 0.0:
            numzeros += 1.0
        if i == 1.0:
            numones += 1.0
    # NOTE(review): divides by len(labels) — ZeroDivisionError on empty input.
    print('Guess Zero: ', str(numzeros / float(len(labels))))
    print('Guess One: ', str(numones / float(len(labels)))) | 0.823719 | 0.765155