index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
30,672
|
ik6cgsg/min-disk-check
|
refs/heads/main
|
/main.py
|
import sys
from min_disk_checker import *
from point import *
if __name__ == '__main__':
    # CLI entry point: read edge-point indices and a point list from the data
    # file, then report whether the described disk is minimal.
    if len(sys.argv) != 2:
        print("Usage: /path/to/python3.x main.py <data_file>\n"
              "data_file format:\n"
              "\tFirst line - '[int] [int] [int]\\n' - indices of disk edge points from list below\n"
              "\tFurther - '<int> <int>\\n' - 2d points coordinates")
        sys.exit()
    try:
        # "with" guarantees the file is closed even when parsing raises;
        # the original explicit close() was skipped on any exception.
        with open(sys.argv[1]) as data_file:
            coordStr = data_file.readline()
            coords = [int(x) for x in coordStr.split()]
            points = [Point.create_from_str(line) for line in data_file]
        mdc = MinDiskChecker()
        print("Is disk minimal? ", mdc.is_disk_minimal(coords, points))
    except IOError:
        print("ERROR: File not accessible")
    except ValueError:
        print("ERROR: Data must be integer")
    except PointException as pe:
        print("Point ERROR: " + str(pe))
    except MinDiskCheckerException as me:
        print("MinDiskChecker ERROR: " + str(me))
|
{"/main.py": ["/min_disk_checker.py", "/point.py"], "/min_disk_checker.py": ["/point.py"], "/test.py": ["/min_disk_checker.py", "/point.py"]}
|
30,673
|
ik6cgsg/min-disk-check
|
refs/heads/main
|
/point.py
|
class PointException(Exception):
    """Raised when a Point cannot be built from its textual form."""
    pass


class Point(object):
    """A 2D integer point supporting +, -, scalar * and Euclidean length."""

    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y

    @staticmethod
    def create_from_str(string: str):
        """Parse a "<int> <int>" line into a Point.

        Raises PointException for an empty string, non-integer tokens, or a
        token count other than two.
        """
        if not string:
            raise PointException("Wrong string format")
        try:
            values = [int(token) for token in string.split()]
            if len(values) != 2:
                raise PointException("Wrong number of coordinates")
            return Point(values[0], values[1])
        except ValueError:
            raise PointException("Coordinates are not int")

    def len(self):
        """Distance from the origin."""
        return (self.x ** 2 + self.y ** 2) ** 0.5

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y)

    def __mul__(self, num: int):
        return Point(self.x * num, self.y * num)

    def __str__(self):
        return "Point(%s, %s)" % (self.x, self.y)

    def __repr__(self):
        return str(self)
|
{"/main.py": ["/min_disk_checker.py", "/point.py"], "/min_disk_checker.py": ["/point.py"], "/test.py": ["/min_disk_checker.py", "/point.py"]}
|
30,674
|
ik6cgsg/min-disk-check
|
refs/heads/main
|
/min_disk_checker.py
|
import numpy
from point import *
class MinDiskCheckerException(Exception):
    """Raised by MinDiskChecker.is_disk_minimal on inconsistent input
    (more indices than points, or more than three indices)."""
    pass
def det3x3(a) -> int:
    """Determinant of a 3x3 matrix (nested lists), by first-column cofactors."""
    (a00, a01, a02), (a10, a11, a12), (a20, a21, a22) = a
    return (a00 * (a11 * a22 - a21 * a12)
            - a10 * (a01 * a22 - a21 * a02)
            + a20 * (a01 * a12 - a11 * a02))


def det4x4(a) -> int:
    """Determinant of a 4x4 matrix via Laplace expansion along the first row."""
    total = 0
    sign = 1
    for col in range(4):
        # Minor: drop row 0 and the current column.
        minor = [[a[row][c] for c in range(4) if c != col] for row in (1, 2, 3)]
        total += sign * a[0][col] * det3x3(minor)
        sign = -sign
    return total
class MinDiskChecker(object):
    """Checks whether the disk spanned by 2-3 claimed boundary points is the
    minimal enclosing disk of a point set.

    All predicates use exact integer determinants, so arbitrarily large
    integer coordinates stay precise (see the large-number tests).
    """

    def __init__(self):
        # Indices (into self.points) of the claimed boundary points.
        self.coords: [int] = []
        # The full point cloud being tested.
        self.points: [Point] = []
        # Boundary points resolved from self.coords.
        self.edge_points: [Point] = []
        # Orientation sign (+1/-1) used by the in-circumcircle test.
        self.sign = None
        # Cached 3x3 cofactors of the 4x4 in-circle matrix.
        self.dets3x3 = None

    def _inside_for_2_points(self, p: Point) -> bool:
        """True if p lies in the closed disk whose diameter joins the two edge
        points (Thales: the chords from p form a non-acute angle, dot <= 0)."""
        p0 = self.edge_points[0]
        p1 = self.edge_points[1]
        return (p.x - p0.x) * (p.x - p1.x) + (p.y - p0.y) * (p.y - p1.y) <= 0

    # Probe the middle point of the p0p1 side to calibrate the sign.
    def _get_sign_for_3_points(self, p0: Point, p1: Point, p2: Point):
        """Set self.sign so _inside_for_3_points is independent of the
        winding order of p0, p1, p2.

        The midpoint of side p0p1 is certainly inside the circumcircle, so
        whatever determinant sign it produces is the "inside" sign. Rows are
        scaled (midpoint doubled, the rest by 2/4) to keep entries integral.
        """
        p = p1 + p0  # doubled middle of p0p1 side
        matrix = [
            [p.x ** 2 + p.y ** 2, p.x, p.y, 2],
            [4 * p0.x ** 2 + 4 * p0.y ** 2, 2 * p0.x, 2 * p0.y, 2],
            [4 * p1.x ** 2 + 4 * p1.y ** 2, 2 * p1.x, 2 * p1.y, 2],
            [4 * p2.x ** 2 + 4 * p2.y ** 2, 2 * p2.x, 2 * p2.y, 2],
        ]  # doubled matrix respectively
        if det4x4(matrix) < 0:
            self.sign = 1
        else:
            self.sign = -1

    def _get_dets_for_3_points(self, p0: Point, p1: Point, p2: Point):
        """Cache the four 3x3 cofactors of the in-circumcircle matrix so each
        later point test in _inside_for_3_points costs a constant-size dot
        product instead of a fresh 4x4 determinant."""
        p0xy2 = p0.x ** 2 + p0.y ** 2
        p1xy2 = p1.x ** 2 + p1.y ** 2
        p2xy2 = p2.x ** 2 + p2.y ** 2
        self.dets3x3 = []
        self.dets3x3.append(det3x3([
            [p0.x, p0.y, 1],
            [p1.x, p1.y, 1],
            [p2.x, p2.y, 1]
        ]))
        self.dets3x3.append(det3x3([
            [p0xy2, p0.y, 1],
            [p1xy2, p1.y, 1],
            [p2xy2, p2.y, 1]
        ]))
        self.dets3x3.append(det3x3([
            [p0xy2, p0.x, 1],
            [p1xy2, p1.x, 1],
            [p2xy2, p2.x, 1]
        ]))
        self.dets3x3.append(det3x3([
            [p0xy2, p0.x, p0.y],
            [p1xy2, p1.x, p1.y],
            [p2xy2, p2.x, p2.y]
        ]))

    def _inside_for_3_points(self, p: Point) -> bool:
        """True if p lies in the closed circumcircle of the three edge points
        (first-row Laplace expansion using the cached cofactors)."""
        det = (p.x ** 2 + p.y ** 2) * self.dets3x3[0] - p.x * self.dets3x3[1] + p.y * self.dets3x3[2] - self.dets3x3[3]
        return self.sign * det <= 0

    def _is_obtuse_triangle(self) -> bool:
        """NOTE(review): despite the name this returns True when the edge
        triangle is strictly ACUTE (every pair of squared sides exceeds the
        third); right and obtuse triangles return False. is_disk_minimal
        relies on exactly this meaning, so a rename must touch both."""
        p0 = self.edge_points[0]
        p1 = self.edge_points[1]
        p2 = self.edge_points[2]
        side0sqr = (p0.x - p1.x) ** 2 + (p0.y - p1.y) ** 2
        side1sqr = (p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2
        side2sqr = (p2.x - p0.x) ** 2 + (p2.y - p0.y) ** 2
        if (side0sqr + side1sqr > side2sqr) and (side1sqr + side2sqr > side0sqr) and (side2sqr + side0sqr > side1sqr):
            return True
        return False

    def _all_points_inside(self) -> bool:
        """Resolve the edge points and test every cloud point against the disk
        they define (diameter disk for 2 indices, circumcircle for 3)."""
        self.edge_points = [self.points[i] for i in self.coords]
        inside = None
        if len(self.coords) == 2:
            inside = self._inside_for_2_points
        elif len(self.coords) == 3:
            inside = self._inside_for_3_points
            self._get_sign_for_3_points(self.edge_points[0], self.edge_points[1], self.edge_points[2])
            self._get_dets_for_3_points(self.edge_points[0], self.edge_points[1], self.edge_points[2])
        else:
            return False
        for p in self.points:
            if not inside(p):
                return False
        return True

    def is_disk_minimal(self, coords: [int], points: [Point]) -> bool:
        """Return True iff the disk defined by points[coords] is the minimal
        enclosing disk of `points`.

        coords -- indices of the claimed boundary points (0 to 3 of them)
        points -- the point cloud
        Raises MinDiskCheckerException when there are more indices than
        points, or more than three indices.
        """
        # Reset state cached by a previous call on the same instance.
        self.sign = None
        self.dets3x3 = None
        if len(coords) > len(points):
            raise MinDiskCheckerException("Number of indices is larger then number of points")
        if len(coords) > 3:
            raise MinDiskCheckerException("Too many indices")
        if len(coords) == 0:
            # Only an empty cloud has a (degenerate) disk with no boundary points.
            if len(points) == 0:
                return True
            else:
                return False
        if len(coords) == 1:
            # A single boundary point is minimal only for a one-point cloud.
            if len(points) == 1:
                return True
            else:
                return False
        self.coords = coords
        self.points = points
        if not self._all_points_inside():
            return False
        if len(coords) == 2:
            # Two boundary points: the diameter disk is minimal once it covers all.
            return True
        if len(coords) == 3 and self._is_obtuse_triangle():
            # Three boundary points define the minimal disk only for an acute
            # triangle (see the note on _is_obtuse_triangle).
            return True
        return False
|
{"/main.py": ["/min_disk_checker.py", "/point.py"], "/min_disk_checker.py": ["/point.py"], "/test.py": ["/min_disk_checker.py", "/point.py"]}
|
30,675
|
ik6cgsg/min-disk-check
|
refs/heads/main
|
/test.py
|
import unittest
from min_disk_checker import *
from point import *
import warnings
class TestMinDiskChecker(unittest.TestCase):
    """Tests for MinDiskChecker.is_disk_minimal.

    Every case supplies a point cloud plus the indices of the claimed
    boundary points and asserts whether that disk is minimal.
    """

    def setUp(self) -> None:
        self.mdc = MinDiskChecker()
        # Escalate any warning raised during a test to a failure.
        warnings.filterwarnings("error")

    def test_min_disk_0_points(self):
        # The empty cloud with no boundary indices is trivially minimal.
        self.assertTrue(self.mdc.is_disk_minimal([], []))

    def test_min_disk_0_coords(self):
        pts = [Point(100, 100), Point(100, 200)]
        self.assertFalse(self.mdc.is_disk_minimal([], pts))

    def test_min_disk_1_point_ok(self):
        self.assertTrue(self.mdc.is_disk_minimal([0], [Point(100, 100)]))

    def test_min_disk_1_point_bad(self):
        pts = [Point(100, 100), Point(100, 50)]
        self.assertFalse(self.mdc.is_disk_minimal([0], pts))

    def test_min_disk_2_points_ok(self):
        pts = [Point(100, 100), Point(100, 0), Point(100, 50),
               Point(120, 40), Point(90, 60), Point(149, 50)]
        self.assertTrue(self.mdc.is_disk_minimal([0, 1], pts))

    def test_min_disk_2_points_ok_2(self):
        # Same cloud as above; boundary index order must not matter.
        pts = [Point(100, 100), Point(100, 0), Point(100, 50),
               Point(120, 40), Point(90, 60), Point(149, 50)]
        self.assertTrue(self.mdc.is_disk_minimal([1, 0], pts))

    def test_min_disk_2_points_wrong(self):
        # More indices than points must raise.
        with self.assertRaises(MinDiskCheckerException):
            self.mdc.is_disk_minimal([0, 1], [Point(100, 100)])

    def test_min_disk_2_points_bad(self):
        pts = [Point(100, 100), Point(100, 0), Point(100, 50), Point(120, 40),
               Point(90, 60), Point(149, 50), Point(200, 200)]
        self.assertFalse(self.mdc.is_disk_minimal([0, 1], pts))

    def test_min_disk_2_points_bad_2(self):
        pts = [Point(100, 100), Point(100, 0), Point(100, 50), Point(120, 40),
               Point(90, 60), Point(149, 50), Point(200, 200)]
        self.assertFalse(self.mdc.is_disk_minimal([1, 0], pts))

    def test_min_disk_3_points_ok(self):
        pts = [Point(0, 30), Point(24, -18), Point(-18, -24), Point(18, 24),
               Point(0, 0), Point(10, 15), Point(-24, 18), Point(-6, 21),
               Point(11, -16), Point(15, -25)]
        self.assertTrue(self.mdc.is_disk_minimal([0, 1, 2], pts))

    def test_min_disk_3_points_ok_2(self):
        pts = [Point(6, 3), Point(1, -2), Point(-3, 6), Point(0, 0),
               Point(2, 4), Point(4, 7)]
        self.assertTrue(self.mdc.is_disk_minimal([2, 1, 0], pts))

    def test_min_disk_3_points_ok_3(self):
        pts = [Point(6, 3), Point(1, -2), Point(-3, 6), Point(0, 0),
               Point(2, 4), Point(4, 7)]
        self.assertTrue(self.mdc.is_disk_minimal([1, 0, 2], pts))

    def test_min_disk_3_points_bad_obtuse(self):
        # The boundary triangle is not acute, so 3 points cannot define the disk.
        pts = [Point(6, 3), Point(1, -2), Point(0, 0), Point(2, 4), Point(4, 7)]
        self.assertFalse(self.mdc.is_disk_minimal([0, 1, 4], pts))

    def test_min_disk_3_points_ok_large_num(self):
        pts = [Point(948904, 106447), Point(344710, 448131),
               Point(803743, 922708), Point(78651, 108263)]
        self.assertTrue(self.mdc.is_disk_minimal([2, 3, 0], pts))

    def test_min_disk_3_points_ok_large_num_2(self):
        pts = [Point(948904, 106447), Point(344710, 448131),
               Point(803743, 922708), Point(78651, 108263)]
        self.assertTrue(self.mdc.is_disk_minimal([3, 0, 2], pts))

    def test_min_disk_3_points_ok_large_num_3(self):
        pts = [Point(948904, 106447), Point(344710, 448131),
               Point(803743, 922708), Point(78651, 108263)]
        self.assertTrue(self.mdc.is_disk_minimal([0, 3, 2], pts))

    def test_min_disk_3_points_ok_extra_large_num(self):
        # Exact integer determinants keep 10-digit coordinates precise.
        pts = [Point(9489040000, 1064470000), Point(3447100000, 4481310000),
               Point(8037430000, 9227080000), Point(786510000, 1082630000)]
        self.assertTrue(self.mdc.is_disk_minimal([2, 0, 3], pts))
|
{"/main.py": ["/min_disk_checker.py", "/point.py"], "/min_disk_checker.py": ["/point.py"], "/test.py": ["/min_disk_checker.py", "/point.py"]}
|
30,680
|
Keiichi-Hirano/python-LineBot-ddp
|
refs/heads/master
|
/app/models/noanswer.py
|
"""
author : nsuhara <na010210dv@gmail.com>
date created : 2019/5/1
python version : 3.7.3
"""
import datetime
import json
import logging
from linebot.models.actions import PostbackAction, URIAction
from linebot.models.template import ButtonsTemplate, TemplateSendMessage
from app.framework.nslinebot.models.story_board import StoryBoard
from app.processes.trash import Process
from linebot.models.messages import TextMessage
logger = logging.getLogger(__name__)
class noanswer(StoryBoard):
    """Fallback story board shown when no other model matches the message.

    NOTE(review): the lower-case class name is imported as-is by
    /app/models/__init__.py, so renaming it would break that registry.
    """
    def __init__(self):
        super().__init__()
        process = Process()
        # Maps a postback handle to the callable that produces the reply
        # (reuses the trash process imported at module level).
        self.PROCESS = {
            'noanswer_pro': process.what_day_of_garbage_is_today
        }
    def process_handler(self, kwargs):
        """Dispatch to the process registered for kwargs['handle'].

        An unknown handle makes .get() return None and the call raise
        TypeError — presumably callers only pass registered handles; confirm.
        """
        logger.info('process_handler:{}'.format(kwargs))
        return self.PROCESS.get(kwargs.get('handle'))()
    def story_board(self, text):
        """Return the scene->message mapping. The Japanese text apologises for
        not understanding and suggests typing "メニュー" to open the menu."""
        return {
            # answer
            'answer': TextMessage(text='すみません。会話を理解できませんでした。' + '\n' + '[メニュー]と頂ければメインメニューを表示できますよ。')
        }
|
{"/app/models/__init__.py": ["/app/models/noanswer.py"]}
|
30,681
|
Keiichi-Hirano/python-LineBot-ddp
|
refs/heads/master
|
/app/processes/ddp.py
|
"""
author :
date created : 2019/7/31
python version : 3.7.2
"""
import datetime
import logging
logger = logging.getLogger(__name__)
# DBMS-layer answer fragments (original comment's "DMBS" was a typo).
# NOTE(review): the *_answer defaults below are shadowed by same-named locals
# inside Process.DDP_check_process; they act only as placeholders.
DB_answer = ''
RDBMS = 'RDBMS使用の必須要件が発生した場合に、RDBMS(Oracle/DB2/PostgreSQL)を使用'
MarkLogic = 'MarklogicをDataHUBとして使用'
Hadoop = 'Data Aggregation(集計)データの格納を目的にDWHとしてHadoopを使用'
HANA = 'CokeOne Dataをリアルタイムにレポート・分析する際にHANAを使用'
CokeOne = 'CokeOneトランザクションの更新を伴う場合は、CokeOneシステムを使用'
# Business-logic-layer answer fragments
Logic_answer = ''
Abinito = 'ETLに関わるすべての処理を担うプラットフォームとしてAbInitoを使用(複数データの非同期更新)'
JAVA = 'API及び、データエントリーに関わるGUIの開発にてJAVA/Java Scriptを使用(少量データの即時同期更新)'
Python = '統計解析・分析・シュミレーション処理開発にPython及びRを使用(JAVA代替としも使用可能)'
ABAP = 'CokeOne・HANAを始めとするSAP環境では、専用開発言語のABAPを使用'
# Presentation-layer answer fragments
Pre_answer = ''
BI_tool = '分析用にAggregation(集計)されたデータを元に' + \
    'データを可視化(Visualization)分析を行う際にTableauまたは、Sisenseを使用' + \
    '(可視化のパターン変化が多い場合、継続して使用可能)'
UI5 = '・SAP HANA上でのレポートを行う際にUI5(SAPのHTMLベースGUI)を使用'
HTML5 = 'HTML5:標準化選定にてCokeOne以外のシステムはGUIをHTML5で構築する為' + \
    'UIを使用の際はHTML5を使用\n' + \
    'D3:Tableauで可視化(Visualization)されたものをHTMLベースで再構築する際に使用' + \
    '(可視化のパターン変化が少ない、又は、レポートの代替機能構築時に使用)'
SAP_GUI = 'SAP専用GUIを使用'
Export_File = 'UIの構築を伴わない場合、File Exportを実装(AbInito)'
class Process(object):
    """Implements the DDP (technology-selection) decision tree."""
    def __init__(self):
        pass
    # def DDP_check_process(self):
    def DDP_check_process(self, check1, check2, check3, check4, check5):
        """Walk the decision tree and return a recommendation string.

        Each check is 'Y'/'N' (meanings taken from the inline comments):
          check1 -- involves CokeOne transactions
          check2 -- CokeOne access is read-only
          check3 -- a UI is required
          check4 -- realtime is required
          check5 -- analytics/aggregation is required
        Returns Japanese text naming the recommended DB, development language
        and presentation layer.

        NOTE(review): the nesting below was reconstructed from the comments;
        confirm in particular that the final Realtime/JAVA branch belongs to
        the non-CokeOne path only.
        """
        # CokeOne Transaction
        if check1 == 'Y':
            # CokeOne read only
            if check2 == 'Y':
                # Realtime
                if check4 == 'Y':
                    DB_answer = HANA
                    Logic_answer = ABAP
                    # Use UI
                    if check3 == 'Y':
                        Pre_answer = UI5
                    else:
                        Pre_answer = Export_File
                # Non-Realtime
                else:
                    DB_answer = MarkLogic
                    Logic_answer = Abinito
                    # Use UI
                    if check3 == 'Y':
                        Pre_answer = HTML5
                    # Non-Use UI
                    else:
                        Pre_answer = Export_File
                # Analytics
                if check5 == 'Y':
                    DB_answer = DB_answer + '\n・また' + Hadoop
                    Logic_answer = Logic_answer + '\n・また' + Python
                # Non-Analytics
                else:
                    pass
                # Use UI + Analytics
                if check3 == 'Y' and check5 == 'Y':
                    Pre_answer = Pre_answer + '\n・また' + BI_tool
            else:
                # CokeOne CRUD
                DB_answer = CokeOne
                Logic_answer = ABAP
                # Use UI
                if check3 == 'Y':
                    Pre_answer = SAP_GUI
                # Non-Use UI
                else:
                    Pre_answer = Export_File
        # No CokeOne transaction involved
        else:
            DB_answer = RDBMS + '\n・また' + MarkLogic
            Logic_answer = Abinito
            # Use UI
            if check3 == 'Y':
                Pre_answer = HTML5
            # Non-Use UI
            else:
                Pre_answer = Export_File
            # Analytics
            if check5 == 'Y':
                DB_answer = DB_answer + '\n・また' + Hadoop
                Logic_answer = Logic_answer + '\n・また' + Python
            # Non-Analytics
            else:
                pass
            # Use UI + Analytics
            if check3 == 'Y' and check5 == 'Y':
                Pre_answer = Pre_answer + '\n・また' + BI_tool
            # Realtime
            if check4 == 'Y':
                Logic_answer = Logic_answer + '\n・また' + JAVA
        # return '1は{}・2は{}・3は{}・4は{}・5は{}です\n'.format(check1,check2,check3,check4,check5)
        return '1.DBは{}。\n\n2.開発言語は{}。\n\n3.プレゼンテーション機能は{}が推奨となります。'.format(DB_answer, Logic_answer, Pre_answer)
    # def _get_week_number(self, date_time):
    #     day = date_time.day
    #     week_number = 0
    #     while day > 0:
    #         week_number += 1
    #         day -= 7
    #     return week_number
|
{"/app/models/__init__.py": ["/app/models/noanswer.py"]}
|
30,682
|
Keiichi-Hirano/python-LineBot-ddp
|
refs/heads/master
|
/app/models/__init__.py
|
from .clock_in import ClockIn
from .main_menu import MainMenu
from .trash import Trash
from .ddp import Ddp
from .noanswer import noanswer
# Registry of story-board model classes, keyed by model id.
MODELS = {
    'main_menu': MainMenu,
    'clock_in': ClockIn,
    # 2019/07/03 add start
    # DDP condition menu
    'trash': Trash,
    'ddp': Ddp,
    'noanswer': noanswer
    # 2019/07/03 add end
}

# Maps an incoming chat message text to the (model, scene) that handles it.
MESSAGE_MODELS = {
    'メインメニュー': {
        'model': 'main_menu',
        'scene': 'menu'
    },
    # '勤怠メニュー': {
    #     'model': 'clock_in',
    #     'scene': 'menu'
    # },
    # 'ごみ出しメニュー': {
    #     'model': 'trash',
    #     'scene': 'menu'
    # 2019/07/03 add start
    # DDP condition menu
    # },
    'DDP利用メニュー': {
        'model': 'ddp',
        'scene': 'menu'
    },
    'noanswer': {
        'model': 'noanswer',
        'scene': 'answer'
    # 2019/07/03 add end
    }
}
|
{"/app/models/__init__.py": ["/app/models/noanswer.py"]}
|
30,683
|
Keiichi-Hirano/python-LineBot-ddp
|
refs/heads/master
|
/app/processes/noanswer.py
|
"""
author : nsuhara <na010210dv@gmail.com>
date created : 2019/5/1
python version : 3.7.3
"""
import datetime
import logging
logger = logging.getLogger(__name__)
class Process(object):
    """Stub process used by the noanswer flow; echoes its five check flags."""

    def __init__(self):
        pass

    def DDP_check_process(self, check1, check2, check3, check4, check5):
        """Return a diagnostic string listing the five check values."""
        return f'1は{check1}/2は{check2}/3は{check3}/4は{check4}/5は{check5}です\n'
|
{"/app/models/__init__.py": ["/app/models/noanswer.py"]}
|
30,684
|
tungvx/reporting
|
refs/heads/master
|
/urls.py
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Auto-discover ModelAdmin registrations from installed apps.
admin.autodiscover()

# Old-style (pre-Django-1.6) URL configuration; the string view names below
# are resolved relative to the 'reporting.views' prefix given to patterns().
urlpatterns = patterns('reporting.views',
    # Examples:
    # url(r'^$', 'report_tool.views.home', name='home'),
    # url(r'^report_tool/', include('report_tool.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # (r'^admin/report_tool/upload/$', 'views.index'),
    url(r'^add/$', 'upload_file', name='upload_file'),                            # .xls upload form
    url(r'^add_spreadsheet/$', 'spreadsheet_report', name='spreadsheet_report'),  # Google-spreadsheet report
    url(r'^list/$', 'file_list', name='file_list'),                               # uploaded/generated files
    url(r'^download/$', 'download_file', name='download_file'),
    url(r'^view_report/$', 'view_report', name='view_report'),
    # (r'^admin/report_tool/uploads/(?P<upload_id>\d+)/$', 'views.detail'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'index'),
    url(r'index$', 'index'),
    url(r'help$', 'help'),
)
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,685
|
tungvx/reporting
|
refs/heads/master
|
/generate_from_spreadsheet.py
|
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
import gdata
import gdata.spreadsheet.service
import gdata.service
import gdata.spreadsheet
import gdata.docs
import gdata.docs.data
import gdata.docs.client
import gdata.docs.service
import gdata.spreadsheet.service
except :
''
import datetime
import os
from report import generate
# Filesystem layout: everything lives beside this module.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))  # path of the app
FILE_UPLOAD_PATH = SITE_ROOT + '/uploaded'    # exported/uploaded .xls files
FILE_GENERATE_PATH = SITE_ROOT + '/generated' # generated report files
def generate_from_spreadsheet(key, token, username, password, request):
    """Export Google spreadsheet `key` as .xls, run the report generator on
    it, and upload the result back to Google Docs.

    Returns (message, output_link, title): message is 'ok' on success,
    otherwise a user-facing error string (the other values are then empty).
    """
    message = 'ok'  # indicates whether the whole pipeline succeeded
    try:  # fetch the document entry and export the spreadsheet as .xls
        gd_client = gdata.docs.service.DocsService()
        gd_client.email = username
        gd_client.password = password
        gd_client.ssl = True
        gd_client.source = "My Fancy Spreadsheet Downloader"
        gd_client.ProgrammaticLogin()
        uri = 'http://docs.google.com/feeds/documents/private/full/%s' % key
        entry = gd_client.GetDocumentListEntry(uri)
        title = entry.title.text
        spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
        spreadsheets_client.email = gd_client.email
        spreadsheets_client.password = gd_client.password
        spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
        spreadsheets_client.ProgrammaticLogin()
        # Export needs the spreadsheet service's auth token; remember the
        # docs token so it can be restored afterwards.
        docs_auth_token = gd_client.GetClientLoginToken()
        gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
        now = datetime.datetime.now()
        uploaded_file_name = str(now.year)+str(now.day)+str(now.month)+str(now.hour)+str(now.minute)+str(now.second) + '.xls'
        gd_client.Export(entry, FILE_UPLOAD_PATH + '/' + uploaded_file_name)
        gd_client.SetClientLoginToken(docs_auth_token)
    except Exception:
        # Narrowed from a bare "except:" (which also swallowed SystemExit and
        # KeyboardInterrupt); any auth/network/gdata failure lands here.
        return "Wrong spreadsheet link or you do not have permission to modify the file, please check again!", "", ""
    # Run the report generator on the exported workbook.
    request.session['is_spreadsheet'] = True
    message, response = generate(uploaded_file_name, request)
    request.session['is_spreadsheet'] = None
    if message != 'ok':
        return message, "", ""
    message, output_link = upload_result(uploaded_file_name, title, username, password)
    return message, output_link, title  # return the message
def upload_result(file_name, title, username, password):
    """Upload the generated .xls to Google Docs; return (message, output_link).

    message is 'ok' on success, otherwise a user-facing error string (with an
    empty link).
    """
    message = 'ok'
    try:
        gd_client = gdata.docs.service.DocsService(source='yourCo-yourAppName-v1')
        gd_client.ClientLogin(username, password)
    except Exception:
        # Narrowed from a bare "except:"; login failures land here.
        return "Wrong email or password!", ""
    try:
        ms = gdata.MediaSource(file_path=FILE_GENERATE_PATH + '/' + file_name, content_type=gdata.docs.service.SUPPORTED_FILETYPES['XLS'])
        entry = gd_client.Upload(ms, 'Report result of ' + title)
        output_link = entry.GetAlternateLink().href
    except Exception:
        # Narrowed from a bare "except:"; bad file or upload failure.
        return "Invalid file!", ""
    return message, output_link
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,686
|
tungvx/reporting
|
refs/heads/master
|
/admin.py
|
from reporting.models import Upload
from django.contrib import admin
class UploadAdmin(admin.ModelAdmin):
    """Admin layout for Upload rows: filename/description up front, with the
    upload timestamp collapsed under "Date information"."""
    fieldsets = [
        (None, {'fields': ['filename']}),
        (None, {'fields': ['description']}),
        ('Date information', {'fields': ['upload_time'], 'classes': ['collapse']}),
    ]
    # Columns shown on the admin changelist page.
    list_display = ('filename', 'upload_time', 'description')


# Make Upload manageable through the Django admin.
admin.site.register(Upload, UploadAdmin)
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,687
|
tungvx/reporting
|
refs/heads/master
|
/views.py
|
from time import time, ctime
from django.core.files import File
import os.path
import datetime
from django.core import serializers
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
from django.core.urlresolvers import reverse
from django.http.multipartparser import FILE
from django.shortcuts import render_to_response, redirect
from django.template.loader import render_to_string
from django.template import RequestContext, loader
from django.core.exceptions import *
from django.middleware.csrf import get_token
from django.utils import simplejson
from django.contrib.auth.forms import *
from django.template import Context, loader
from reporting.models import Upload,upload_file_form,handle_uploaded_file, Spreadsheet_report, spreadsheet_report_form
from django.http import HttpResponse,HttpResponseRedirect
import datetime
import reporting.definitions
from django.core.servers.basehttp import FileWrapper
from xlwt.Workbook import Workbook
import xlrd,xlwt
from reporting.report import generate
from reporting.generate_from_spreadsheet import generate_from_spreadsheet
import mimetypes
import os
from urlparse import urlparse, parse_qs
import gdata.service
import settings
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django import forms
# Filesystem layout and template names; all data folders live beside the code.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
UPLOAD = 'upload.html'                           # template: upload form
SPREADSHEET_REPORT = 'spreadsheet_report.html'   # template: spreadsheet form
FILE_LIST = 'filelist.html'                      # template: file listing
FILE_UPLOAD_PATH = SITE_ROOT + '/uploaded'       # raw uploaded .xls files
FILE_GENERATE_PATH = SITE_ROOT + '/generated'    # generated reports
FILE_INSTRUCTION_PATH = SITE_ROOT + '/instructions'
DATABASE_PATH = SITE_ROOT + '/databases'         # per-user database files
def index(request):
    """Render the landing page with a welcome banner."""
    template = loader.get_template(os.path.join('index.html'))
    context = RequestContext(request, {
        'message': "Welcome to Reporting system",
    })
    return HttpResponse(template.render(context))
def help(request):
    """Render the static help page.

    The name shadows the builtin ``help`` but is referenced as the string
    'help' from urls.py, so it must stay.
    """
    template = loader.get_template(os.path.join('help.html'))
    context = RequestContext(request, {
        'message': None,
    })
    return HttpResponse(template.render(context))
def download_file(request):
    """Stream a stored .xls file back to the browser as an attachment.

    GET params:
        filename -- name of the file on disk
        path     -- an *expression* evaluated to the directory path (see warning)
    Falls back to the file-list page with an error message when the file is
    missing (or the request is not GET).
    """
    message = None
    if request.method == "GET":
        fname = request.GET['filename']
        # SECURITY: eval() of a request parameter lets any client run
        # arbitrary Python. Kept only for compatibility with existing
        # templates that pass e.g. "FILE_GENERATE_PATH"; replace with a
        # whitelist such as {'uploaded': FILE_UPLOAD_PATH,
        # 'generated': FILE_GENERATE_PATH} as soon as templates allow.
        path = eval(request.GET['path'])
        try:
            wrapper = FileWrapper(open('%s/%s' % (path, fname), "r"))
            response = HttpResponse(wrapper, mimetype='application/ms-excel')
            response['Content-Disposition'] = u'attachment; filename=%s' % fname
            return response
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate; a missing/unreadable file lands here.
            message = 'The file you requested does not exist or is deleted due to time limit!'
    c = RequestContext(request)
    return render_to_response(FILE_LIST, {'message': message}, context_instance=c)
def file_list(request):
    """List every upload and spreadsheet report, newest first."""
    uploads = list(Upload.objects.order_by('-upload_time'))
    spreadsheets = list(Spreadsheet_report.objects.order_by('-created_time'))
    context = RequestContext(request)
    return render_to_response(
        FILE_LIST,
        {'message': None, 'file_list': uploads, 'spreadsheet_list': spreadsheets},
        context_instance=context,
    )
def upload_file(request):
    """Handle the .xls upload form.

    POST: validate the form, reject non-.xls files, store the upload under a
    timestamped name, run the report generator, and either show the new file
    in the list (success) or delete the stored file and re-show the form with
    the generator's error (failure).
    GET: render an empty upload form.
    """
    message = None
    if request.method == 'POST':  # If file form is submitted
        form = upload_file_form(request.POST, request.FILES)
        if form.is_valid():  # checking form validity
            f = request.FILES['file']
            fileName, fileExtension = os.path.splitext(f.name);
            if fileExtension != ('.xls'):
                message = 'wrong file extension'
            else:
                now = datetime.datetime.now()
                # Stored name is timestamp + original name to avoid clashes.
                # NOTE(review): day comes before month in the timestamp and
                # the fields are not zero-padded — presumably intentional.
                temp = Upload(filestore=str(now.year)+str(now.day)+str(now.month)+str(now.hour)+str(now.minute)+str(now.second)+f.name, filename=f.name, description=request.POST['description'], upload_time=datetime.datetime.now())
                handle_uploaded_file(f, FILE_UPLOAD_PATH, temp.filestore)  # Save file content to uploaded folder
                generator, response = generate(temp.filestore, request)
                if generator != "ok":
                    # Generation failed: drop the stored file, re-show the form.
                    message = generator
                    c = RequestContext(request)
                    os.remove(FILE_UPLOAD_PATH + '/' + temp.filestore)
                    return render_to_response(UPLOAD, {'form': form, 'message': message},
                                              context_instance=c
                                              )
                else:
                    temp.save()  # Save file information into database
                    message = "Uploaded successfully. Your uploaded and generated file will be stored shortly. You should download them in the file list page as soon as possible!"
                    c = RequestContext(request)
                    file_list = [temp]
                    return render_to_response(FILE_LIST, {'file_list': file_list, 'message': message},
                                              context_instance=c
                                              )
        else:
            message = "Error"
            # return HttpResponseRedirect('http://127.0.0.1:8000/admin')
    else:  # if file is not submitted then generate the upload form
        form = upload_file_form()
    c = RequestContext(request)
    return render_to_response(UPLOAD, {'form': form, 'message': message},
                              context_instance=c
                              )
def spreadsheet_report(request):
    """Create a report from a Google Spreadsheet link submitted by the user.

    POST: validate the form, extract the spreadsheet key from the link's
    "key" query parameter, run the generator with the supplied Google
    credentials, persist a Spreadsheet_report row and show it in the list.
    GET: render an empty form.
    """
    message = ''
    if request.method == 'POST':  # if the form is submitted
        form = spreadsheet_report_form(request.POST)
        if form.is_valid():
            spreadsheet_key = None
            spreadsheet_link = request.POST.get('spreadsheet_link')
            username = request.POST.get('username')  # Google account name
            password = request.POST.get('password')  # Google account password
            # The key is carried in the link's "key" query parameter.
            try:
                spreadsheet_key = parse_qs(urlparse(spreadsheet_link).query).get('key')[0]
            except Exception:
                # Narrowed from a bare "except:"; malformed/keyless links land here.
                message = 'Wrong link'
                c = RequestContext(request)
                return render_to_response(SPREADSHEET_REPORT, {'form': form, 'message': message}, context_instance=c)
            if not spreadsheet_key:  # empty or missing key
                message = 'Please enter the correct spreadsheet link'
                c = RequestContext(request)
                return render_to_response(SPREADSHEET_REPORT, {'form': form, 'message': message}, context_instance=c)
            # From the key of the spreadsheet, generate the report.
            generator, output_link, title = generate_from_spreadsheet(spreadsheet_key, request.session.get('token'), username, password, request)
            if generator != 'ok':
                # Generation failed: re-show the form with the error text.
                message = generator
                c = RequestContext(request)
                return render_to_response(SPREADSHEET_REPORT, {'form': form, 'message': message}, context_instance=c)
            else:
                now = datetime.datetime.now()
                spreadsheet_report_object = Spreadsheet_report(created_time=now, description=request.POST['description'], spreadsheet_link=spreadsheet_link, output_link=output_link, title=title)
                spreadsheet_report_object.save()
                message = "Successfully generate the report"
                c = RequestContext(request)
                spreadsheet_list = [spreadsheet_report_object]
                # BUG FIX: the original passed the module-level *view function*
                # ``file_list`` as the template's file list; this flow creates
                # no Upload rows, so pass an empty list instead.
                return render_to_response(FILE_LIST, {'message': message, 'file_list': [], 'spreadsheet_list': spreadsheet_list},
                                          context_instance=c
                                          )
        else:  # if the form is not valid, then report the error
            message = 'Please enter the required fields'
    else:  # GET: user wants to create a new report from a spreadsheet
        form = spreadsheet_report_form()
    c = RequestContext(request)
    return render_to_response(SPREADSHEET_REPORT, {'form': form, 'message': message}, context_instance=c)
def view_report(request):
    """Regenerate and return the report for the file named in the query string.

    The generator's status message is intentionally discarded; only the
    rendered response is returned, exactly as before.
    """
    filename = request.GET['filename']
    _, response = generate(filename, request)
    return response
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,688
|
tungvx/reporting
|
refs/heads/master
|
/extract_information.py
|
import re
import xlwt
from reporting.models import Upload, Spreadsheet_report
import django
import definitions
try:
import sqlite3
import psycopg2
import psycopg2.extras
import MySQLdb
except :
''
try:
from report_tool.settings import DATABASE_PATH
except :
''
#this function is used for extracting information from a string input value
def extract_information(index_of_function, index_of_group, body, indexes_of_body,
                        index_of_excel_function, excel_function, value, row_x, col_x, other_info, index_of_other_info,
                        body_input, indexes_of_body_input, head, index_of_head, head_input, index_of_head_input,
                        foot, index_of_foot, foot_input, index_of_foot_input,
                        once, index_of_once, once_input, index_of_once_input, group, reserve_postions, index_of_end_group):
    """Classify one template-cell value and record it into the caller's
    accumulators (every list/dict argument is mutated in place).

    Recognised markup in `value` located at (row_x, col_x):
      #<name>                  -- data-source function; its name is returned
      {{head<key>:<attr>}}     -- per-group header field
      {{foot<key>:<attr>}}     -- per-group footer field
      {{once:<attr>}}          -- field emitted a single time
      {{<attr>}}               -- ordinary body field
      :=...                    -- cell value holding an Excel formula
      <group<key>:...>, </group<key>> -- group begin/end markers
    Anything else is stored in other_info.

    Returns the extracted data-function name, or '' when the cell has none.
    """
    function_name = ''
    value = unicode(value)
    temp = re.search('#<.*?>', value)  # does the cell hold the data function?
    if temp:
        # '#<name>': remember the function name and where it sits.
        function_name = (temp.group(0).rstrip('>').lstrip('#<'))  # remove > at the right and #< at the left
        index_of_function.append((row_x, col_x))
        if (row_x, col_x) not in reserve_postions:
            reserve_postions.append((row_x, col_x))
    else:
        temp = re.findall('{{.*?}}', unicode(value))  # all {{field}} markers
        if temp:
            for temp1 in temp:  # iterate all fields of the cell
                temp1 = temp1.rstrip('}}').lstrip('{{')  # strip the braces
                if (temp1.startswith('head')):
                    # '{{head<key>:<attr>}}' -> per-group header field
                    temp_head = temp1[4:]
                    head_key = temp_head[:temp_head.index(':')]
                    if not head.get(head_key):
                        # First field for this key: initialise all accumulators.
                        head[head_key] = []
                        index_of_head[head_key] = []
                        index_of_head_input[head_key] = []
                        head_input[head_key] = []
                    head[head_key].append(temp_head[temp_head.index(':') + 1:])
                    index_of_head[head_key].append((row_x, col_x))
                    if (row_x, col_x) not in index_of_head_input.get(head_key):
                        # Remember the raw cell text once per position.
                        head_input[head_key].append(value)
                        index_of_head_input[head_key].append((row_x, col_x))
                elif (temp1.startswith('foot')):
                    # '{{foot<key>:<attr>}}' -> per-group footer field
                    temp_foot = temp1[4:]
                    foot_key = temp_foot[:temp_foot.index(':')]
                    if not foot.get(foot_key):
                        foot[foot_key] = []
                        index_of_foot[foot_key] = []
                        index_of_foot_input[foot_key] = []
                        foot_input[foot_key] = []
                    foot[foot_key].append(temp_foot[temp_foot.index(':') + 1:])
                    index_of_foot[foot_key].append((row_x, col_x))
                    if (row_x, col_x) not in index_of_foot_input.get(foot_key):
                        foot_input[foot_key].append(value)
                        index_of_foot_input[foot_key].append((row_x, col_x))
                elif (temp1.startswith('once:')):
                    # '{{once:<attr>}}' -> emitted only once
                    if (row_x, col_x) not in index_of_once_input:
                        once_input.append(value)
                        index_of_once_input.append((row_x, col_x))
                    once.append(temp1[5:])
                    index_of_once.append((row_x, col_x))
                else:
                    # Plain '{{attr}}' -> body field.
                    if (row_x, col_x) not in indexes_of_body_input:
                        body_input.append(value)
                        indexes_of_body_input.append((row_x, col_x))
                    body.append(temp1)
                    indexes_of_body.append((row_x, col_x))
            # NOTE(review): placement reconstructed from the flattened source —
            # ':=' formulas appear to be recognised only in cells that also
            # contain {{...}} fields; confirm against the original layout.
            if value.startswith(":="):
                excel_function.append(value)  # cell holds an Excel function
                index_of_excel_function.append((row_x, col_x))
                if (row_x, col_x) not in reserve_postions:
                    reserve_postions.append((row_x, col_x))
        else:
            temp = re.findall('<.*?>', unicode(value))  # group begin/end tags
            if temp:
                for temp1 in temp:
                    temp1 = temp1.rstrip('>').lstrip('<')  # remove tags to get attributes
                    if (temp1.startswith('group')):
                        # '<group<key>:<attr>>' -> group start
                        temp_group = temp1[5:]  # remove 'group'
                        group_key = temp_group[:temp_group.index(':')]
                        group[group_key] = temp_group[temp_group.index(':') + 1:]
                        index_of_group[group_key] = (row_x, col_x)
                    elif (temp1.startswith('/group')):
                        # '</group<key>>' -> group end
                        temp_group = temp1[6:]  # remove '/group'
                        group_key = temp_group
                        index_of_end_group[group_key] = (row_x, col_x)
                if (row_x, col_x) not in reserve_postions:
                    reserve_postions.append((row_x, col_x))
            else:
                # No markup at all: plain content, copied through verbatim.
                other_info.append(value)
                index_of_other_info.append((row_x, col_x))
    return function_name
#function to get a list of objects containing the data
def get_list_of_object(function_name, index_of_function, request):
    """Resolve ``function_name`` into a list of data objects.

    Resolution order:
      1. a helper defined in definitions.py,
      2. a raw Python expression,
      3. a SQL query executed against the user's configured database
         (sqlite / mysql / postgresql).

    Returns ``('ok', objects)`` on success, otherwise ``(error_message, [])``.

    SECURITY NOTE(review): this evaluates user-supplied text with ``eval``
    and executes it as raw SQL — only safe with trusted template authors.
    """
    if function_name == '':
        return 'ok', []
    list_objects = None
    #try to resolve via definitions.py first, then as a raw expression
    try:
        list_objects = eval('definitions.%s' % function_name)
    except Exception:
        try:
            list_objects = eval(function_name)
        except Exception:
            pass  # fall through to the database path below
    #duck-type check: anything with a length is accepted as the result list
    try:
        if len(list_objects) >= 0:
            return 'ok', list_objects
    except Exception:
        pass
    try:
        current_user = request.user.get_profile() #get the user's profile
    except Exception:
        return 'You must set up you database!', []
    try:
        database_engine = current_user.database_engine #get database engine
    except Exception:
        try:
            return 'Data specification error at cell ' + xlwt.Utils.rowcol_to_cell(index_of_function[0][0], index_of_function[0][1]), []
        except Exception:
            return 'The data function must be specified!', []
    if database_engine == 'sqlite':
        #connect to the per-user sqlite database file
        try:
            # BUGFIX(review): original referenced an undefined name ``user``;
            # the authenticated user lives on ``request.user``.
            connection = sqlite3.connect(database = DATABASE_PATH + '/' + request.user.username + '.db')
            connection.row_factory = dict_factory
            cursor = connection.cursor()
        except Exception:
            return "Wrong database file!", []
    elif database_engine == 'mysql':
        #connect to mysql
        try:
            connection = MySQLdb.connect (host = current_user.database_host,
                                          user = current_user.database_user,
                                          passwd = current_user.database_password,
                                          db = current_user.database_name)
            cursor = connection.cursor (MySQLdb.cursors.DictCursor)
        except Exception:
            return 'Wrong database settings!', []
    elif database_engine == 'postgresql':
        try:
            # BUGFIX(review): removed a debug print that leaked the database
            # password to stdout.
            connection = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" %(current_user.database_name, current_user.database_user, current_user.database_host, current_user.database_password));
            cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
        except Exception:
            return 'Wrong database settings',[]
    else:
        # BUGFIX(review): an unknown engine previously fell through and
        # crashed with NameError on ``cursor``.
        return 'Wrong database settings', []
    try:
        cursor.execute(function_name)
        list_objects = cursor.fetchall()
    except Exception:
        connection.close()  # BUGFIX(review): connection was leaked on query errors
        try:
            return 'Query syntax error at cell ' + xlwt.Utils.rowcol_to_cell(index_of_function[0][0], index_of_function[0][1]), []
        except Exception:
            return 'The query must be specified!', []
    #close the connection before returning the rows
    connection.close()
    return 'ok', list_objects
def dict_factory(cursor, row):
    """sqlite3 row factory: map each column name in ``cursor.description``
    to the value at the same position in ``row``."""
    return {column[0]: row[position]
            for position, column in enumerate(cursor.description)}
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,689
|
tungvx/reporting
|
refs/heads/master
|
/definitions.py
|
# Optional Django models used by the helpers below; the module must stay
# importable even when Django / the report_tool app is absent.
try:
    from report_tool.models import Pupil
    from django.contrib.admin.models import LogEntry
except Exception:
    # BUGFIX(review): was a bare ``except :`` followed by a no-op string
    # expression; narrowed and made an explicit no-op.
    pass
def get_ds_hs():
    """Return all Pupil records (report data source).

    NOTE(review): "ds_hs" is presumably Vietnamese "danh sách học sinh"
    (pupil list) — confirm.
    """
    return Pupil.objects.all()
def get_student_in_class(_class_name):
    """Return pupils whose class name equals ``_class_name``."""
    return Pupil.objects.filter(class_id__name = _class_name)
def get_admin_log():
    """Return every Django admin LogEntry (report data source)."""
    return LogEntry.objects.all()
# Optional school/app models; keep the module importable without them.
try:
    from school.models import *
    from app.models import *
except Exception:
    # BUGFIX(review): was a bare ``except`` doing ``print ''`` (a Python-2-only
    # statement and a pointless side effect).
    pass
def mark_for_class(request):
    """Return current Mark rows for the class and term stored in the session,
    ordered by pupil index, name and birthday.

    NOTE(review): this reads session key 'termNumber' while other helpers in
    this file read 'term_number' — confirm which key callers actually set.
    """
    # return Mark.objects.filter(student_id__class_id__name = '6 A1')
    return Mark.objects.filter(subject_id__class_id__id = int(request.session.get('class_id')),term_id__number=int(request.session.get('termNumber')),current=True).order_by('student_id__index','student_id__first_name','student_id__last_name','student_id__birthday')
def student_list(request=None):
    """Return the pupils of the session's class, or every pupil when called
    without a request.

    NOTE(review): the file originally defined ``student_list`` twice — once
    taking ``request`` and once taking nothing — so the second definition
    silently shadowed the first.  Merged into one backward-compatible
    function that supports both call signatures.
    """
    if request is None:
        return Pupil.objects.all()
    return Pupil.objects.filter(class_id__id = int(request.session.get('class_id')))
def get_class(request):
    """Return a one-element queryset holding the session's current class."""
    return Class.objects.filter(id = int(request.session.get('class_id')))
def get_class_list(request):
    """Return the classes of the session's school year, ordered by name.

    Side effect: caches the list in ``request.session['class_list']`` and
    records that key in ``session['additional_keys']``.
    """
    class_list = Class.objects.filter(year_id__id = int(request.session.get('year_id'))).order_by('name')
    request.session['class_list'] = class_list
    request.session['additional_keys'].append('class_list')
    return class_list
def get_subject_list_by_class(request):
    """Return subjects matching the session's subject name and year, ordered
    by class."""
    return Subject.objects.filter(name=request.session.get('subject_name'),class_id__year_id__id = int(request.session.get('year_id'))).order_by('class_id')
def get_subject_list_by_teacher(request):
    """Return subjects matching the session's subject name and year that have
    a teacher assigned, ordered by teacher name."""
    return Subject.objects.filter(name=request.session.get('subject_name'),class_id__year_id=int(request.session.get('year_id')),teacher_id__isnull=False).order_by('teacher_id__first_name','teacher_id__last_name')
def get_dh(request):
    """Return honour-title ("danh hiệu") records for the school, year and
    term stored in the session.

    ``type`` selects the subset: 1 -> 'G' only, 2 -> 'TT' only, 3 -> both.
    NOTE(review): ``type`` shadows the builtin, and when it is not 1, 2 or 3
    the final ``return danhHieus`` raises NameError — confirm callers always
    supply a valid type.
    """
    termNumber = int(request.session.get('term_number'))
    year_id = int(request.session.get('year_id'))
    type = int(request.session.get('type'))
    school_id = int(request.session.get('school_id'))
    if int(termNumber) < 3:
        # terms 1 and 2: per-term summary rows (TBHocKy)
        if type == 1:
            danhHieus = TBHocKy.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id, term_id__number=termNumber,
                danh_hieu_hk='G').order_by("student_id__index")
        elif type == 2:
            danhHieus = TBHocKy.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id, term_id__number=termNumber,
                danh_hieu_hk='TT').order_by("student_id__index")
        elif type == 3:
            danhHieus = TBHocKy.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id, term_id__number=termNumber,
                danh_hieu_hk__in=['G', 'TT']).order_by("danh_hieu_hk",
                "student_id__index")
    else:
        # term number 3 means the whole-year summary rows (TBNam)
        if type == 1:
            danhHieus = TBNam.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id, danh_hieu_nam='G').order_by("student_id__index")
        elif type == 2:
            danhHieus = TBNam.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id, danh_hieu_nam='TT').order_by("student_id__index")
        elif type == 3:
            danhHieus=TBNam.objects.filter(student_id__classes__block_id__school_id__id = school_id, student_id__classes__year_id__id=year_id,danh_hieu_nam__in=['G','TT']).order_by("danh_hieu_nam","student_id__index")
    return danhHieus
def get_pupils_no_pass(request):
    """Return year-summary rows for pupils who did not fully pass the year.

    ``type`` presumably selects: 1 -> not promoted (len_lop=False),
    2 -> must retake exams (thi_lai=True), 3 -> must redo conduct training
    (ren_luyen_lai=True) — confirm the field semantics against the models.
    NOTE(review): an unexpected ``type`` leaves ``pupils`` unbound and the
    final return raises NameError; ``type`` also shadows the builtin.
    """
    type = int(request.session.get('type'))
    school_id = int(request.session.get('school_id'))
    year_id = int(request.session.get('year_id'))
    if type == 1:
        pupils = TBNam.objects.filter(student_id__classes__block_id__school_id = school_id, student_id__classes__year_id__id=year_id, len_lop=False).order_by("student_id__index")
    elif type == 2:
        pupils = TBNam.objects.filter(student_id__classes__block_id__school_id = school_id, student_id__classes__year_id__id=year_id, thi_lai=True).order_by("student_id__index")
    elif type == 3:
        pupils = TBNam.objects.filter(student_id__classes__block_id__school_id = school_id, student_id__classes__year_id__id=year_id, ren_luyen_lai=True).order_by("student_id__index")
    return pupils
# Department-level reports ("bao cao cap so"):
def get_year(request):
    """Return (as a one-element list) the latest school year of the fixed
    organization, seeding session defaults for the year-level reports.

    NOTE(review): Organization id '2' is hard-coded — confirm this is the
    intended school record.
    """
    school = Organization.objects.get(id = '2')
    year = school.year_set.latest('time')
    request.session["term_number"] = 3
    request.session["year_id"] = year.id
    request.session["additional_keys"] = []
    return [year]
def get_block_list(request):
    """Return the blocks of the fixed organization, seeding session defaults
    (term 1, latest year).

    NOTE(review): Organization id '2' is hard-coded — confirm this is the
    intended school record.
    """
    school = Organization.objects.get(id = '2')
    year = school.year_set.latest('time')
    request.session["term_number"] = 1
    request.session["year_id"] = year.id
    request.session["additional_keys"] = []
    return Block.objects.filter(school_id=school.id)
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,690
|
tungvx/reporting
|
refs/heads/master
|
/models.py
|
# -*- coding: utf-8 -*-
import datetime
from django.db import models
from django import forms
class Upload(models.Model): #Upload files table in databases
    """An uploaded template file and its metadata."""
    filename = models.CharField(max_length=255)        # original name of the uploaded file
    upload_time = models.DateTimeField('time uploaded')
    description = models.CharField(max_length=255)     # user-supplied label (used as display name)
    filestore = models.CharField(max_length=255)       # presumably the on-disk file name — confirm
    def __unicode__(self):
        # admin / listings show the user-supplied description
        return self.description
class Spreadsheet_report(models.Model): # model to store the information about the spreadsheet used by user
    """A report generated from a user's spreadsheet: source link, output link
    and metadata."""
    created_time = models.DateTimeField('time created')
    description = models.CharField(max_length=255)
    spreadsheet_link = models.CharField(max_length=255)  # source spreadsheet location
    output_link = models.CharField(max_length=255)       # location of the generated result
    title = models.CharField(max_length=255)
    def __unicode__(self):
        # admin / listings show the user-supplied description
        return self.description
class upload_file_form(forms.Form): # Define a simple form for uploading excels file
    """Form collecting a description plus the spreadsheet file to upload."""
    description = forms.CharField(max_length=255,required=True)
    file = forms.FileField(required=True,)
def handle_uploaded_file(f,location,filename):
    """Write the chunks of the uploaded file ``f`` to ``location/filename``.

    ``f`` is a Django UploadedFile (anything exposing ``chunks()``).
    BUGFIX(review): the original used open/close without try/finally, leaking
    the file handle if a chunk read or write raised; ``with`` guarantees the
    handle is closed.
    """
    with open('%s/%s' % (location, str(filename)), 'wb') as fd:
        for chunk in f.chunks():
            fd.write(chunk) #Write file data chunk by chunk
class spreadsheet_report_form(forms.Form):
    """Form for registering a spreadsheet-based report; the link is optional."""
    description = forms.CharField(max_length=255,required=True)
    spreadsheet_link = forms.CharField(max_length=255,required=False)
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,691
|
tungvx/reporting
|
refs/heads/master
|
/tests.py
|
from django.test import TestCase
from reporting.models import Upload, Spreadsheet_report
from datetime import datetime
from extract_information import get_list_of_object, extract_information
from generate_from_spreadsheet import upload_result
class SimpleTest(TestCase):
    """Tests for the reporting app: model display names, data-function
    resolution (get_list_of_object), template marker extraction
    (extract_information) and the result-upload helper (upload_result).
    """
    def setUp(self):
        # minimal fixture rows used by the naming tests below
        self.upload = Upload.objects.create(filename = 'tung.xls', upload_time = datetime.now(), description = "tung", filestore = "tung.xls")
        self.spreadsheet_report = Spreadsheet_report.objects.create(description = 'tung', created_time = datetime.now())
    def test_returned_name(self):
        "Upload object should have name same as it's description"
        self.assertEqual(str(self.upload), 'tung')
        self.assertEqual(str(self.spreadsheet_report), 'tung')
    def test_get_list_of_object(self):
        #test if the function get_list_of_object is correct
        # NOTE(review): get_list_of_object is called with two arguments here,
        # but the visible implementation takes (function_name,
        # index_of_function, request) — confirm the signatures agree.
        message, objects_list = get_list_of_object('Upload.objects.all()', [(1,2)])
        self.assertEqual(message, 'ok') #check if the returned message is 'ok;
        #check if the returned objects list is the correct list
        self.assertEqual(objects_list[0],self.upload)
        self.assertEqual(len(objects_list),1)
        #test for exception, when both argument of this function is empty:
        message, objects_list = get_list_of_object('',[])
        # NOTE(review): the implementation's message reads 'The data function
        # must be specified!' — the string expected below differs; confirm.
        self.assertEqual(message, 'The data function should be specify!')
        self.assertEqual(objects_list, [])
        #test if the function is not correct, then the correct message should be returned
        message, objects_list = get_list_of_object('toilatung', [(1,2)])
        # NOTE(review): the implementation's message reads 'Data specification
        # error at cell ...' — the string expected below differs; confirm.
        self.assertEqual(message, 'Definition of data function error at cell C2')
        self.assertEqual(objects_list, [])
        #test if the function if correct, but returned value of object_list is not appropriate
        message, objects_list = get_list_of_object('Upload.objects', [(1,2)])
        self.assertEqual(message, 'The function you defined returns wrong result (must return a list of objects):cell C2')
        self.assertEqual(objects_list, [])
    def test_extract_information_function(self):
        # accumulators passed (and mutated) by extract_information
        index_of_function = []
        index_of_head = []
        body = []
        indexes_of_body = []
        index_of_excel_function = []
        excel_function = []
        other_info = []
        index_of_other_info = []
        #call function to test function_name extraction
        function_name, head = extract_information(index_of_function, index_of_head, body, indexes_of_body,
            index_of_excel_function, excel_function, '#<function()>', 1, 2, other_info, index_of_other_info)
        self.assertEqual(function_name, 'function()') #test if the function_name is extracted correctly
        self.assertEqual(head, '') #test if the head is extracted correctly
        self.assertEqual(index_of_function, [(1,2)]) #test if the the index of function_name is assigned correctly
        self.assertEqual(index_of_head,[]) #the index of head should be empty
        self.assertEqual(body, []) #the body should be empty
        self.assertEqual(indexes_of_body, []) #the index of body should be empty
        self.assertEqual(index_of_excel_function, []) #the index of excel function should be empty
        self.assertEqual(excel_function, []) #the excel function list should be empty
        self.assertEqual(other_info, []) #other information should be empty
        self.assertEqual(index_of_other_info, []) #index of other information should be empty
        #test for head extraction
        function_name, head = extract_information(index_of_function, index_of_head, body, indexes_of_body,
            index_of_excel_function, excel_function, '{{head:head}}', 1, 2, other_info, index_of_other_info)
        self.assertEqual(function_name, '') #test if the function_name is extracted correctly
        self.assertEqual(head, 'head') #test if the head is extracted correctly
        self.assertEqual(index_of_function, [(1,2)]) #test if the the index of function_name is assigned correctly
        self.assertEqual(index_of_head,[(1,2)]) #the index of head should be [(1,2)]
        self.assertEqual(body, []) #the body should be empty
        self.assertEqual(indexes_of_body, []) #the index of body should be empty
        self.assertEqual(index_of_excel_function, []) #the index of excel function should be empty
        self.assertEqual(excel_function, []) #the excel function list should be empty
        self.assertEqual(other_info, []) #other information should be empty
        self.assertEqual(index_of_other_info, []) #index of other information should be empty
        #test for body extraction
        function_name, head = extract_information(index_of_function, index_of_head, body, indexes_of_body,
            index_of_excel_function, excel_function, '{{body:body}}', 1, 2, other_info, index_of_other_info)
        self.assertEqual(function_name, '') #test if the function_name is extracted correctly
        self.assertEqual(head, '') #test if the head is extracted correctly
        self.assertEqual(index_of_function, [(1,2)]) #test if the the index of function_name is assigned correctly
        self.assertEqual(index_of_head,[(1,2)]) #the index of head should be [(1,2)]
        self.assertEqual(body, ['body']) #the body should be ['body']
        self.assertEqual(indexes_of_body, [(1,2)]) #the index of body should be empty
        self.assertEqual(index_of_excel_function, []) #the index of excel function should be empty
        self.assertEqual(excel_function, []) #the excel function list should be empty
        self.assertEqual(other_info, []) #other information should be empty
        self.assertEqual(index_of_other_info, []) #index of other information should be empty
        #test for excel_function extraction
        function_name, head = extract_information(index_of_function, index_of_head, body, indexes_of_body,
            index_of_excel_function, excel_function, ':= "{{body:body2}}" + "tung"', 1, 2, other_info, index_of_other_info)
        self.assertEqual(function_name, '') #test if the function_name is extracted correctly
        self.assertEqual(head, '') #test if the head is extracted correctly
        self.assertEqual(index_of_function, [(1,2)]) #test if the the index of function_name is assigned correctly
        self.assertEqual(index_of_head,[(1,2)]) #the index of head should be [(1,2)]
        self.assertEqual(body, ['body', 'body2']) #the body should be ['body', 'body2']
        self.assertEqual(indexes_of_body, [(1,2), (1,2)]) #the index of body should be empty
        self.assertEqual(index_of_excel_function, [(1,2)]) #the index of excel function should be [(1,2)]
        self.assertEqual(excel_function, [':= "{{body:body2}}" + "tung"']) #the excel function list should be correct
        self.assertEqual(other_info, []) #other information should be empty
        self.assertEqual(index_of_other_info, []) #index of other information should be empty
        #test for other information extraction:
        function_name, head = extract_information(index_of_function, index_of_head, body, indexes_of_body,
            index_of_excel_function, excel_function, 'tung', 1, 2, other_info, index_of_other_info)
        self.assertEqual(function_name, '') #test if the function_name is extracted correctly
        self.assertEqual(head, '') #test if the head is extracted correctly
        self.assertEqual(index_of_function, [(1,2)]) #test if the the index of function_name is assigned correctly
        self.assertEqual(index_of_head,[(1,2)]) #the index of head should be [(1,2)]
        self.assertEqual(body, ['body', 'body2']) #the body should be ['body', 'body2']
        self.assertEqual(indexes_of_body, [(1,2), (1,2)]) #the index of body should be empty
        self.assertEqual(index_of_excel_function, [(1,2)]) #the index of excel function should be [(1,2)]
        self.assertEqual(excel_function, [':= "{{body:body2}}" + "tung"']) #the excel function list should be correct
        self.assertEqual(other_info, ['tung']) #other information should be correct
        self.assertEqual(index_of_other_info, [(1,2)]) #index of other information should be correct
    #function to test upload_result function
    def test_upload_result(self):
        # NOTE(review): these cases depend on a real account and on files
        # existing on disk ('20121210290.xls') — they are integration tests,
        # not unit tests, and will fail offline.
        #test for wrong email and password
        message,output_link = upload_result('20121210290.xls','', 'username', 'password')
        self.assertEqual(message, 'Wrong email or password!') #the message returned should be correct
        self.assertEqual(output_link, '') #the returned output_link should be empty
        #test for wrong filename:
        message, output_link = upload_result('noname.xls','','toilatungfake1', 'toilatung')
        self.assertEqual(message, 'Invalid file!')
        self.assertEqual(output_link, '')
        #test the success of function if the parameters are correct
        message, output_link = upload_result('20121210290.xls','','toilatungfake1', 'toilatung')
        self.assertEqual(message, 'ok')
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,692
|
tungvx/reporting
|
refs/heads/master
|
/report.py
|
import datetime
from django.db import models
from django import forms
from xlwt.Workbook import Workbook
import xlrd,xlwt
import re
from xlutils.styles import Styles
from xlutils.copy import copy #http://pypi.python.org/pypi/xlutils
from xlutils.filter import process,XLRDReader,XLWTWriter
import operator
from itertools import groupby
import os
from extract_information import extract_information, get_list_of_object
from django.http import HttpResponse, HttpResponseRedirect
import datetime
# Filesystem layout: templates are read from uploaded/, results written to generated/.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__)) #path of the app
FILE_UPLOAD_PATH = SITE_ROOT + '/uploaded' #path to uploaded folder
FILE_GENERATE_PATH = SITE_ROOT + '/generated' #path to generated folder
#function to generate the report, receive the file name of the input file as the input
def generate(filename, request):
    """Render the uploaded template ``filename`` into a report workbook.

    Returns a ``(message, response)`` pair: ``message`` is 'ok' on success,
    otherwise a human-readable error; ``response`` is an HttpResponse that
    carries the generated .xls as an attachment.
    NOTE(review): ``copy2`` is presumably defined elsewhere in this module —
    confirm; ``HttpResponse(mimetype=...)`` is the old (pre-Django-1.7)
    keyword.
    """
    fname = filename #name of the input file
    response = HttpResponse(mimetype='application/ms-excel')
    response['Content-Disposition'] = u'attachment; filename=%s' % fname
    #read input file, style list:
    input_book = xlrd.open_workbook('%s/%s' % (FILE_UPLOAD_PATH, filename), formatting_info=True) #Read excel file for get data
    style_list = copy2(input_book) #copy the content and the format(style) of the input file into wtbook
    #create output file:
    wtbook = xlwt.Workbook(encoding='utf-8') #create new workbook
    for i in range(input_book.nsheets):
        sheet = input_book.sheet_by_index(i) # Get the first sheet
        try:
            #extract the specified information
            function_name, index_of_function, group, index_of_group, body, indexes_of_body, index_of_excel_function, excel_function, body_input, index_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, once, index_of_once, once_input, index_of_once_input, reserve_postions, index_of_end_group = fileExtractor(sheet)
        except:
            return 'Wrong input file, please check all data', response #if cannot extract the data, return wrong message
        else:
            message, list_objects = get_list_of_object(function_name,index_of_function, request)
            if message != 'ok':
                return message, response
            #generate the report to the excel file, message here is the signal of the success
            message = generate_output(list_objects, index_of_function, group, index_of_group, body,
                indexes_of_body, fname, index_of_excel_function, excel_function,
                body_input, index_of_body_input,
                head, index_of_head, head_input, index_of_head_input,
                foot, index_of_foot, foot_input, index_of_foot_input, request,
                once, index_of_once, once_input, index_of_once_input,
                sheet, style_list, wtbook, reserve_postions, index_of_end_group)
            if message != 'ok':
                return message, response
    # all sheets processed: serialize the workbook into the HTTP response
    wtbook.save(response)
    if request.session.get('is_spreadsheet'):
        # also persist a copy on disk for spreadsheet-driven reports
        wtbook.save('%s/%s' % (FILE_GENERATE_PATH, fname))
    return 'ok', response
#function to extract specifications from the template file
def fileExtractor(sheet):
    """Scan ``sheet`` column-by-column and collect every template marker
    (data function, groups, head/body/foot fields, 'once' fields, excel
    formulas and their input cells).

    Returns a 24-element tuple of the collected accumulators; the order must
    match the unpacking in ``generate``.
    """
    function_name = ''#name of the function which returns the list of objects
    group = {} #group
    index_of_group = {} #index of group
    index_of_end_group = {}
    index_of_function = [] #index of the function specification
    body = [] # contains the list of all the body data
    indexes_of_body = [] #indexes of the body data
    excel_function = [] #stores all the excel functions which user specified
    index_of_excel_function = [] #indexes of excel function
    body_input = [] #store input value of body
    indexes_of_body_input = [] #store index of body input
    head = {}#store header
    index_of_head = {} #store indexes of head,
    head_input = {} #store head input
    index_of_head_input = {} #store index of head input
    foot = {}
    index_of_foot = {}
    foot_input = {}
    index_of_foot_input = {}
    once = []
    index_of_once = []
    once_input = []
    index_of_once_input = []
    reserve_postions = []
    #read information user specified
    for col_x in range(sheet.ncols):
        for row_x in range(sheet.nrows):
            value = sheet.cell(row_x,col_x).value # value in the excel file
            if value: #if the cell contains data
                #call the function to extract information; it mutates the
                #accumulators in place and returns any data-function name found
                temp_function_name = extract_information(index_of_function, index_of_group, body, indexes_of_body,index_of_excel_function, excel_function, value, row_x, col_x,[],[], body_input, indexes_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, once, index_of_once, once_input, index_of_once_input, group, reserve_postions, index_of_end_group)
                #append the function_name and the group
                function_name += temp_function_name
    return function_name, index_of_function, group, index_of_group, body, indexes_of_body, index_of_excel_function, excel_function, body_input, indexes_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, once, index_of_once, once_input, index_of_once_input, reserve_postions, index_of_end_group
def generate_output(list_objects,index_of_function, group, index_of_group, body, indexes_of_body,fname, index_of_excel_function, excel_function, body_input, index_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, request, once, index_of_once, once_input, index_of_once_input, sheet, style_list, wtbook, reserve_postions, index_of_end_group):
    """Write one template sheet into the output workbook ``wtbook``.

    Groups the data, copies static regions of the template, expands the
    repeating body via ``write_groups_to_excel`` and finally evaluates any
    'once'-scoped excel functions.  Returns 'ok' or an error message.
    NOTE(review): ``dict`` below shadows the builtin, and 'reserve_postions'
    is a pre-existing misspelling kept for interface compatibility.
    """
    message = 'ok'
    #dict to store the values of the data fields. Dict here is used for grouping the data
    #the value of the group will be the keys of the dict
    dict = {}
    #manipulate the data
    message = manipulate_data(list_objects, group, index_of_group, body, indexes_of_body, dict, head, index_of_head, foot, index_of_foot, once, index_of_once, once_input, index_of_once_input, request, index_of_excel_function, excel_function, 0, sheet)
    #if something's wrong, the return the message to raise exception
    if message != 'ok':
        return message
    wtsheet = wtbook.add_sheet(sheet.name, cell_overwrite_ok=True)# create new sheet named as of sheet
    #copy column widths to output file
    for i in range(sheet.ncols):
        wtsheet.col(i).width = sheet.computed_column_width(i)
    #if function data is not specified:
    if len(index_of_function) == 0:
        #just copy the content of input file to ouput file:
        for row_index in range(sheet.nrows):
            if (sheet.rowinfo_map.get(row_index)):
                wtsheet.row(row_index).height = sheet.rowinfo_map.get(row_index).height #copy the height
            for col_index in range(sheet.ncols):
                write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_index, sheet.cell(row_index, col_index).value)
        return message
    #get row of body part
    if len(indexes_of_body) != 0:
        row_of_body = indexes_of_body[0][0]
    else:
        row_of_body = sheet.nrows - 1
    #copy information between beginning of input file and row of body part:
    for row_index in range(row_of_body):
        if (sheet.rowinfo_map.get(row_index)):
            wtsheet.row(row_index).height = sheet.rowinfo_map.get(row_index).height #copy the height
        for col_index in range(sheet.ncols):
            write_to_sheet(row_index,col_index, sheet, wtsheet, style_list, row_index, sheet.cell(row_index, col_index).value)
    #remove the content at the position of the function which returns the data, remains the format of the cell
    write_to_sheet(index_of_function[0][0],index_of_function[0][1],sheet, wtsheet, style_list, index_of_function[0][0], '')
    #begin to write the data fields to wtbook
    if len(indexes_of_body) > 0:
        row = indexes_of_body[0][0]#variable used to travel all the rows in the wtsheet
        #call this function to recursively write the groups to ouput sheet
        row, message = write_groups_to_excel(list_objects,index_of_function, group, index_of_group, body, indexes_of_body,fname, index_of_excel_function, excel_function, body_input, index_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, request, once, index_of_once, once_input, index_of_once_input, sheet, style_list,wtsheet, dict , row, 0, reserve_postions, index_of_end_group)
        if message != 'ok':
            return message
        #find the lowest template row occupied by reserved markers
        max_row = indexes_of_body[0][0];
        for i in reserve_postions:
            if max_row < i[0]:
                max_row = i[0]
        row += max_row - indexes_of_body[0][0]
        #copy the remaining static rows below the expanded body
        for row_index in range(max_row + 1, sheet.nrows, 1):
            row += 1
            if (sheet.rowinfo_map.get(row_index)):
                wtsheet.row(row).height = sheet.rowinfo_map.get(row_index).height #copy the height
            for col_index in range(sheet.ncols): #iterate all the columns
                if (row_index, col_index) not in reserve_postions:
                    write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, sheet.cell(row_index, col_index).value)
    #write once_input to output file
    for i in range(len(once_input)):
        row_index = index_of_once_input[i][0]
        col_index = index_of_once_input[i][1]
        write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_index, once_input[i])
    #write excel functions in the once part to the output file:
    for h in range(len(index_of_excel_function)):
        if index_of_excel_function[h] in index_of_once:
            col_index = index_of_excel_function[h][1] # get column index of the cell contain excel function
            row_index = index_of_excel_function[h][0] # get row index of the cell contain excel function
            #get the excel function:
            temp_excel_function = excel_function[h]
            #remove := at the beginning
            temp_excel_function = temp_excel_function[2:]
            # process error for string in the input of the excel function:
            # (replace typographic quotes U+201C/U+201D with plain ")
            temp_excel_function = temp_excel_function.replace(unichr(8220), '"').replace(unichr(8221), '"')
            # try to execute the excel function as a python function, and write the result to the ouput sheet
            try:
                value_of_excel_function = eval(temp_excel_function)
                write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_index
                    , value_of_excel_function)
            except: #if can not execute as a python function, we will try to parse it as a excel formula
                try:
                    write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_index
                        , xlwt.Formula(temp_excel_function))
                except: #if all the two above cases are failed, the raise syntax error
                    message = 'Error in excel formula, python function definition (at cell (' + str(
                        index_of_excel_function[h][0] + 1) + ', '
                    message = message + str(index_of_excel_function[h][1] + 1)
                    message = message + ')): Syntax error '
                    return message
    #copy the sheet's vertical page breaks to the output
    wtsheet.vert_page_breaks = sheet.vertical_page_breaks
    return message
def write_groups_to_excel(list_objects,index_of_function, group, index_of_group, body, indexes_of_body,fname, index_of_excel_function, excel_function, body_input, index_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, request, once, index_of_once, once_input, index_of_once_input, sheet, style_list,wtsheet, dict_values, row, key_index, reserve_postions, index_of_end_group):
message = 'ok' #message to be returned to signal the success of the function
group_key, key_all = get_group_key_and_key_all(group, key_index)
if group.get(key_all):#if the group exists
col_index = index_of_group.get(key_all)[1] #get index of column of the group
row_index = index_of_group.get(key_all)[0] #get index of row of the group
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_index, '')
row = row - (indexes_of_body[0][0] - row_index)#start write from row of group
start_row = row_index + 1
else:#else start write from row of body
start_row = indexes_of_body[0][0]
row = row - 1
#get head input of this group:
current_head_input = head_input.get(key_all)
#and get foot input of this group:
current_foot_input = foot_input.get(key_all)
keys = dict_values.keys() #get the keys of the dict
for l in range(len(dict_values)): #iterate all the groups
if current_head_input:
temp_current_head_input = current_head_input[:]
if current_foot_input:
temp_current_foot_input = current_foot_input[:]
key = keys[l] #get the key
#if the group exists:
if index_of_group.get(key_all):
row_index = index_of_group[key_all][0] #get the row index of the current group
#set row height:
if (sheet.rowinfo_map.get(row_index)):
wtsheet.row(row).height = sheet.rowinfo_map.get(row_index).height #copy the height
#copy all data of the row containing the group:
for col_index in range(sheet.ncols):
if (row_index, col_index) not in reserve_postions:
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, sheet.cell(row_index, col_index).value)
col_index = index_of_group[key_all][1] #get index of column of the group
#copy the value and the formats of that cell to the current row and the same index
#this is the part of the grouping data. The group is repeated at each key
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, '')
#copy the information in rows between the row of the group and the row of the body
for row_index in range(start_row, indexes_of_body[0][0] + 1, 1):
row += 1 # increase the current row by one
if (sheet.rowinfo_map.get(row_index)):
wtsheet.row(row).height = sheet.rowinfo_map.get(row_index).height #copy the height
for col_index in range(sheet.ncols): #iterate all the columns
if (row_index, col_index) not in reserve_postions:
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, sheet.cell(row_index, col_index).value)
#write data fields to wtsheet
values = dict_values.get(key) #get the list of the data fields of this key
head_values = values[0]#values of header
foot_values = values[1] #values of foot
body_values = values[2] #values of body part
#replace value head_values into head input
temp_current_excel_function = excel_function[:]
if index_of_head.get(key_all):
for h in range(len(index_of_head.get(key_all))):
value = head_values[h]
if index_of_head[key_all][h] in index_of_excel_function:
#replace the data in the excel function for later formula
temp_current_excel_function[index_of_excel_function.index(index_of_head[key_all][h])] = temp_current_excel_function[
index_of_excel_function.index(
index_of_head[key_all][
h])].replace(
'{{head' + key_all + ':' + head[key_all][h] + '}}', unicode(value))
else:# else just replace the value into the body input
temp_current_head_input[index_of_head_input[key_all].index(index_of_head[key_all][h])] = temp_current_head_input[index_of_head_input[key_all].index(index_of_head[key_all][h])].replace('{{head' + key_all + ':' + head[key_all][h] + '}}', unicode(value))
#write head values to output file:
for h in range(len(index_of_head_input[key_all])):
col_index = index_of_head_input[key_all][h][1]
row_index = index_of_head_input[key_all][h][0]
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row - (indexes_of_body[0][0] - row_index), temp_current_head_input[h])
#write excel functions in the head part to the output file:
for h in range(len(index_of_excel_function)):
if index_of_excel_function[h] in index_of_head[key_all]:
col_index = index_of_excel_function[h][1] # get column index of the cell contain excel function
row_index = index_of_excel_function[h][0] # get row index of the cell contain excel function
#get the excel function:
temp_excel_function = temp_current_excel_function[h]
#remove := at the beginning
temp_excel_function = temp_excel_function[2:]
# process error for string in the input of the excel function:
temp_excel_function = temp_excel_function.replace(unichr(8220), '"').replace(unichr(8221), '"')
# try to execute the excel function as a python function, and write the result to the ouput sheet
try:
value_of_excel_function = eval(temp_excel_function)
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row - (indexes_of_body[0][0] - row_index)
, value_of_excel_function)
except: #if can not execute as a python function, we will try to parse it as a excel formula
try:
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row - (indexes_of_body[0][0] - row_index)
, xlwt.Formula(temp_excel_function))
except: #if all the two above cases are failed, the raise syntax error
message = 'Error in excel formula, python function definition (at cell (' + str(
index_of_excel_function[h][0] + 1) + ', '
message = message + str(index_of_excel_function[h][1] + 1)
message = message + ')): Syntax error '
return message
#write body values to output file:
if type(body_values) is dict:
row, message = write_groups_to_excel(list_objects,index_of_function, group, index_of_group, body, indexes_of_body,fname, index_of_excel_function, excel_function, body_input, index_of_body_input, head, index_of_head, head_input, index_of_head_input, foot, index_of_foot, foot_input, index_of_foot_input, request, once, index_of_once, once_input, index_of_once_input, sheet, style_list,wtsheet, body_values, row, key_index + 1, reserve_postions, index_of_end_group)
if message != 'ok':
return row, message
else:
increase_row = 1
row -= 1
for i in range(len(body_values)): #iterate the list to get all the data fields
temp_current_excel_function = excel_function[:]
temp_body_input = body_input[:]
row += increase_row #increase the current row
#set height of the current row equal to the row of the spcified body row
wtsheet.row(row).height = sheet.rowinfo_map.get(indexes_of_body[0][0]).height
for h in range(len(indexes_of_body)):#iterate all the fields
value = body_values[i][h] # the value of the current data
#if the index of the current data is the index of one specified excel function
if indexes_of_body[h] in index_of_excel_function:
#replace the data in the excel function for later formula
temp_current_excel_function[index_of_excel_function.index(indexes_of_body[h])] = temp_current_excel_function[index_of_excel_function.index(indexes_of_body[h])].replace('{{' + body[h] + '}}',unicode(value))
else:# else just replace the value into the body input
temp_body_input[index_of_body_input.index(indexes_of_body[h])] = temp_body_input[index_of_body_input.index(indexes_of_body[h])].replace('{{' + body[h] + '}}',unicode(value))
#write body_input to the output file:
for h in range(len(index_of_body_input)):
col_index = index_of_body_input[h][1] #get current column index of body
row_index = index_of_body_input[h][0] #get current row index of body
#write to output file
temp_increase_row = write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, ' '.join(temp_body_input[h].split()))
if temp_increase_row > increase_row:
increase_row = temp_increase_row
#write excel functions to the output file:
for h in range(len(index_of_excel_function)):
if index_of_excel_function[h] in indexes_of_body:
col_index = index_of_excel_function[h][1] # get column index of the cell contain excel function
row_index = index_of_excel_function[h][0] # get row index of the cell contain excel function
#get the excel function:
temp_excel_function = temp_current_excel_function[h]
#remove := at the beginning
temp_excel_function = temp_excel_function[2:]
# process error for string in the input of the excel function:
temp_excel_function = temp_excel_function.replace(unichr(8220),'"').replace(unichr(8221),'"')
# try to execute the excel function as a python function, and write the result to the ouput sheet
try:
value_of_excel_function = eval(temp_excel_function)
#if the value of the function is "remove_row", the delete the current data row
if (value_of_excel_function == "remove_row"):
for temp_index in range(len(indexes_of_body)):
#clear data and get increase row
temp_increase_row = write_to_sheet(row_index, indexes_of_body[temp_index][1], sheet, wtsheet, style_list, row, "")
if temp_increase_row > increase_row:
increase_row = temp_increase_row
row -= 1
break
else: #else output the value of the function to the input file
temp_increase_row = write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, value_of_excel_function)
if temp_increase_row > increase_row:
increase_row = temp_increase_row
except : #if can not execute as a python function, we will try to parse it as a excel formula
try:
temp_increase_row = write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, xlwt.Formula(temp_excel_function))
if temp_increase_row > increase_row:
increase_row = temp_increase_row
except : #if all the two above cases are failed, the raise syntax error
message = 'Error in excel formula definition (at cell (' + str(index_of_excel_function[h][0] + 1) + ', '
message = message + str(index_of_excel_function[h][1] + 1)
message = message + ')): Syntax error '
return message
#copy format of other cell in the body row
row_index = index_of_body_input[0][0]
for col_index in range(sheet.ncols):
if (row_index, col_index) not in reserve_postions:
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, '')
max_foot_row = row
index_of_this_end_group = index_of_end_group.get(key_all)
if (index_of_this_end_group):
max_foot_row = row + (index_of_this_end_group[0] - indexes_of_body[0][0])
if index_of_foot.get(key_all):
#write foot values to output file:
#insert foot values to the output file:
#replace value foot_values into foot input
temp_current_excel_function = excel_function[:]
for f in range(len(index_of_foot.get(key_all))):
value = foot_values[f]
if index_of_foot[key_all][f] in index_of_excel_function:
#replace the data in the excel function for later formula
temp_current_excel_function[index_of_excel_function.index(index_of_foot[key_all][f])] = temp_current_excel_function[
index_of_excel_function.index(
index_of_foot[key_all][
f])].replace(
'{{foot' + key_all + ':' + foot[key_all][f] + '}}', unicode(value))
else:# else just replace the value into the body input
try:
temp_current_foot_input[index_of_foot_input[key_all].index(index_of_foot[key_all][f])] = temp_current_foot_input[index_of_foot_input[key_all].index(index_of_foot[key_all][f])].replace('{{foot' + key_all + ':' + foot[key_all][f] + '}}', unicode(value))
except :
temp_current_foot_input[index_of_foot_input[key_all].index(index_of_foot[key_all][f])] = temp_current_foot_input[index_of_foot_input[key_all].index(index_of_foot[key_all][f])].replace('{{foot' + key_all + ':' + foot[key_all][f] + '}}', str(value).decode('utf-8'))
#write foot values to output file:
for f in range(len(index_of_foot_input.get(key_all))):
col_index = index_of_foot_input.get(key_all)[f][1]
row_index = index_of_foot_input.get(key_all)[f][0]
row_of_foot = row + (row_index - indexes_of_body[0][0])
if row_of_foot > max_foot_row:
max_foot_row = row_of_foot
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_of_foot, temp_current_foot_input[f])
#write excel functions in the foot part to the output file:
for h in range(len(index_of_excel_function)):
if index_of_excel_function[h] in index_of_foot.get(key_all):
col_index = index_of_excel_function[h][1] # get column index of the cell contain excel function
row_index = index_of_excel_function[h][0] # get row index of the cell contain excel function
#get the excel function:
temp_excel_function = temp_current_excel_function[h]
#remove := at the beginning
temp_excel_function = temp_excel_function[2:]
# process error for string in the input of the excel function:
temp_excel_function = temp_excel_function.replace(unichr(8220), '"').replace(unichr(8221), '"')
# try to execute the excel function as a python function, and write the result to the ouput sheet
row_of_foot = row + (row_index - indexes_of_body[0][0])
try:
value_of_excel_function = eval(temp_excel_function)
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_of_foot
, value_of_excel_function)
except: #if can not execute as a python function, we will try to parse it as a excel formula
try:
write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row_of_foot
, xlwt.Formula(temp_excel_function))
except: #if all the two above cases are failed, the raise syntax error
message = 'Error in excel formula, python function definition (at cell (' + str(
index_of_excel_function[h][0] + 1) + ', '
message = message + str(index_of_excel_function[h][1] + 1)
message = message + ')): Syntax error '
return message
#copy the information provided by user at the end of the report to the end of the output file
temp_row = row
for row_index in range(indexes_of_body[0][0] + 1,indexes_of_body[0][0] + max_foot_row - row + 1, 1):
temp_row += 1
if (sheet.rowinfo_map.get(row_index)):
wtsheet.row(temp_row).height = sheet.rowinfo_map.get(row_index).height #copy the height
for col_index in range(sheet.ncols):
#copy the value and the format
if (row_index, col_index) not in reserve_postions:
write_to_sheet(row_index,col_index,sheet, wtsheet, style_list, temp_row, sheet.cell(row_index,col_index).value)
if (row_index, col_index) == index_of_this_end_group:
write_to_sheet(row_index,col_index,sheet, wtsheet, style_list, temp_row, '')
if l < len(dict_values) - 1:
row = max_foot_row + 1
# return 1, 'not ok'
return row, message
# This function is used for manipulating the data:
def manipulate_data(list_objects, group, index_of_group, body, indexes_of_body, dict, head, index_of_head, foot, index_of_foot, once, index_of_once, once_input, index_of_once_input, request, index_of_excel_function, excel_function, key, sheet):
    """Recursively group *list_objects* into *dict* per the template's group
    definitions and evaluate the once/head/foot/body expressions.

    On success each group key in *dict* maps to a 3-item list
    [head_values_tuple, foot_values_tuple, body], where body is either a
    list of per-object value tuples (deepest group level) or a nested dict
    of the same shape (when more group levels remain).

    Side effects: mutates *dict*, and replaces '{{once:...}}' placeholders
    in-place inside *excel_function* and *once_input*.

    Returns 'ok', or a human-readable error message naming the template
    cell whose expression failed (callers check for != 'ok').

    NOTE(review): template expressions are executed via eval(); the
    spreadsheet template must be trusted input.
    NOTE(review): `request` is unused directly here but presumably may be
    referenced by eval'd template expressions -- confirm before removing.
    The parameter named `dict` shadows the builtin; kept for API
    compatibility.
    """
    message = 'ok'
    if key == 0:
        #compute values for once:
        # "once" expressions are evaluated against the FIRST object only.
        if len(list_objects) > 0:
            a = list_objects[0]
            for o in range(len(once)):
                # Evaluation fallback chain: dict-style lookup (raw SQL row),
                # then attribute access (Django model), then a free-standing
                # expression; empty string if all three fail.
                try:
                    value = eval('a["%s"]' %once[o])
                except :
                    try:
                        value = eval('a.%s'%once[o])
                    except :
                        try:
                            value = eval(once[o])
                        except :
                            value = ''
                if index_of_once[o] in index_of_excel_function:
                    #replace the data in the excel function for later formula
                    # (str().decode fallback handles non-unicode-convertible
                    # values under Python 2)
                    try:
                        excel_function[index_of_excel_function.index(index_of_once[o])] = excel_function[
                            index_of_excel_function.index(
                                index_of_once[
                                    o])].replace(
                            '{{once:' + once[o] + '}}', unicode(value))
                    except :
                        excel_function[index_of_excel_function.index(index_of_once[o])] = excel_function[
                            index_of_excel_function.index(
                                index_of_once[
                                    o])].replace(
                            '{{once:' + once[o] + '}}', str(value).decode('utf-8'))
                else:
                    try:
                        once_input[index_of_once_input.index(index_of_once[o])] = once_input[index_of_once_input.index(index_of_once[o])].replace('{{once:' + once[o] + '}}', unicode(value))
                    except :
                        once_input[index_of_once_input.index(index_of_once[o])] = once_input[index_of_once_input.index(index_of_once[o])].replace('{{once:' + once[o] + '}}', str(value).decode('utf-8'))
        else:
            # No data at all: blank out every once placeholder.
            for o in range(len(once)):
                value = ''
                once_input[index_of_once_input.index(index_of_once[o])] = once_input[index_of_once_input.index(index_of_once[o])].replace('{{once:' + once[o] + '}}', unicode(value))
    #get groups tags
    group_key, key_all = get_group_key_and_key_all(group, key)
    for i in list_objects:
        temp_key = '' #init the key for this object. If group is empty, then all the objects will have the same
        # key (''), then the data will not be grouped
        if group_key != '': #if the group is not empty
            try:
                temp_key = eval('i["%s"]' % group_key) #try compute the value of the group
            except: #if there is error, then raise exceptions
                try:
                    temp_key = eval('i.%s'%group_key)
                except :
                    try:
                        temp_key = eval(group_key)
                    except :
                        message = 'Error in group definition at sheet ' + sheet.name + ', cell ' + xlwt.Utils.rowcol_to_cell(index_of_group[key_all][0],index_of_group[key_all][1])
                        message = message + ': Object has no attribute '
                        message = message + str(
                            group_key) + '; or the function you defined returns wrong result (must return a list of objects)'
                        return message #return the message to signal the failure of the function
        if dict.get(temp_key):
            # Existing group: just append the object to its body list.
            dict[temp_key][2].append(i)
        else:
            # New group: build [head_tuple, foot_tuple, [first object]].
            # Head/foot expressions are evaluated against this first object.
            dict[temp_key] = []
            head_result = [] #store values for header of each group
            if head.get(key_all):
                for h in head.get(key_all):
                    try: #try evaluate head value
                        head_result.append(eval('i["%s"]' % h))#for raw sql
                    except :
                        try: #for django models
                            head_value = eval('i.%s'%h)
                            if head_value != None:
                                head_result.append(head_value) #if head result is not None
                            else:
                                head_result.append('')
                        except :
                            try:
                                head_result.append(eval(h))
                            except :
                                index = head.get(key_all).index(h)
                                message = 'Error in head definition at sheet ' + sheet.name + ', cell ' + xlwt.Utils.rowcol_to_cell(index_of_head.get(key_all)[index][0],index_of_head.get(key_all)[index][1])
                                message = message + ': Object has no attribute '
                                message = message + h + '; or the function you defined returns wrong result (must return a list of objects)'
                                return message
            head_result = tuple(head_result)
            dict[temp_key].append(head_result)
            #store the values for footer:
            foot_result = []
            if foot.get(key_all):
                for f in foot.get(key_all):
                    try:#try to evaluate foot value
                        foot_result.append(eval('i["%s"]' % f)) #for raw sql
                    except :
                        try: #for django models
                            foot_value = eval('i.%s'%f)
                            if (foot_value != None):
                                foot_result.append(foot_value) #if the foot value s not None
                            else:
                                foot_result.append('')
                        except:
                            try:
                                foot_result.append(eval(f))
                            except :
                                index = foot.get(key_all).index(f)
                                message = 'Error in foot definition at sheet ' + sheet.name + ', cell ' + xlwt.Utils.rowcol_to_cell(index_of_foot.get(key_all)[index][0],index_of_foot.get(key_all)[index][1])
                                message = message + ': Object has no attribute '
                                message = message + f + '; or the function you defined returns wrong result (must return a list of objects)'
                                return message
            foot_result = tuple(foot_result)
            dict[temp_key].append(foot_result)
            dict[temp_key].append([])
            dict[temp_key][2].append(i)
    keys = dict.keys()
    for k in keys:
        sub_list_objects = dict.get(k)[2][:]
        if key < len(group.keys()) - 1:
            # More group levels remain: replace the body list with a nested
            # dict and recurse one level deeper.
            dict[k][2] = {}
            message = manipulate_data(sub_list_objects, group, index_of_group, body, indexes_of_body, dict[k][2], head, index_of_head,
                                      foot, index_of_foot, once, index_of_once, once_input, index_of_once_input, request, index_of_excel_function,
                                      excel_function, key + 1, sheet)
            if message != "ok":
                return message
        else:
            # Deepest level: evaluate every body field per object into tuples.
            dict[k][2] = []
            for i in sub_list_objects:
                result = []
                for y in body: #iterate all the fields in the body part of this object
                    try:
                        result.append(eval('i["%s"]' % y)) #try to evaluate the value of the field and add them into the result
                    except: # if error, raise exception and return the message
                        try:
                            body_value = eval('i.%s'%y)
                            if body_value != None:
                                result.append(body_value)
                            else:
                                result.append('')
                        except :
                            try:
                                result.append(eval(y))
                            except :
                                index = body.index(y)
                                message = 'Error in body definition at sheet ' + sheet.name + ', cell ' + xlwt.Utils.rowcol_to_cell(indexes_of_body[index][0],indexes_of_body[index][1])
                                message = message + ': Object has no attribute '
                                message = message + y + '; or the function you defined returns wrong result (must return a list of objects)'
                                return message
                result = tuple(result)# convert to tupple: [] to ()
                dict[k][2].append(result)
    return message
def get_group_key_and_key_all(group, key):
    """Look up the key-th group definition from the template.

    Parameters:
        group: dict mapping a group tag to its grouping expression.
        key: zero-based index of the grouping level; tags are taken in
            sorted order.

    Returns:
        (group_key, key_all): the grouping expression and its tag, or
        ('', '') when *group* is empty / *key* is out of range -- callers
        treat that as "do not group".
    """
    try:
        key_all = sorted(group.keys())[key]
        group_key = group.get(key_all)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; an out-of-range key or empty group falls back to the
        # "no grouping" sentinel.
        key_all = ''
        group_key = ''
    return group_key, key_all
#This function is used for coping the contents of a excel file to an other one
def copy2(wb):
    """Run the xlrd workbook *wb* through xlutils' reader/writer pipeline
    and return the resulting per-cell style list, so the template's cell
    formats can be reused when writing output with xlwt."""
    writer = XLWTWriter()
    reader = XLRDReader(wb, 'unknown.xls')
    process(reader, writer)
    return writer.style_list
def is_merged(position, sheet):
    """Report whether *position* (row, col) is the anchor (top-left) cell
    of a merged range on *sheet*.

    Returns (True, crange) with the matching entry of sheet.merged_cells,
    or (False, ()) when the cell does not start a merged range.
    """
    row, col = position
    match = next(
        (rng for rng in sheet.merged_cells if rng[0] == row and rng[2] == col),
        None,
    )
    if match is None:
        return False, ()
    return True, match
#this function is used for writing values to wtsheet, prevent merged cells
def write_to_sheet(row_index, col_index, sheet, wtsheet, style_list, row, value):
    """Write *value* into the output sheet at (row, col_index), reusing the
    format of the template cell (row_index, col_index) and honouring merged
    ranges.

    Returns the number of template rows the write occupies: the merged
    range height, or 1 for an ordinary cell.
    """
    # Style of the template cell we are copying from.
    style = style_list[sheet.cell_xf_index(row_index, col_index)]
    merged, crange = is_merged((row_index, col_index), sheet)
    if not merged:
        wtsheet.write(row, col_index, value, style)
        return 1
    height = crange[1] - crange[0]
    # The -1 adjustments convert the range bounds, presumably because
    # xlrd's merged ranges are half-open while write_merge takes
    # inclusive bounds -- verify against the xlrd/xlwt docs.
    wtsheet.write_merge(row, row + height - 1, crange[2], crange[3] - 1,
                        value, style)
    return height
|
{"/generate_from_spreadsheet.py": ["/report.py"], "/tests.py": ["/extract_information.py", "/generate_from_spreadsheet.py"], "/report.py": ["/extract_information.py"]}
|
30,699
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/LBGLCM.py
|
#importing os module (file handling in os) and pillow module for images
import os
from PIL import Image
#importing the GLCM and LBP module
from skimage.feature import greycomatrix, greycoprops, local_binary_pattern
#importing numpy and pandas
import numpy as np
import pandas as pd
#function to extract features for a ***collection of images***
def extract_features(directory, dist, angle, radius):
    """Extract LBGLCM (LBP followed by GLCM) texture features for every
    image in a labelled defect-image directory tree.

    The directory must contain one sub-folder per defect class (Crazing,
    Inclusion, Patches, Pitted Surface, RS, Scratch); every image inside
    each sub-folder is processed.

    Parameters:
        directory: root folder holding the six class sub-folders.
        dist: pixel-pair distance for the grey-level co-occurrence matrix.
        angle: pixel-pair angle for the co-occurrence matrix.
        radius: neighbourhood radius for the local binary pattern
            (8 * radius sample points).

    Returns:
        pandas.DataFrame with one row per image and columns 'contrast',
        'dissimilarity', 'homogeneity', 'energy', 'correlation' plus the
        class label column 'type'.
    """
    directory = str(directory)
    features = {}
    names = ['Crazing', 'Inclusion', 'Patches', 'Pitted Surface', 'RS', 'Scratch']
    contrasts = []
    dissimilarities = []
    homogeneties = []
    correlations = []
    energies = []
    defect_types = []  # renamed from `type` to avoid shadowing the builtin
    # Iterate through each class folder and every image inside it
    for defect_name in names:
        foldername = directory + '/' + defect_name
        for name in os.listdir(foldername):
            filename = foldername + '/' + name
            image = Image.open(filename)  # load an image from file
            # PIL reports size as (width, height) while getdata() streams
            # pixels row by row, so the matrix has `height` rows of `width`
            # columns.  The original reshape(size[0], size[1]) swapped the
            # axes, silently garbling any non-square image.
            img = np.array(image.getdata()).reshape(image.size[1], image.size[0])
            # LBP map, rescaled into the 0..255 grey-level range used below
            feat_lbp = local_binary_pattern(img, 8 * radius, radius, 'uniform')
            feat_lbp = np.uint64((feat_lbp / feat_lbp.max()) * 255)
            # GLCM computed over the LBP map, then its scalar texture props
            gcom = greycomatrix(feat_lbp, [dist], [angle], 256, symmetric=True, normed=True)
            contrast = greycoprops(gcom, prop='contrast')
            dissimilarity = greycoprops(gcom, prop='dissimilarity')
            homogeneity = greycoprops(gcom, prop='homogeneity')
            energy = greycoprops(gcom, prop='energy')
            correlation = greycoprops(gcom, prop='correlation')
            # Store the first (only) distance/angle value of each property
            contrasts.append(contrast[0][0])
            dissimilarities.append(dissimilarity[0][0])
            homogeneties.append(homogeneity[0][0])
            energies.append(energy[0][0])
            correlations.append(correlation[0][0])
            defect_types.append(defect_name)
            print('>%s' % name)
    # Assemble the feature table
    features['contrast'] = contrasts
    features['dissimilarity'] = dissimilarities
    features['homogeneity'] = homogeneties
    features['energy'] = energies
    features['correlation'] = correlations
    features['type'] = defect_types
    df = pd.DataFrame(features)
    return df
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,700
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Final_Results_Window.py
|
#Importing the GUI module
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog3(object):
    """Prediction-result dialog layout (pyuic-style generated code).

    Shows the classified image, the classifier that was used and the
    predicted defect type.  NOTE(review): this looks like auto-generated
    Qt Designer output -- prefer editing the .ui file and regenerating
    over hand edits; widget creation order mirrors the .ui file.
    """
    #Method for setting up the UI
    def setupUi(self, Dialog):
        """Create and lay out all widgets on *Dialog*; ends with Qt's
        automatic slot connection via connectSlotsByName."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(699, 745)
        # Image preview area and headings
        self.Photo = QtWidgets.QLabel(Dialog)
        self.Photo.setGeometry(QtCore.QRect(200, 120, 341, 291))
        self.Photo.setAlignment(QtCore.Qt.AlignCenter)
        self.Photo.setObjectName("Photo")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(290, 70, 151, 41))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(30, 20, 181, 41))
        font = QtGui.QFont()
        font.setPointSize(20)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        # Result grid: classifier-used / defect-type labels with separator lines
        self.layoutWidget = QtWidgets.QWidget(Dialog)
        self.layoutWidget.setGeometry(QtCore.QRect(80, 450, 541, 211))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.line_5 = QtWidgets.QFrame(self.layoutWidget)
        self.line_5.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_5.setObjectName("line_5")
        self.gridLayout.addWidget(self.line_5, 3, 0, 1, 1)
        self.line_8 = QtWidgets.QFrame(self.layoutWidget)
        self.line_8.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_8.setObjectName("line_8")
        self.gridLayout.addWidget(self.line_8, 3, 4, 1, 1)
        self.line_3 = QtWidgets.QFrame(self.layoutWidget)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.gridLayout.addWidget(self.line_3, 2, 1, 1, 1)
        self.line_6 = QtWidgets.QFrame(self.layoutWidget)
        self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_6.setObjectName("line_6")
        self.gridLayout.addWidget(self.line_6, 0, 3, 1, 1)
        self.line_4 = QtWidgets.QFrame(self.layoutWidget)
        self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_4.setObjectName("line_4")
        self.gridLayout.addWidget(self.line_4, 1, 0, 1, 1)
        self.line = QtWidgets.QFrame(self.layoutWidget)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.gridLayout.addWidget(self.line, 0, 1, 1, 1)
        self.line_7 = QtWidgets.QFrame(self.layoutWidget)
        self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_7.setObjectName("line_7")
        self.gridLayout.addWidget(self.line_7, 1, 4, 1, 1)
        self.line_9 = QtWidgets.QFrame(self.layoutWidget)
        self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_9.setObjectName("line_9")
        self.gridLayout.addWidget(self.line_9, 2, 3, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 3, 1, 1, 1)
        # Value labels filled in at runtime by the caller
        self.Namofclassifier = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.Namofclassifier.setFont(font)
        self.Namofclassifier.setAlignment(QtCore.Qt.AlignCenter)
        self.Namofclassifier.setObjectName("Namofclassifier")
        self.gridLayout.addWidget(self.Namofclassifier, 1, 3, 1, 1)
        self.Typeofdefect = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.Typeofdefect.setFont(font)
        self.Typeofdefect.setAlignment(QtCore.Qt.AlignCenter)
        self.Typeofdefect.setObjectName("Typeofdefect")
        self.gridLayout.addWidget(self.Typeofdefect, 3, 3, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 1, 1, 1, 1)
        self.line_10 = QtWidgets.QFrame(self.layoutWidget)
        self.line_10.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_10.setObjectName("line_10")
        self.gridLayout.addWidget(self.line_10, 1, 2, 1, 1)
        self.line_11 = QtWidgets.QFrame(self.layoutWidget)
        self.line_11.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_11.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_11.setObjectName("line_11")
        self.gridLayout.addWidget(self.line_11, 3, 2, 1, 1)
        # Free-standing decorative separator lines around the image area
        self.line_2 = QtWidgets.QFrame(Dialog)
        self.line_2.setGeometry(QtCore.QRect(80, 640, 541, 41))
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.line_12 = QtWidgets.QFrame(Dialog)
        self.line_12.setGeometry(QtCore.QRect(20, 55, 661, 31))
        self.line_12.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_12.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_12.setObjectName("line_12")
        self.line_13 = QtWidgets.QFrame(Dialog)
        self.line_13.setGeometry(QtCore.QRect(200, 105, 351, 31))
        self.line_13.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_13.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_13.setObjectName("line_13")
        self.line_14 = QtWidgets.QFrame(Dialog)
        self.line_14.setGeometry(QtCore.QRect(200, 400, 351, 21))
        self.line_14.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_14.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_14.setObjectName("line_14")
        self.line_15 = QtWidgets.QFrame(Dialog)
        self.line_15.setGeometry(QtCore.QRect(190, 120, 16, 291))
        self.line_15.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_15.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_15.setObjectName("line_15")
        self.line_16 = QtWidgets.QFrame(Dialog)
        self.line_16.setGeometry(QtCore.QRect(530, 120, 41, 291))
        self.line_16.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_16.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_16.setObjectName("line_16")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set all user-visible text (kept separate so Qt's translation
        tooling can re-run it on language change)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "ID3"))
        self.Photo.setText(_translate("Dialog", "TextLabel"))
        self.label.setText(_translate("Dialog", "Original Image"))
        self.label_4.setText(_translate("Dialog", "Prediction Result"))
        self.label_2.setText(_translate("Dialog", "Classifier Used:"))
        self.Namofclassifier.setText(_translate("Dialog", "TextLabel"))
        self.label_3.setText(_translate("Dialog", "Type of Defect:"))
        self.Typeofdefect.setText(_translate("Dialog", "TextLabel"))
if __name__ == "__main__":
    import sys

    # Manual smoke test: build the result dialog and run the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog3()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,701
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Training_Result_Window.py
|
#Impoting the GUI module
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog1(object):
    """Training-accuracy summary dialog layout (pyuic-style generated code).

    Shows one row per algorithm (GLCM/LBGLCM feature sets crossed with
    several classifiers, plus CNN) with a value label the caller fills in.
    NOTE(review): this looks like auto-generated Qt Designer output --
    prefer editing the .ui file and regenerating over hand edits.
    """
    #Method for setting up the UI
    def setupUi(self, Dialog):
        """Create and lay out all widgets on *Dialog*; ends with Qt's
        automatic slot connection via connectSlotsByName."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(624, 633)
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(40, 30, 511, 101))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # Grid of (algorithm name, accuracy value) label pairs
        self.widget = QtWidgets.QWidget(Dialog)
        self.widget.setGeometry(QtCore.QRect(60, 130, 521, 431))
        self.widget.setObjectName("widget")
        self.gridLayout = QtWidgets.QGridLayout(self.widget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label_4 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_4.setFont(font)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
        # Value labels (lbglcmrf, glcmxt, ...) are populated at runtime
        self.lbglcmrf = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lbglcmrf.setFont(font)
        self.lbglcmrf.setAlignment(QtCore.Qt.AlignCenter)
        self.lbglcmrf.setObjectName("lbglcmrf")
        self.gridLayout.addWidget(self.lbglcmrf, 1, 1, 1, 1)
        self.glcmxt = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.glcmxt.setFont(font)
        self.glcmxt.setAlignment(QtCore.Qt.AlignCenter)
        self.glcmxt.setObjectName("glcmxt")
        self.gridLayout.addWidget(self.glcmxt, 2, 1, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_8.setFont(font)
        self.label_8.setAlignment(QtCore.Qt.AlignCenter)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
        self.glcmgb = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.glcmgb.setFont(font)
        self.glcmgb.setAlignment(QtCore.Qt.AlignCenter)
        self.glcmgb.setObjectName("glcmgb")
        self.gridLayout.addWidget(self.glcmgb, 4, 1, 1, 1)
        self.cnn = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.cnn.setFont(font)
        self.cnn.setAlignment(QtCore.Qt.AlignCenter)
        self.cnn.setObjectName("cnn")
        self.gridLayout.addWidget(self.cnn, 6, 1, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_7.setFont(font)
        self.label_7.setAlignment(QtCore.Qt.AlignCenter)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
        self.lbglcmxt = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lbglcmxt.setFont(font)
        self.lbglcmxt.setAlignment(QtCore.Qt.AlignCenter)
        self.lbglcmxt.setObjectName("lbglcmxt")
        self.gridLayout.addWidget(self.lbglcmxt, 3, 1, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_6.setFont(font)
        self.label_6.setAlignment(QtCore.Qt.AlignCenter)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 4, 0, 1, 1)
        self.lbglcmgb = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lbglcmgb.setFont(font)
        self.lbglcmgb.setAlignment(QtCore.Qt.AlignCenter)
        self.lbglcmgb.setObjectName("lbglcmgb")
        self.gridLayout.addWidget(self.lbglcmgb, 5, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_5.setFont(font)
        self.label_5.setAlignment(QtCore.Qt.AlignCenter)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1)
        self.glcmrf = QtWidgets.QLabel(self.widget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.glcmrf.setFont(font)
        self.glcmrf.setAlignment(QtCore.Qt.AlignCenter)
        self.glcmrf.setObjectName("glcmrf")
        self.gridLayout.addWidget(self.glcmrf, 0, 1, 1, 1)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set all user-visible text (kept separate so Qt's translation
        tooling can re-run it on language change)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "ID3"))
        self.label.setText(_translate("Dialog", "Training Accuracy of Different Algorithms"))
        self.label_4.setText(_translate("Dialog", "GLCM + Extra Trees Classifier"))
        self.label_2.setText(_translate("Dialog", "GLCM + Random Forest"))
        self.label_3.setText(_translate("Dialog", "LBGLCM + Random Forest"))
        self.lbglcmrf.setText(_translate("Dialog", "TextLabel"))
        self.glcmxt.setText(_translate("Dialog", "TextLabel"))
        self.label_8.setText(_translate("Dialog", "CNN"))
        self.glcmgb.setText(_translate("Dialog", "TextLabel"))
        self.cnn.setText(_translate("Dialog", "TextLabel"))
        self.label_7.setText(_translate("Dialog", "LBGLCM + Extra Trees Classifier"))
        self.lbglcmxt.setText(_translate("Dialog", "TextLabel"))
        self.label_6.setText(_translate("Dialog", "GLCM + Gradient Boosting"))
        self.lbglcmgb.setText(_translate("Dialog", "TextLabel"))
        self.label_5.setText(_translate("Dialog", "LBGLCM + Gradient Boosting"))
        self.glcmrf.setText(_translate("Dialog", "TextLabel"))
if __name__ == "__main__":
    import sys

    # Manual smoke test: build the training-accuracy dialog and run Qt.
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog1()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,702
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Trained_Classifier_Predictions.py
|
#Importing .py files for GLCM and LBGLCM along with classifier
import GLCM_for_single_image, LBGLCM_for_single_image, Classifiers
#Importing numpy and keras
import numpy as np
from keras_preprocessing import image
#Extracting Features for single image
def extract(selected_classifier, directory_of_image):
    """Extract texture features from a single image for the chosen classifier.

    Parameters
    ----------
    selected_classifier : str
        Combo-box caption from the operator window, e.g. "GLCM+Random Forest"
        or "LBGLCM + Random Forest".
    directory_of_image : str
        Path of the image to analyse.

    Returns
    -------
    Feature vector produced by the matching single-image extractor.

    Bug fix: the original tested ``'GLCM' in selected_classifier`` first, but
    'GLCM' is a substring of 'LBGLCM', so every LBGLCM selection silently fell
    into the plain-GLCM branch.  Test for the more specific token first.
    """
    if 'LBGLCM' in selected_classifier:
        return LBGLCM_for_single_image.extract_features(
            directory_of_image, angle=0, dist=1.25, radius=1.2)
    # Plain GLCM (same fixed angle/distance the original used).
    return GLCM_for_single_image.extract_features(
        directory_of_image, angle=0, dist=1.25)
#Classifying the image using the selected classifier on the operator window
def classify(selected_classifier, directory_of_image, trained_classifiers, labels):
    """Classify one image with the classifier selected in the operator window.

    Parameters
    ----------
    selected_classifier : str
        Combo-box caption identifying the algorithm.
    directory_of_image : str
        Path of the image to classify.
    trained_classifiers : list
        Fitted models in training order (index 6 is the CNN).
    labels : list of dict
        Per-classifier mapping from predicted class code to defect name.

    Returns
    -------
    str or None
        Human-readable defect name, or None for an unknown caption
        (matching the original implicit fall-through behaviour).

    Improvements: the six copy-pasted if-branches collapse into one dispatch
    table, and the builtin name ``dict`` is no longer shadowed.
    """
    if selected_classifier == 'Convolutional Neural Networks':
        # CNN path works on raw pixels rather than GLCM/LBGLCM features.
        test_image = image.load_img(directory_of_image, target_size=(64, 64))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        test_image /= 255.
        prediction = Classifiers.pred(trained_classifiers[6], test_image)
        cnn_labels = {
            0: 'Crazing',
            1: 'Inclusion',
            2: 'Patches',
            3: 'Pitted Surface',
            4: 'RS',
            5: 'Scratch',
        }
        return cnn_labels[np.argmax(prediction[0])]
    # Texture-feature classifiers: map the combo-box caption to the slot that
    # the training window filled in trained_classifiers / labels.
    classifier_slots = {
        'GLCM+Random Forest': 0,
        'LBGLCM + Random Forest': 1,
        'GLCM + Extra Trees Classifier': 2,
        'LBGLCM + Extra Trees Classifier': 3,
        'GLCM + Gradient Boosting Classifier': 4,
        'LBGLCM + Gradient Boosting Classifier': 5,
    }
    slot = classifier_slots.get(selected_classifier)
    if slot is None:
        # Unknown caption: original code fell through and returned None.
        return None
    feat = extract(selected_classifier, directory_of_image)
    prediction = Classifiers.pred(trained_classifiers[slot], feat)
    return labels[slot][prediction[0]]
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,703
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Training_Window.py
|
#importing modules
import os
#import GUI modules
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QIcon, QImage, QPalette, QBrush
from PyQt5.QtWidgets import QFileDialog
#importing files for classifiers and feature extraction methods
import Classifiers
import GLCM
import LBGLCM
#importing other GUIs (testing window and training-result window
from Operator_Window import Ui_Dialog2
from Training_Result_Window import Ui_Dialog1
# Module-level state shared between the training handlers and the follow-up
# windows:
#   accuracies      - validation accuracy of each trained model, appended in
#                     training order (opentrainresults indexes slots 0..6)
#   all_classifiers - the fitted classifier objects, same order
#   labels          - per-classifier mapping from class code to defect name
#                     (the CNN does not contribute an entry here)
accuracies = []
all_classifiers = []
labels = []
class Ui_Dialog(object):
#Method for opening training results window
def opentrainresults(self):
    """Open the results dialog and display each recorded validation accuracy.

    NOTE(review): slots 0..6 of ``accuracies`` are assumed to follow the
    fixed training order (RF, Xtra, GB on GLCM/LBGLCM, then CNN) — the same
    assumption the original code made.
    """
    global accuracies
    self.window = QtWidgets.QDialog()
    self.ui = Ui_Dialog1()
    self.ui.setupUi(self.window)
    # Result labels in the same fixed order as the accuracies list.
    result_labels = (self.ui.glcmrf, self.ui.lbglcmrf, self.ui.glcmxt,
                     self.ui.lbglcmxt, self.ui.glcmgb, self.ui.lbglcmgb,
                     self.ui.cnn)
    for slot, result_label in enumerate(result_labels):
        result_label.setText(str(accuracies[slot]))
    self.window.show()
#Method for opening operator window
def operatorwindow(self):
    """Open the operator (classification) window and hand it the trained
    classifiers plus their code->label mappings.

    Bug fix: the original invoked ``Ui_Dialog2.getclf(Ui_Dialog2, ...)`` —
    an unbound call passing the class object as ``self``.  It only worked
    because those methods ignore ``self``; call them on the dialog instance
    instead.
    """
    self.window = QtWidgets.QDialog()
    self.ui = Ui_Dialog2()
    self.ui.setupUi(self.window)
    self.window.show()
    self.ui.getclf(all_classifiers)
    self.ui.getlabels(labels)
#Defining setup and other methods
def setupUi(self, Dialog):
    """Build the training window: dataset browser, GLCM/LBGLCM parameter
    forms, per-classifier hyperparameter panels, CNN controls, decorative
    separator lines, and all signal/slot wiring.

    NOTE(review): generated-UI-style code — Qt widget stacking order follows
    creation order, so these statements are deliberately kept in sequence.
    """
    Dialog.setObjectName("Dialog")
    Dialog.resize(1140, 766)
    # Section headings for the two main panels.
    self.label_9 = QtWidgets.QLabel(Dialog)
    self.label_9.setGeometry(QtCore.QRect(30, 80, 221, 41))
    font = QtGui.QFont()
    font.setPointSize(16)
    self.label_9.setFont(font)
    self.label_9.setObjectName("label_9")
    self.label_10 = QtWidgets.QLabel(Dialog)
    self.label_10.setGeometry(QtCore.QRect(30, 280, 221, 41))
    font = QtGui.QFont()
    font.setPointSize(16)
    self.label_10.setFont(font)
    self.label_10.setObjectName("label_10")
    #Setting button actions for Random Forest
    # One click trains RF on both GLCM and LBGLCM features (two slots).
    self.TrainRF = QtWidgets.QPushButton(Dialog)
    self.TrainRF.setGeometry(QtCore.QRect(140, 460, 93, 28))
    self.TrainRF.setAutoDefault(False)
    self.TrainRF.setObjectName("TrainRF")
    self.TrainRF.clicked.connect(self.RandomTrees_GLCM)
    self.TrainRF.clicked.connect(self.RandomTrees_LBGLCM)
    #Setting button actions for Extra Trees Classifiers
    self.TrainXtra = QtWidgets.QPushButton(Dialog)
    self.TrainXtra.setGeometry(QtCore.QRect(510, 460, 121, 31))
    self.TrainXtra.setAutoDefault(False)
    self.TrainXtra.setObjectName("TrainXtra")
    self.TrainXtra.clicked.connect(self.ExtraTrees_GLCM)
    self.TrainXtra.clicked.connect(self.ExtraTrees_LBGLCM)
    #Setting button actions for Gradient Boosting
    self.TrainGB = QtWidgets.QPushButton(Dialog)
    self.TrainGB.setGeometry(QtCore.QRect(890, 470, 93, 28))
    self.TrainGB.setAutoDefault(False)
    self.TrainGB.setObjectName("TrainGB")
    self.TrainGB.clicked.connect(self.GB_GLCM)
    self.TrainGB.clicked.connect(self.GB_LBGLCM)
    #Setting button actions for displaying training result
    self.displaytrainres = QtWidgets.QPushButton(Dialog)
    self.displaytrainres.setGeometry(QtCore.QRect(480, 630, 181, 51))
    self.displaytrainres.setStyleSheet("background-color: rgb(252, 1, 7);")
    self.displaytrainres.setAutoDefault(False)
    self.displaytrainres.setObjectName("displaytrainres")
    self.displaytrainres.clicked.connect(self.opentrainresults)
    #Setting button actions for proceeding to operator window
    self.Proceedtoclass = QtWidgets.QPushButton(Dialog)
    self.Proceedtoclass.setGeometry(QtCore.QRect(870, 630, 221, 51))
    self.Proceedtoclass.setStyleSheet("background-color: rgb(51, 153, 102);")
    self.Proceedtoclass.setAutoDefault(False)
    self.Proceedtoclass.setObjectName("Proceedtoclass")
    self.Proceedtoclass.clicked.connect(self.operatorwindow)
    #Setting up the layouts
    # Top row: dataset-location line edit with its Browse button.
    self.layoutWidget = QtWidgets.QWidget(Dialog)
    self.layoutWidget.setGeometry(QtCore.QRect(240, 30, 581, 41))
    self.layoutWidget.setObjectName("layoutWidget")
    self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
    self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
    self.horizontalLayout.setObjectName("horizontalLayout")
    self.label = QtWidgets.QLabel(self.layoutWidget)
    self.label.setObjectName("label")
    self.horizontalLayout.addWidget(self.label)
    self.FileLocation = QtWidgets.QLineEdit(self.layoutWidget)
    self.FileLocation.setObjectName("FileLocation")
    self.horizontalLayout.addWidget(self.FileLocation)
    self.Browse = QtWidgets.QPushButton(self.layoutWidget)
    self.Browse.setAutoDefault(True)
    self.Browse.setObjectName("Browse")
    self.horizontalLayout.addWidget(self.Browse)
    # GLCM parameter grid (angle / distance).
    self.layoutWidget1 = QtWidgets.QWidget(Dialog)
    self.layoutWidget1.setGeometry(QtCore.QRect(50, 120, 291, 131))
    self.layoutWidget1.setObjectName("layoutWidget1")
    self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget1)
    self.gridLayout.setContentsMargins(0, 0, 0, 0)
    self.gridLayout.setObjectName("gridLayout")
    self.AngleforGLCM = QtWidgets.QLineEdit(self.layoutWidget1)
    self.AngleforGLCM.setObjectName("AngleforGLCM")
    self.gridLayout.addWidget(self.AngleforGLCM, 1, 1, 1, 1)
    self.label_2 = QtWidgets.QLabel(self.layoutWidget1)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_2.setFont(font)
    self.label_2.setObjectName("label_2")
    self.gridLayout.addWidget(self.label_2, 0, 0, 1, 2)
    self.label_3 = QtWidgets.QLabel(self.layoutWidget1)
    self.label_3.setObjectName("label_3")
    self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
    self.label_4 = QtWidgets.QLabel(self.layoutWidget1)
    self.label_4.setObjectName("label_4")
    self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
    self.DistanceforGLCM = QtWidgets.QLineEdit(self.layoutWidget1)
    self.DistanceforGLCM.setObjectName("DistanceforGLCM")
    self.gridLayout.addWidget(self.DistanceforGLCM, 2, 1, 1, 1)
    # LBGLCM parameter grid (radius / distance / angle).
    self.layoutWidget2 = QtWidgets.QWidget(Dialog)
    self.layoutWidget2.setGeometry(QtCore.QRect(690, 120, 401, 121))
    self.layoutWidget2.setObjectName("layoutWidget2")
    self.gridLayout_2 = QtWidgets.QGridLayout(self.layoutWidget2)
    self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_2.setObjectName("gridLayout_2")
    self.AngleforLBGLCM = QtWidgets.QLineEdit(self.layoutWidget2)
    self.AngleforLBGLCM.setObjectName("AngleforLBGLCM")
    self.gridLayout_2.addWidget(self.AngleforLBGLCM, 2, 1, 1, 1)
    self.label_6 = QtWidgets.QLabel(self.layoutWidget2)
    self.label_6.setObjectName("label_6")
    self.gridLayout_2.addWidget(self.label_6, 1, 0, 1, 1)
    self.label_8 = QtWidgets.QLabel(self.layoutWidget2)
    self.label_8.setObjectName("label_8")
    self.gridLayout_2.addWidget(self.label_8, 1, 2, 1, 1)
    self.RadiusforLBGLCM = QtWidgets.QLineEdit(self.layoutWidget2)
    self.RadiusforLBGLCM.setObjectName("RadiusforLBGLCM")
    self.gridLayout_2.addWidget(self.RadiusforLBGLCM, 1, 1, 1, 1)
    self.DistanceforLBGLCM = QtWidgets.QLineEdit(self.layoutWidget2)
    self.DistanceforLBGLCM.setObjectName("DistanceforLBGLCM")
    self.gridLayout_2.addWidget(self.DistanceforLBGLCM, 1, 3, 1, 1)
    self.label_7 = QtWidgets.QLabel(self.layoutWidget2)
    self.label_7.setObjectName("label_7")
    self.gridLayout_2.addWidget(self.label_7, 2, 0, 1, 1)
    self.label_5 = QtWidgets.QLabel(self.layoutWidget2)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_5.setFont(font)
    self.label_5.setObjectName("label_5")
    self.gridLayout_2.addWidget(self.label_5, 0, 1, 1, 1)
    # Random Forest hyperparameter grid (tree count / max_features).
    self.layoutWidget3 = QtWidgets.QWidget(Dialog)
    self.layoutWidget3.setGeometry(QtCore.QRect(50, 340, 291, 111))
    self.layoutWidget3.setObjectName("layoutWidget3")
    self.gridLayout_3 = QtWidgets.QGridLayout(self.layoutWidget3)
    self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_3.setObjectName("gridLayout_3")
    self.label_13 = QtWidgets.QLabel(self.layoutWidget3)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_13.setFont(font)
    self.label_13.setObjectName("label_13")
    self.gridLayout_3.addWidget(self.label_13, 2, 0, 1, 1)
    self.notreesRF = QtWidgets.QLineEdit(self.layoutWidget3)
    self.notreesRF.setObjectName("notreesRF")
    self.gridLayout_3.addWidget(self.notreesRF, 1, 1, 1, 1)
    self.FeaturesRF = QtWidgets.QComboBox(self.layoutWidget3)
    self.FeaturesRF.setFrame(True)
    self.FeaturesRF.setObjectName("FeaturesRF")
    self.FeaturesRF.addItem("")
    self.FeaturesRF.addItem("")
    self.FeaturesRF.addItem("")
    self.gridLayout_3.addWidget(self.FeaturesRF, 2, 1, 1, 1)
    self.label_12 = QtWidgets.QLabel(self.layoutWidget3)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_12.setFont(font)
    self.label_12.setObjectName("label_12")
    self.gridLayout_3.addWidget(self.label_12, 1, 0, 1, 1)
    self.label_11 = QtWidgets.QLabel(self.layoutWidget3)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_11.setFont(font)
    self.label_11.setObjectName("label_11")
    self.gridLayout_3.addWidget(self.label_11, 0, 0, 1, 2)
    # Extra Trees hyperparameter grid.
    self.layoutWidget4 = QtWidgets.QWidget(Dialog)
    self.layoutWidget4.setGeometry(QtCore.QRect(420, 340, 291, 111))
    self.layoutWidget4.setObjectName("layoutWidget4")
    self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget4)
    self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_4.setObjectName("gridLayout_4")
    self.NotreesXtra = QtWidgets.QLineEdit(self.layoutWidget4)
    self.NotreesXtra.setObjectName("NotreesXtra")
    self.gridLayout_4.addWidget(self.NotreesXtra, 1, 1, 1, 1)
    self.label_15 = QtWidgets.QLabel(self.layoutWidget4)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_15.setFont(font)
    self.label_15.setObjectName("label_15")
    self.gridLayout_4.addWidget(self.label_15, 1, 0, 1, 1)
    self.FeaturesXtra = QtWidgets.QComboBox(self.layoutWidget4)
    self.FeaturesXtra.setObjectName("FeaturesXtra")
    self.FeaturesXtra.addItem("")
    self.FeaturesXtra.addItem("")
    self.FeaturesXtra.addItem("")
    self.gridLayout_4.addWidget(self.FeaturesXtra, 2, 1, 1, 1)
    self.label_16 = QtWidgets.QLabel(self.layoutWidget4)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_16.setFont(font)
    self.label_16.setObjectName("label_16")
    self.gridLayout_4.addWidget(self.label_16, 2, 0, 1, 1)
    self.label_14 = QtWidgets.QLabel(self.layoutWidget4)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_14.setFont(font)
    self.label_14.setObjectName("label_14")
    self.gridLayout_4.addWidget(self.label_14, 0, 0, 1, 2)
    # Gradient Boosting hyperparameter grids (nested: params + learning rate).
    self.layoutWidget5 = QtWidgets.QWidget(Dialog)
    self.layoutWidget5.setGeometry(QtCore.QRect(790, 340, 301, 121))
    self.layoutWidget5.setObjectName("layoutWidget5")
    self.gridLayout_6 = QtWidgets.QGridLayout(self.layoutWidget5)
    self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_6.setObjectName("gridLayout_6")
    self.gridLayout_5 = QtWidgets.QGridLayout()
    self.gridLayout_5.setObjectName("gridLayout_5")
    self.Estimators_gb = QtWidgets.QLineEdit(self.layoutWidget5)
    self.Estimators_gb.setObjectName("Estimators_gb")
    self.gridLayout_5.addWidget(self.Estimators_gb, 1, 1, 1, 1)
    self.label_18 = QtWidgets.QLabel(self.layoutWidget5)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_18.setFont(font)
    self.label_18.setObjectName("label_18")
    self.gridLayout_5.addWidget(self.label_18, 1, 0, 1, 1)
    self.Features_gb = QtWidgets.QComboBox(self.layoutWidget5)
    self.Features_gb.setObjectName("Features_gb")
    self.Features_gb.addItem("")
    self.Features_gb.addItem("")
    self.Features_gb.addItem("")
    self.gridLayout_5.addWidget(self.Features_gb, 2, 1, 1, 1)
    self.label_19 = QtWidgets.QLabel(self.layoutWidget5)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_19.setFont(font)
    self.label_19.setObjectName("label_19")
    self.gridLayout_5.addWidget(self.label_19, 2, 0, 1, 1)
    self.label_17 = QtWidgets.QLabel(self.layoutWidget5)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_17.setFont(font)
    self.label_17.setObjectName("label_17")
    self.gridLayout_5.addWidget(self.label_17, 0, 0, 1, 2)
    self.gridLayout_6.addLayout(self.gridLayout_5, 0, 0, 1, 2)
    self.label_20 = QtWidgets.QLabel(self.layoutWidget5)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_20.setFont(font)
    self.label_20.setObjectName("label_20")
    self.gridLayout_6.addWidget(self.label_20, 1, 0, 1, 1)
    # lineEdit_4 holds the Gradient Boosting learning rate (see GB_GLCM).
    self.lineEdit_4 = QtWidgets.QLineEdit(self.layoutWidget5)
    self.lineEdit_4.setObjectName("lineEdit_4")
    self.gridLayout_6.addWidget(self.lineEdit_4, 1, 1, 1, 1)
    self.Train_CNN = QtWidgets.QPushButton(Dialog)
    self.Train_CNN.setGeometry(QtCore.QRect(40, 710, 93, 28))
    self.Train_CNN.setAutoDefault(False)
    self.Train_CNN.setObjectName("Train_CNN")
    # CNN parameter grid (epochs / validation split).
    self.layoutWidget6 = QtWidgets.QWidget(Dialog)
    self.layoutWidget6.setGeometry(QtCore.QRect(40, 530, 291, 171))
    self.layoutWidget6.setObjectName("layoutWidget6")
    self.gridLayout_7 = QtWidgets.QGridLayout(self.layoutWidget6)
    self.gridLayout_7.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_7.setObjectName("gridLayout_7")
    self.label_21 = QtWidgets.QLabel(self.layoutWidget6)
    font = QtGui.QFont()
    font.setPointSize(12)
    self.label_21.setFont(font)
    self.label_21.setObjectName("label_21")
    self.gridLayout_7.addWidget(self.label_21, 0, 0, 1, 2)
    self.label_22 = QtWidgets.QLabel(self.layoutWidget6)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_22.setFont(font)
    self.label_22.setObjectName("label_22")
    self.gridLayout_7.addWidget(self.label_22, 1, 0, 1, 1)
    self.epochs = QtWidgets.QLineEdit(self.layoutWidget6)
    self.epochs.setObjectName("epochs")
    self.gridLayout_7.addWidget(self.epochs, 1, 1, 1, 1)
    self.label_23 = QtWidgets.QLabel(self.layoutWidget6)
    font = QtGui.QFont()
    font.setPointSize(9)
    self.label_23.setFont(font)
    self.label_23.setObjectName("label_23")
    self.gridLayout_7.addWidget(self.label_23, 2, 0, 1, 1)
    self.validation_split = QtWidgets.QLineEdit(self.layoutWidget6)
    self.validation_split.setObjectName("validation_split")
    self.gridLayout_7.addWidget(self.validation_split, 2, 1, 1, 1)
    self.Pretrainmodel = QtWidgets.QPushButton(Dialog)
    self.Pretrainmodel.setGeometry(QtCore.QRect(190, 710, 141, 31))
    self.Pretrainmodel.setAutoDefault(False)
    self.Pretrainmodel.setObjectName("Pretrainmodel")
    # Decorative separator frames (purely visual).
    self.topBrowseHorizLine = QtWidgets.QFrame(Dialog)
    self.topBrowseHorizLine.setGeometry(QtCore.QRect(20, 10, 1101, 21))
    self.topBrowseHorizLine.setFrameShape(QtWidgets.QFrame.HLine)
    self.topBrowseHorizLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.topBrowseHorizLine.setObjectName("topBrowseHorizLine")
    self.bottomBrowseHorizLine = QtWidgets.QFrame(Dialog)
    self.bottomBrowseHorizLine.setGeometry(QtCore.QRect(20, 70, 1101, 21))
    self.bottomBrowseHorizLine.setFrameShape(QtWidgets.QFrame.HLine)
    self.bottomBrowseHorizLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.bottomBrowseHorizLine.setObjectName("bottomBrowseHorizLine")
    self.bottomFEHorizLine = QtWidgets.QFrame(Dialog)
    self.bottomFEHorizLine.setGeometry(QtCore.QRect(20, 270, 1101, 16))
    self.bottomFEHorizLine.setFrameShape(QtWidgets.QFrame.HLine)
    self.bottomFEHorizLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.bottomFEHorizLine.setObjectName("bottomFEHorizLine")
    self.bottomClassifierHorizLine = QtWidgets.QFrame(Dialog)
    self.bottomClassifierHorizLine.setGeometry(QtCore.QRect(20, 500, 1101, 21))
    self.bottomClassifierHorizLine.setFrameShape(QtWidgets.QFrame.HLine)
    self.bottomClassifierHorizLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.bottomClassifierHorizLine.setObjectName("bottomClassifierHorizLine")
    self.windowLeftVertLine = QtWidgets.QFrame(Dialog)
    self.windowLeftVertLine.setGeometry(QtCore.QRect(3, 20, 31, 731))
    self.windowLeftVertLine.setFrameShape(QtWidgets.QFrame.VLine)
    self.windowLeftVertLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.windowLeftVertLine.setObjectName("windowLeftVertLine")
    self.windowRightVertLine = QtWidgets.QFrame(Dialog)
    self.windowRightVertLine.setGeometry(QtCore.QRect(1100, 20, 41, 731))
    self.windowRightVertLine.setFrameShape(QtWidgets.QFrame.VLine)
    self.windowRightVertLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.windowRightVertLine.setObjectName("windowRightVertLine")
    self.bottomCNNHorizLine = QtWidgets.QFrame(Dialog)
    self.bottomCNNHorizLine.setGeometry(QtCore.QRect(20, 740, 1101, 21))
    self.bottomCNNHorizLine.setFrameShape(QtWidgets.QFrame.HLine)
    self.bottomCNNHorizLine.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.bottomCNNHorizLine.setObjectName("bottomCNNHorizLine")
    self.retranslateUi(Dialog)
    self.Browse.clicked.connect(self.browseSlot)
    QtCore.QMetaObject.connectSlotsByName(Dialog)
    #Setting button actions for CNN model
    self.Pretrainmodel.clicked.connect(self.load_pretrained_model)
    self.Train_CNN.clicked.connect(self.CNN)
def retranslateUi(self, Dialog):
    """Apply window title, icon and every user-visible caption for the
    training window."""
    _translate = QtCore.QCoreApplication.translate
    # Window title and application logo.
    Dialog.setWindowTitle(_translate("Dialog", "ID3"))
    Dialog.setWindowIcon(QtGui.QIcon('logo.png'))
    # Static labels and buttons, applied in one table-driven pass.
    captions = (
        (self.label_9, "Feature Extraction"),
        (self.label_10, "Classifiers"),
        (self.TrainRF, "Train RF"),
        (self.TrainXtra, "Train XtraTrees"),
        (self.TrainGB, "Train GB"),
        (self.displaytrainres, "Display Validation Accuracy"),
        (self.Proceedtoclass, "Proceed to Classification"),
        (self.label, "Dataset Location"),
        (self.Browse, "Browse"),
        (self.label_2, "GLCM"),
        (self.label_3, "Angle"),
        (self.label_4, "Distance"),
        (self.label_5, "LBGLCM"),
        (self.label_6, "Radius"),
        (self.label_8, "Distance"),
        (self.label_7, "Angle"),
        (self.label_11, "Random Forest"),
        (self.label_12, "No. of Trees"),
        (self.label_13, "Max_Features"),
        (self.label_14, "Extra Trees Classifier"),
        (self.label_15, "No. of Trees"),
        (self.label_16, "Max_Features"),
        (self.label_18, "No. of est "),
        (self.label_19, "Max_Features"),
        (self.label_17, "Gradient Boosting"),
        (self.label_20, "Learning Rate"),
        (self.Train_CNN, "Train CNN"),
        (self.label_21, "Convolutional Neural Networks"),
        (self.label_22, "Epochs"),
        (self.label_23, "Validation Split"),
        (self.Pretrainmodel, "Pre-trained Model"),
    )
    for widget, caption in captions:
        widget.setText(_translate("Dialog", caption))
    # The three max_features combo boxes offer the same three choices.
    for combo in (self.FeaturesRF, self.FeaturesXtra, self.Features_gb):
        for index, choice in enumerate(("auto", "sqrt", "log2")):
            combo.setItemText(index, _translate("Dialog", choice))
#Method for browse button
def browseSlot(self):
    """Let the user pick the dataset directory and show it in the text box."""
    chosen_dir = QFileDialog.getExistingDirectory()
    self.FileLocation.setText(str(chosen_dir))
#Method for computing GLCM
def compute_GLCM(self):
    """Read the GLCM form fields and extract features for the dataset.

    NOTE(review): angle and distance are forwarded to GLCM.extract_features
    as raw strings — presumably converted inside that module; verify.
    """
    angle = self.AngleforGLCM.text()
    distance = self.DistanceforGLCM.text()
    dataset_dir = self.FileLocation.text()
    return GLCM.extract_features(dataset_dir, distance, angle)
#Method for computing LBGLCM
def compute_LBGLCM(self):
    """Read the LBGLCM form fields and extract features for the dataset.

    Radius is converted to int here; angle and distance are forwarded as
    raw strings, matching compute_GLCM.
    """
    angle = self.AngleforLBGLCM.text()
    distance = self.DistanceforLBGLCM.text()
    dataset_dir = self.FileLocation.text()
    radius = int(self.RadiusforLBGLCM.text())
    return LBGLCM.extract_features(dataset_dir, distance, angle, radius)
#Method for training Random Forest with GLCM
def RandomTrees_GLCM(self):
    """Train a Random Forest on GLCM features and record model, accuracy
    and label mapping in the shared module-level lists."""
    global accuracies, all_classifiers, labels
    features = self.compute_GLCM()
    tree_count = self.notreesRF.text()
    feature_mode = self.FeaturesRF.currentText()
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.RF_train(features, tree_count, feature_mode)
    predictions = Classifiers.pred(clf, x_test)
    test_accuracy = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(test_accuracy)
    labels.append(label_map)
#Method for training Random Forest with LBGLCM
def RandomTrees_LBGLCM(self):
    """Train a Random Forest on LBGLCM features and record model, accuracy
    and label mapping in the shared module-level lists."""
    global accuracies, all_classifiers, labels
    features = self.compute_LBGLCM()
    tree_count = self.notreesRF.text()
    feature_mode = self.FeaturesRF.currentText()
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.RF_train(features, tree_count, feature_mode)
    predictions = Classifiers.pred(clf, x_test)
    test_accuracy = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(test_accuracy)
    labels.append(label_map)
#Method for training Extra Trees Classifiers with GLCM
def ExtraTrees_GLCM(self):
    """Train an Extra Trees classifier on GLCM features and record model,
    accuracy and label mapping in the shared module-level lists."""
    global accuracies, all_classifiers, labels
    features = self.compute_GLCM()
    tree_count = int(self.NotreesXtra.text())
    feature_mode = self.FeaturesXtra.currentText()
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.Xtra(features, tree_count, feature_mode)
    predictions = Classifiers.pred(clf, x_test)
    test_accuracy = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(test_accuracy)
    labels.append(label_map)
#Method for training Extra Trees Classifiers with LBGLCM
def ExtraTrees_LBGLCM(self):
    """Train an Extra Trees classifier on LBGLCM features and record model,
    accuracy and label mapping in the shared module-level lists."""
    global accuracies, all_classifiers, labels
    features = self.compute_LBGLCM()
    tree_count = int(self.NotreesXtra.text())
    feature_mode = self.FeaturesXtra.currentText()
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.Xtra(features, tree_count, feature_mode)
    predictions = Classifiers.pred(clf, x_test)
    test_accuracy = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(test_accuracy)
    labels.append(label_map)
#Method for training Gradient Boosting with GLCM
def GB_GLCM(self):
    """Train a Gradient Boosting classifier on GLCM features and record
    model, accuracy and label mapping in the shared module-level lists.

    Consistency fix: ``labels`` is appended to below, so it is declared
    ``global`` as in the other training methods (the append worked without
    the declaration only because it mutates rather than rebinds).
    """
    global accuracies, all_classifiers, labels
    glcm_feat = self.compute_GLCM()
    n_est = int(self.Estimators_gb.text())
    max_feats = self.Features_gb.currentText()
    Lrate = float(self.lineEdit_4.text())
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.GB(glcm_feat, n_est, max_feats, Lrate)
    predictions = Classifiers.pred(clf, x_test)
    acc_test = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(acc_test)
    labels.append(label_map)
#Method for training Gradient Boosting with LBGLCM
def GB_LBGLCM(self):
    """Train a Gradient Boosting classifier on LBGLCM features and record
    model, accuracy and label mapping in the shared module-level lists.

    Consistency fix: ``labels`` is appended to below, so it is declared
    ``global`` as in the other training methods (the append worked without
    the declaration only because it mutates rather than rebinds).
    """
    global accuracies, all_classifiers, labels
    lbglcm_feat = self.compute_LBGLCM()
    n_est = int(self.Estimators_gb.text())
    max_feats = self.Features_gb.currentText()
    Lrate = float(self.lineEdit_4.text())
    # Trainer returns the fitted model, held-out split and code->label map.
    clf, x_test, y_test, label_map = Classifiers.GB(lbglcm_feat, n_est, max_feats, Lrate)
    predictions = Classifiers.pred(clf, x_test)
    acc_test = Classifiers.display_results(predictions, y_test)
    all_classifiers.append(clf)
    accuracies.append(acc_test)
    labels.append(label_map)
#Method for training CNN
def CNN(self):
    """Train the CNN on raw dataset images and record its accuracy and the
    fitted model.

    NOTE(review): unlike the texture classifiers, nothing is appended to
    ``labels`` here — the CNN branch in Trained_Classifier_Predictions
    builds its own label mapping.
    Cleanup: the third element of the trainer's return (validation data
    generator) was bound to an unused local; it is now discarded.
    """
    global accuracies, all_classifiers
    epoch = int(self.epochs.text())
    dataset_loc = self.FileLocation.text()
    val_split = float(self.validation_split.text())
    accuracy, clf, _ = Classifiers.CNN(dataset_loc, epoch, val_split)
    accuracies.append(accuracy[0])
    all_classifiers.append(clf)
#Method for loading pretrained model of CNN
def load_pretrained_model(self):
    """Load a previously trained CNN instead of retraining, and record its
    accuracy and model in the shared module-level lists."""
    global accuracies, all_classifiers
    dataset_location = self.FileLocation.text()
    accuracy, model = Classifiers.pretrained_CNN(dataset_location)
    accuracies.append(accuracy)
    all_classifiers.append(model)
if __name__ == "__main__":
    # Application entry point: create the training window with its icon.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Idiom fix: ``__file__`` is the module's own path — no need to look the
    # module up in sys.modules.
    path = os.path.join(os.path.dirname(__file__), 'appLogo-1.png')
    app.setWindowIcon(QIcon(path))
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,704
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Operator_Window.py
|
#Loading Modules for GUI
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QWidget
#Loading Trained Classifiers
import Trained_Classifier_Predictions
#Loading the Final results window
from Final_Results_Window import Ui_Dialog3
# Module-level state shared between the operator window's handlers and the
# final-results window:
#   defect_name            - defect label produced by the last classification
#   directory_of_image     - path of the image last classified
#   classifier_selected    - combo-box caption of the chosen algorithm
#   trained_classifiers    - fitted models handed over by the training window
#   labels_for_classifiers - per-classifier code->label mappings
defect_name = None
directory_of_image = None
classifier_selected = None
trained_classifiers = []
labels_for_classifiers = []
class Ui_Dialog2(object):
#Method for displaying final classification result
def finalresults(self):
    """Pop up the results window showing the chosen classifier, the
    predicted defect type and a scaled preview of the classified image."""
    self.window = QtWidgets.QDialog()
    self.ui = Ui_Dialog3()
    self.ui.setupUi(self.window)
    self.ui.Namofclassifier.setText(classifier_selected)
    self.ui.Typeofdefect.setText(defect_name)
    # Scale the image down to the preview label's size.
    preview = QtGui.QPixmap(directory_of_image)
    self.ui.Photo.setPixmap(preview.scaled(192, 192))
    self.window.show()
#Method for setting up the UI
def setupUi(self, Dialog2):
    """Build the operator window: algorithm combo box, image-location row
    and the Classify button.

    NOTE(review): generated-UI-style code — Qt widget stacking order follows
    creation order, so statements are kept in their original order.
    """
    Dialog2.setObjectName("Dialog2")
    Dialog2.resize(592, 400)
    # Combo box listing the seven trained algorithms (captions are set in
    # retranslateUi).
    self.comboBox = QtWidgets.QComboBox(Dialog2)
    self.comboBox.setGeometry(QtCore.QRect(150, 130, 291, 61))
    font = QtGui.QFont()
    font.setPointSize(12)
    self.comboBox.setFont(font)
    self.comboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
    self.comboBox.setObjectName("comboBox")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    self.comboBox.addItem("")
    #Setting up button action for classifying image
    # One click first classifies, then shows the results window.
    self.Classify = QtWidgets.QPushButton(Dialog2)
    self.Classify.setGeometry(QtCore.QRect(230, 270, 121, 41))
    self.Classify.setObjectName("Classify")
    self.Classify.clicked.connect(self.Classifies)
    self.Classify.clicked.connect(self.finalresults)
    #Setting up layout
    self.layoutWidget = QtWidgets.QWidget(Dialog2)
    self.layoutWidget.setGeometry(QtCore.QRect(30, 30, 531, 51))
    self.layoutWidget.setObjectName("layoutWidget")
    self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
    self.gridLayout.setContentsMargins(0, 0, 0, 0)
    self.gridLayout.setObjectName("gridLayout")
    self.label = QtWidgets.QLabel(self.layoutWidget)
    self.label.setObjectName("label")
    self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
    self.Imageloc = QtWidgets.QLineEdit(self.layoutWidget)
    self.Imageloc.setObjectName("Imageloc")
    self.gridLayout.addWidget(self.Imageloc, 0, 1, 1, 1)
    self.Browseforimage = QtWidgets.QPushButton(self.layoutWidget)
    self.Browseforimage.setObjectName("Browseforimage")
    self.Browseforimage.clicked.connect(self.openImage)
    self.gridLayout.addWidget(self.Browseforimage, 0, 2, 1, 1)
    self.retranslateUi(Dialog2)
    QtCore.QMetaObject.connectSlotsByName(Dialog2)
def retranslateUi(self, Dialog2):
    """Apply the window title and all captions for the operator window."""
    _translate = QtCore.QCoreApplication.translate
    Dialog2.setWindowTitle(_translate("Dialog2", "ID3"))
    # Algorithm captions in combo-box order; these strings must match the
    # selectors used by Trained_Classifier_Predictions.classify.
    algorithm_names = (
        "GLCM+Random Forest",
        "LBGLCM + Random Forest",
        "GLCM + Extra Trees Classifier",
        "LBGLCM + Extra Trees Classifier",
        "GLCM + Gradient Boosting Classifier",
        "LBGLCM + Gradient Boosting Classifier",
        "Convolutional Neural Networks",
    )
    for index, name in enumerate(algorithm_names):
        self.comboBox.setItemText(index, _translate("Dialog2", name))
    self.Classify.setText(_translate("Dialog2", "Classify"))
    self.label.setText(_translate("Dialog2", "Image Location:"))
    self.Browseforimage.setText(_translate("Dialog2", "Browse"))
#Setting up the browse button
def openImage(self):
    """Browse for the image to classify and show its path in the text box.

    Robustness fix: the original indexed the first selected file without
    checking, which raised IndexError when the user cancelled the dialog;
    a cancelled dialog now simply leaves the field unchanged.
    """
    selected_files, _filter = QFileDialog.getOpenFileNames()
    if selected_files:
        self.Imageloc.setText(str(selected_files[0]))
#Collecting trained classifiers from Training Window
def getclf(self, clf):
    """Store the trained classifiers handed over by the training window.

    NOTE(review): Training_Window currently invokes this as an unbound call
    with the class object as ``self``; it works only because ``self`` is
    unused here — an instance call would be cleaner.
    """
    global trained_classifiers
    trained_classifiers = clf
#Collecting labels associated with each classifier
def getlabels(self, labels):
    """Store the per-classifier code->label mappings handed over by the
    training window (same unbound-call caveat as getclf)."""
    global labels_for_classifiers
    labels_for_classifiers = labels
#Classifying the image
def Classifies(self):
    """Run the chosen classifier on the chosen image.

    Results are published through module-level globals so that
    ``finalresults`` (connected to the same button click) can display them.
    """
    global trained_classifiers, defect_name, directory_of_image, classifier_selected, labels_for_classifiers
    classifier_selected = self.comboBox.currentText()
    directory_of_image = self.Imageloc.text()
    defect_name = Trained_Classifier_Predictions.classify(classifier_selected, directory_of_image, trained_classifiers, labels_for_classifiers)
if __name__ == "__main__":
    # Stand-alone preview entry point for the operator window.
    import sys

    preview_app = QtWidgets.QApplication(sys.argv)
    operator_dialog = QtWidgets.QDialog()
    operator_ui = Ui_Dialog2()
    operator_ui.setupUi(operator_dialog)
    operator_dialog.show()
    sys.exit(preview_app.exec_())
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,705
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/Classifiers.py
|
#importing os module
import os
#importing numpy and pandas for computation and storage
import numpy as np
import pandas as pd
#keras for CNN
from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
#importing modules for supervised learning algorithms
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
#importing module for computing accuracy and splitting dataset
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
#function to capture labels of images (defactorizing labels post classification)
def keep_dict(Y_codes, Y_unique):
    """Map each factorized label code back to its original label.

    Parameters
    ----------
    Y_codes : sequence of int
        Codes produced by ``pd.factorize`` (assigned in first-appearance order).
    Y_unique : sequence
        Unique labels returned by ``pd.factorize``, indexed by code.

    Returns
    -------
    dict
        ``{code: label}`` for every distinct code in *Y_codes*.
    """
    # The original kept a separate counter `j` and shadowed the builtin
    # `dict`; pairing each unseen code with the next unique label is the
    # same contract, since len(mapping) tracks how many codes were seen.
    mapping = {}
    for code in Y_codes:
        if code not in mapping:
            mapping[code] = Y_unique[len(mapping)]
    return mapping
#Random Forest Classifier
def RF_train(feat, n_trees, max_feat):
    """Train a Random Forest on the texture-feature table.

    *feat* is consumed: its 'type' column is popped out as the target.
    Returns (fitted classifier, X_test, y_test, code-to-label dict).
    """
    target = feat.pop('type')                 # remaining columns are the features
    codes, uniques = pd.factorize(target)     # encode string labels as ints
    code_to_label = keep_dict(codes, uniques)
    # 75/25 split with a fixed seed for reproducibility
    X_train, X_test, y_train, y_test = train_test_split(feat, codes, test_size=0.25, random_state=42)
    forest = RandomForestClassifier(n_estimators=int(n_trees), n_jobs=-1, random_state=25,
                                    max_features=str(max_feat), max_leaf_nodes=1500, oob_score=True,
                                    max_depth=None, min_samples_leaf=1)
    forest.fit(X_train, y_train)
    return forest, X_test, y_test, code_to_label
#Extra Trees Classifier
def Xtra(feat, n_trees, max_feat):
    """Train an Extra-Trees classifier on the texture-feature table.

    *feat* is consumed: its 'type' column is popped out as the target.
    Returns (fitted classifier, X_test, y_test, code-to-label dict).
    """
    target = feat.pop('type')
    codes, uniques = pd.factorize(target)
    code_to_label = keep_dict(codes, uniques)
    # 75/25 split with a fixed seed for reproducibility
    X_train, X_test, y_train, y_test = train_test_split(feat, codes, test_size=0.25, random_state=42)
    forest = ExtraTreesClassifier(n_estimators=n_trees, n_jobs=-1, random_state=0,
                                  max_leaf_nodes=1500, max_features=str(max_feat), oob_score=True,
                                  max_depth=15, min_samples_leaf=1, bootstrap=True)
    forest.fit(X_train, y_train)
    return forest, X_test, y_test, code_to_label
#Gradient Boosting Classifier
def GB(feat, n_est, max_feat, lrate):
    """Train a Gradient Boosting classifier on the texture-feature table.

    *feat* is consumed: its 'type' column is popped out as the target.
    Returns (fitted classifier, X_test, y_test, code-to-label dict).
    """
    target = feat.pop('type')
    codes, uniques = pd.factorize(target)
    code_to_label = keep_dict(codes, uniques)
    # 75/25 split with a fixed seed for reproducibility
    X_train, X_test, y_train, y_test = train_test_split(feat, codes, test_size=0.25, random_state=42)
    booster = GradientBoostingClassifier(loss='deviance', n_estimators=n_est, learning_rate=float(lrate),
                                         max_features=str(max_feat), max_depth=None, max_leaf_nodes=81,
                                         random_state=9, subsample=0.5)
    booster.fit(X_train, y_train)
    return booster, X_test, y_test, code_to_label
#Convolutional Neural Networks
def CNN(dataset_loc, epoch, val_split):
    """Build and train a small three-block CNN on the defect image dataset.

    dataset_loc -- root folder with one sub-folder per defect class
    epoch       -- number of training epochs
    val_split   -- fraction of the images held out for validation

    Returns (per-epoch validation accuracies, trained model, validation generator).
    """
    #Creating the model
    def create_model():
        model = Sequential([
            Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
            MaxPooling2D(),
            Conv2D(32, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Conv2D(64, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Flatten(),
            Dense(512, activation='relu'),
            Dense(6, activation='softmax')])
        #Compiling Model using optimizer and loss functions
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        return model
    #Defining class labels
    # NOTE(review): class_labels and the per-class directory paths below are
    # computed but never used; flow_from_directory infers classes from the
    # sub-folder names instead.
    class_labels = np.array(['Crazing', 'Inclusion', 'Patches', 'Pitted Surface', 'RS', 'Scratch'])
    #Setting up directory and validation split for the dataset
    data_dir = dataset_loc
    val_split = val_split
    dataset_image_generator = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True, vertical_flip=True,
                                                 validation_split=val_split)
    #Accessing directories to get images
    data_Cr_dir = os.path.join(data_dir, 'Crazing')  # directory with our Cr defect pictures
    data_In_dir = os.path.join(data_dir, 'Inclusion')  # directory with our In defect pictures
    data_Pa_dir = os.path.join(data_dir, 'Patches')  # directory with our Pa defect pictures
    data_Ps_dir = os.path.join(data_dir, 'Pitted Surface')  # directory with our Ps defect pictures
    data_Rs_dir = os.path.join(data_dir, 'RS')  # directory with our Rs pictures
    data_Sc_dir = os.path.join(data_dir, 'Scratch')  # directory with our Sc defect pictures
    #Setting up batch size and image parameters
    batch_size_train = 600
    batch_size_test = 400
    epochs = epoch
    IMG_HEIGHT = 64
    IMG_WIDTH = 64
    #Generating training and test dataset
    train_data_gen = dataset_image_generator.flow_from_directory(batch_size=batch_size_train, directory=data_dir,
                                                                 subset="training", shuffle=True,
                                                                 target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                                 class_mode='categorical')
    val_data_gen = dataset_image_generator.flow_from_directory(batch_size=batch_size_test, directory=data_dir,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='categorical', subset="validation")
    model = create_model()
    # ******for saving model if necessary******
    # filepath = "D:/Work/Academics/AME 505-Engineering Information Modelling/Project/CNN.h5"
    # model.save(filepath, overwrite=True, include_optimizer=True)
    #Generating history of the model and fitting dataset
    # NOTE(review): steps_per_epoch/validation_steps are set to the batch
    # sizes, not n_samples // batch_size — confirm this is intentional.
    history = model.fit(
        train_data_gen,
        steps_per_epoch=batch_size_train,
        epochs=epochs,
        validation_data=val_data_gen, validation_steps=batch_size_test)
    #Getting validation accuracy
    val_acc = history.history['val_accuracy']
    return val_acc, model, val_data_gen
#Load Pretrained CNN Model
def pretrained_CNN(data_dir, check_path="/Users/Shaz/Google Drive/AME505Project/AME 505_Final/cp2.ckpt"):
    """Evaluate a previously-trained CNN checkpoint on *data_dir*.

    data_dir   -- dataset root with one sub-folder per defect class
    check_path -- path of the .ckpt weights file.  IMPROVEMENT: this was a
                  hard-coded absolute path inside the function; it is now a
                  parameter whose default is the old value, so existing
                  callers are unaffected while other machines can pass
                  their own checkpoint location.

    Returns (validation accuracy, loaded model).
    """
    #Defining Class Labels
    # NOTE(review): class_labels and the per-class directory paths below are
    # computed but never used; classes come from the sub-folder names.
    class_labels = np.array(['Crazing', 'Inclusion', 'Patches', 'Pitted Surface', 'RS', 'Scratch'])
    # give validation split here
    val_split = 0.2
    #Training and test data generation with needed batch size
    dataset_image_generator = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True, vertical_flip=True,
                                                 validation_split=val_split)
    data_Cr_dir = os.path.join(data_dir, 'Crazing')  # directory with our Cr defect pictures
    data_In_dir = os.path.join(data_dir, 'Inclusion')  # directory with our In defect pictures
    data_Pa_dir = os.path.join(data_dir, 'Patches')  # directory with our Pa defect pictures
    data_Ps_dir = os.path.join(data_dir, 'Pitted Surface')  # directory with our Ps defect pictures
    data_Rs_dir = os.path.join(data_dir, 'RS')  # directory with our Rs pictures
    data_Sc_dir = os.path.join(data_dir, 'Scratch')  # directory with our Sc defect pictures
    batch_size_train = 600
    batch_size_test = 400
    IMG_HEIGHT = 64
    IMG_WIDTH = 64
    #Creating a model (must match the architecture the checkpoint was trained with)
    def create_model():
        model = Sequential([
            Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
            MaxPooling2D(),
            Conv2D(32, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Conv2D(64, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Flatten(),
            Dense(512, activation='relu'),
            Dense(6, activation='softmax')])
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        return model
    train_data_gen = dataset_image_generator.flow_from_directory(batch_size=batch_size_train, directory=data_dir,
                                                                 subset="training", shuffle=True,
                                                                 target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                                 class_mode='categorical')
    val_data_gen = dataset_image_generator.flow_from_directory(batch_size=batch_size_test, directory=data_dir,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='categorical', subset="validation")
    #Model creation to load the checkpoint
    new_model = create_model()
    #Loading weights from the checkpoint
    new_model.load_weights(check_path)
    #Getting loss and accuracy values
    loss, acc = new_model.evaluate(val_data_gen)
    return acc, new_model
#Predicting a new image or dataset of images
def pred(clf, X_test):
    """Return *clf*'s predictions for the samples in *X_test*."""
    predictions = clf.predict(X_test)
    return predictions
#Displaying the validation accuracy of an algorithm
def display_results(Y_pred, y_test):
    """Return the fraction of predictions in *Y_pred* that match *y_test*."""
    score = accuracy_score(y_test, Y_pred)
    return score
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,706
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/LBGLCM_for_single_image.py
|
#importing pillow module for images
from PIL import Image
#importing the GLCM and LBP module
from skimage.feature import greycomatrix, greycoprops, local_binary_pattern
#importing numpy and pandas
import numpy as np
import pandas as pd
#function to extract features for a ***single image***
def extract_features(directory, dist, angle, radius):
    """Compute LBP + GLCM texture features for a single image.

    directory -- path of the image file
    dist      -- GLCM pixel-pair distance
    angle     -- GLCM pixel-pair angle (radians)
    radius    -- LBP radius (8*radius sampling points are used)

    Returns a one-row pandas DataFrame with contrast, dissimilarity,
    homogeneity, energy and correlation columns.
    """
    # BUG FIX: `features` was initialised as a list, so the
    # `features['contrast'] = ...` assignments below always raised
    # TypeError.  It must be a dict (as in the multi-image GLCM module).
    features = {}
    directory = str(directory)
    contrasts = []
    dissimilarities = []
    homogeneties = []
    correlations = []
    energies = []
    # load an image from file
    image = Image.open(directory)
    # convert the image pixels to a numpy array
    # NOTE(review): PIL's size is (width, height); this reshape assumes a
    # square image — confirm for non-square inputs.
    img = np.array(image.getdata()).reshape(image.size[0], image.size[1])
    #Calulate LBP Features and normalized LBP Histogram
    feat_lbp = local_binary_pattern(img, 8 * radius, radius, 'uniform')
    feat_lbp = np.uint64((feat_lbp / feat_lbp.max()) * 255)
    #Calculate GLCM Matrix and features from the LBP image
    gcom = greycomatrix(feat_lbp, [dist], [angle], 256, symmetric=True, normed=True)
    contrast = greycoprops(gcom, prop='contrast')
    dissimilarity = greycoprops(gcom, prop='dissimilarity')
    homogeneity = greycoprops(gcom, prop='homogeneity')
    energy = greycoprops(gcom, prop='energy')
    correlation = greycoprops(gcom, prop='correlation')
    # store feature
    contrasts.append(contrast[0][0])
    dissimilarities.append(dissimilarity[0][0])
    homogeneties.append(homogeneity[0][0])
    energies.append(energy[0][0])
    correlations.append(correlation[0][0])
    #Add features to dictionary of features
    features['contrast'] = contrasts
    features['dissimilarity'] = dissimilarities
    features['homogeneity'] = homogeneties
    features['energy'] = energies
    features['correlation'] = correlations
    #convert dictionary to dataframe
    df = pd.DataFrame(features)
    return df
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,707
|
omeym/AME-505-Group-3-Deep-learning-based-Surface-Defect-Classifier
|
refs/heads/master
|
/GLCM.py
|
#importing os module (file handling in os) and pillow module for images
import os
from PIL import Image
#importing the GLCM module
from skimage.feature import greycomatrix, greycoprops
#importing numpy and pandas
import numpy as np
import pandas as pd
#function to extract features for a ***collection of images***
def extract_features(directory, dist, angle):
    """Compute GLCM texture features for every image in the dataset tree.

    directory -- root folder containing one sub-folder per defect class
    dist      -- GLCM pixel-pair distance
    angle     -- GLCM pixel-pair angle (radians)

    Returns a DataFrame with one row per image: contrast, dissimilarity,
    homogeneity, energy, correlation and the class name in 'type'.
    """
    # make list for each feature and a dictionary to have all features
    directory = str(directory)
    features = {}
    names = ['Crazing','Inclusion','Patches','Pitted Surface','RS','Scratch']
    contrasts = []
    dissimilarities = []
    homogeneities = []
    correlations = []
    energies = []
    type = []  # NOTE(review): shadows the builtin `type` inside this function
    #Iterating through each image and collecting features
    for defect_name in names:
        foldername = directory + '/' + defect_name
        for name in os.listdir(foldername):
            filename = foldername + '/' + name
            image = Image.open(filename)  # load an image from file
            # NOTE(review): PIL size is (width, height); this reshape assumes
            # square images — confirm for non-square inputs.
            img = np.array(image.getdata()).reshape(image.size[0], image.size[1])  # convert the image pixels to a numpy array
            #Calulating GLCM Features and GLCM Matrix
            gcom = greycomatrix(img, [dist], [angle], 256, symmetric=True, normed=True)
            contrast = greycoprops(gcom, prop='contrast')
            dissimilarity = greycoprops(gcom, prop='dissimilarity')
            homogeneity = greycoprops(gcom, prop='homogeneity')
            energy = greycoprops(gcom, prop='energy')
            correlation = greycoprops(gcom, prop='correlation')
            # Storing features in the lists (all lists stay index-aligned)
            contrasts.append(contrast[0][0])
            dissimilarities.append(dissimilarity[0][0])
            homogeneities.append(homogeneity[0][0])
            energies.append(energy[0][0])
            correlations.append(correlation[0][0])
            type.append(defect_name)
            print('>%s' % name)
    #Adding features to dictionary of features
    features['contrast'] = contrasts
    features['dissimilarity'] = dissimilarities
    features['homogeneity'] = homogeneities
    features['energy'] = energies
    features['correlation'] = correlations
    features['type'] = type
    #convert dictionary to dataframe
    df = pd.DataFrame(features)
    return df
|
{"/Trained_Classifier_Predictions.py": ["/LBGLCM_for_single_image.py", "/Classifiers.py"], "/Training_Window.py": ["/Classifiers.py", "/GLCM.py", "/LBGLCM.py", "/Operator_Window.py", "/Training_Result_Window.py"], "/Operator_Window.py": ["/Trained_Classifier_Predictions.py", "/Final_Results_Window.py"]}
|
30,708
|
matthewswogger/tensorflow_speech_recognition_demo
|
refs/heads/master
|
/speech_data.py
|
"""
Utilities for downloading and providing data from openslr.org,
libriSpeech, Pannous, Gutenberg, WMT, tokenizing, vocabularies.
"""
# TODO! see https://github.com/pannous/caffe-speech-recognition for some data sources
import os
import re
import sys
import wave
import numpy
import numpy as np
import skimage.io # scikit-image
import librosa
import matplotlib
# import extensions as xx
from random import shuffle
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
# Download source and local layout for the Pannous spoken-numbers corpus.
SOURCE_URL = 'http://pannous.net/files/' #spoken_numbers.tar'
DATA_DIR = 'data/'
pcm_path = "data/spoken_numbers_pcm/" # 8 bit
wav_path = "data/spoken_numbers_wav/" # 16 bit s16le
path = pcm_path  # active corpus directory used by the batch generators
CHUNK = 4096
test_fraction=0.1 # 10% of data for test / verification
# http://pannous.net/files/spoken_numbers_pcm.tar
class Source: # labels
    """Names/URLs of the downloadable dataset archives (see maybe_download)."""
    DIGIT_WAVES = 'spoken_numbers_pcm.tar'
    DIGIT_SPECTROS = 'spoken_numbers_spectros_64x64.tar' # 64x64 baby data set, works astonishingly well
    NUMBER_WAVES = 'spoken_numbers_wav.tar'
    NUMBER_IMAGES = 'spoken_numbers.tar' # width=256 height=256
    WORD_SPECTROS = 'https://dl.dropboxusercontent.com/u/23615316/spoken_words.tar' # width,height=512# todo: sliding window!
    TEST_INDEX = 'test_index.txt'
    TRAIN_INDEX = 'train_index.txt'
from enum import Enum
class Target(Enum): # labels
    """What the training label encodes for a given sample."""
    digits=1
    speaker=2
    words_per_minute=3
    word_phonemes=4
    word=5#characters=5
    sentence=6
    sentiment=7
    first_letter=8
def progresshook(blocknum, blocksize, totalsize):
    """urlretrieve reporthook: render download progress on stderr."""
    done = blocknum * blocksize
    if totalsize <= 0:
        # total size is unknown: just report the byte count
        sys.stderr.write("read %d\n" % (done,))
        return
    percent = done * 1e2 / totalsize
    width = len(str(totalsize))
    sys.stderr.write("\r%5.1f%% %*d / %d" % (percent, width, done, totalsize))
    if done >= totalsize:  # near the end
        sys.stderr.write("\n")
def maybe_download(file, work_directory=DATA_DIR):
    """Download *file* from Pannous's website into *work_directory* unless
    it is already there, extract the tar, and return the extraction path.

    BUG FIX: *work_directory* now defaults to DATA_DIR, so the
    one-argument calls in the ``__main__`` block no longer raise TypeError.
    """
    print("Looking for data %s in %s" % (file, work_directory))
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    # strip any URL/dir prefix to get the local file name (raw string avoids
    # the invalid '\/' escape warning of the original pattern; same match)
    filepath = os.path.join(work_directory, re.sub(r'.*/', '', file))
    if not os.path.exists(filepath):
        if not file.startswith("http"):
            url_filename = SOURCE_URL + file
        else:
            url_filename = file
        print('Downloading from %s to %s' % (url_filename, filepath))
        filepath, _ = urllib.request.urlretrieve(url_filename, filepath, progresshook)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', file, statinfo.st_size, 'bytes.')
    if os.path.exists(filepath):
        print('Extracting %s to %s' % (filepath, work_directory))
        # NOTE(review): shell command built from a local path; fine for
        # trusted paths, but subprocess.run([...]) would be safer.
        os.system('tar xf %s -C %s' % (filepath, work_directory))
        print('Data ready!')
    return filepath.replace(".tar", "")
def mfcc_batch_generator(batch_size=10, source=Source.DIGIT_WAVES, target=Target.digits):
    """Yield (mfcc_features, one_hot_labels) batches forever.

    Downloads *source* if needed, then loops endlessly over the .wav files
    in `path`, reshuffling on every pass.  Each MFCC matrix is zero-padded
    to 80 frames before being batched.
    """
    maybe_download(source, DATA_DIR)
    if target == Target.speaker:
        # NOTE(review): get_speakers/speaker are not defined in this module —
        # confirm they exist elsewhere before using Target.speaker.
        speakers = get_speakers()
    batch_features = []
    labels = []
    files = os.listdir(path)
    while True:
        print("loaded batch of %d files" % len(files))
        shuffle(files)
        for wav in files:
            if not wav.endswith(".wav"):
                continue
            # NOTE(review): this local `wave` shadows the imported wave module
            wave, sr = librosa.load(path + wav, mono=True)
            if target == Target.speaker:
                label = one_hot_from_item(speaker(wav), speakers)
            elif target == Target.digits:
                # file name starts with the spoken digit
                label = dense_to_one_hot(int(wav[0]), 10)
            elif target == Target.first_letter:
                label = dense_to_one_hot((ord(wav[0]) - 48) % 32, 32)
            else:
                raise Exception("todo : labels for Target!")
            labels.append(label)
            mfcc = librosa.feature.mfcc(wave, sr)
            # print(np.array(mfcc).shape)
            # pad every utterance to a fixed 80-frame width
            mfcc = np.pad(mfcc, ((0, 0), (0, 80 - len(mfcc[0]))), mode='constant', constant_values=0)
            batch_features.append(np.array(mfcc))
            if len(batch_features) >= batch_size:
                # print(np.array(batch_features).shape)
                # yield np.array(batch_features), labels
                yield batch_features, labels  # basic_rnn_seq2seq inputs must be a sequence
                batch_features = []  # Reset for next batch
                labels = []
def one_hot_from_item(item, items):
    """Return a one-hot list with a 1 at the first position of *item*.

    Raises ValueError (from list.index) when *item* is not in *items*.
    """
    hot = items.index(item)
    encoding = [0] * len(items)
    encoding[hot] = 1
    return encoding
def dense_to_one_hot(batch, batch_size, num_labels):
    """One-hot encode a batch of labels using TensorFlow ops.

    NOTE(review): dead code — `tf` is never imported in this module, and
    this definition is immediately shadowed by the numpy version below.
    It also uses the pre-1.0 tf.concat argument order.
    """
    sparse_labels = tf.reshape(batch, [batch_size, 1])
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    concatenated = tf.concat(1, [indices, sparse_labels])
    concat = tf.concat(0, [[batch_size], [num_labels]])
    output_shape = tf.reshape(concat, [2])
    sparse_to_dense = tf.sparse_to_dense(concatenated, output_shape, 1.0, 0.0)
    return tf.reshape(sparse_to_dense, [batch_size, num_labels])
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    identity = numpy.eye(num_classes)
    return identity[labels_dense]
if __name__ == "__main__":
    print("downloading speech datasets")
    # BUG FIX: maybe_download takes (file, work_directory); the original
    # one-argument calls raised TypeError before any download started.
    maybe_download(Source.DIGIT_SPECTROS, DATA_DIR)
    maybe_download(Source.DIGIT_WAVES, DATA_DIR)
    maybe_download(Source.NUMBER_IMAGES, DATA_DIR)
    maybe_download(Source.NUMBER_WAVES, DATA_DIR)
|
{"/run_model.py": ["/speech_data.py"]}
|
30,709
|
matthewswogger/tensorflow_speech_recognition_demo
|
refs/heads/master
|
/run_model.py
|
from __future__ import division, print_function, absolute_import
import tflearn as tf
import speech_data
import numpy as np
from sklearn.cross_validation import train_test_split
def score_model(X, y):
    """Return a formatted accuracy string for the global `model` on (X, y).

    Predictions and *y* are one-hot encoded; accuracy is the fraction of
    rows whose argmax matches, rounded to two decimals.
    """
    predicted = np.array(model.predict(X))
    expected = np.array(y)
    hits = np.argmax(predicted, axis=1) == np.argmax(expected, axis=1)
    accuracy = round(float(np.sum(hits)) / hits.shape[0], 2)
    return 'model accuracy: {}'.format(accuracy)
# Hyper-parameters
LEARNING_RATE = 0.0001
BATCH_SIZE = 64
WIDTH = 20 # mfcc features
HEIGHT = 80 # (max) length of utterance
CLASSES = 10 # digits
# Pull one 2400-sample batch out of the streaming generator and treat it
# as the whole dataset for this script.
data_set = speech_data.mfcc_batch_generator(2400)
X, Y = next(data_set)
X, Y = np.array(X), np.array(Y)
# get train, test, validation split (80/20, then 80/20 again)
# NOTE(review): sklearn.cross_validation (imported above) was removed in
# scikit-learn 0.20; this script only runs on very old versions.
X_train_val, X_test, y_train_val, y_test = train_test_split(X, Y, test_size=0.2,
                                                            random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X_train_val, y_train_val,
                                                  test_size=0.2, random_state=0)
# Network building: single-layer LSTM over the MFCC frames (tflearn API,
# imported as `tf` above)
net = tf.input_data([None, WIDTH, HEIGHT])
net = tf.lstm(net, 128, dropout=0.8)
net = tf.fully_connected(net, CLASSES, activation='softmax')
net = tf.regression(net, optimizer='adam', learning_rate=LEARNING_RATE,
                    loss='categorical_crossentropy')
model = tf.DNN(net, tensorboard_verbose=0)
# Training (disabled; kept for reference)
# model.load("saved_model/epoch_2000.tfl")
# EPOCHS = 20
# epochs_performed = 2000
# for _ in xrange(50):
#     # Fit model
#     model.fit(X_train, y_train, n_epoch=EPOCHS, validation_set=(X_val, y_val),
#               show_metric=True, batch_size=BATCH_SIZE)
#     # Save model
#     epochs_performed += 20
#     model_name = "saved_model/epoch_{}.tfl".format(epochs_performed)
#     model.save(model_name)
# model evaluation against the held-out test split
model.load("saved_model/epoch_3000.tfl")
print (score_model(X_test, y_test))
|
{"/run_model.py": ["/speech_data.py"]}
|
30,756
|
sshivaji/dgt-uci-engine
|
refs/heads/master
|
/pydgt.py
|
from Queue import Queue
import serial
import sys
import time
from threading import Thread
from threading import RLock
from threading import Condition
from struct import unpack
import signal
import glob
from itertools import cycle
# Alternating 0/1 stream used to blink the clock's time separators.
clock_blink_iterator = cycle(range(2))
# Symbolic message/event names.
BOARD = "Board"
FEN = "FEN"
CLOCK_BUTTON_PRESSED = "CLOCK_BUTTON_PRESSED"
CLOCK_ACK = "CLOCK_ACK"
CLOCK_LEVER = "CLOCK_LEVER"
DGTNIX_MSG_UPDATE = 0x05
# DGT serial protocol command/message bytes (dgtnix naming).
_DGTNIX_SEND_BRD = 0x42
_DGTNIX_MESSAGE_BIT = 0x80
_DGTNIX_BOARD_DUMP = 0x06
_DGTNIX_BWTIME = 0x0d
_DGTNIX_MSG_BOARD_DUMP = _DGTNIX_MESSAGE_BIT|_DGTNIX_BOARD_DUMP
_DGTNIX_SEND_UPDATE_NICE = 0x4b
# message emitted when a piece is added onto the board
DGTNIX_MSG_MV_ADD = 0x00
#message emitted when a piece is removed from the board
DGTNIX_MSG_MV_REMOVE = 0x01
DGT_SIZE_FIELD_UPDATE = 5
_DGTNIX_FIELD_UPDATE = 0x0e
# Piece codes as they appear in board dumps / field updates.
_DGTNIX_EMPTY = 0x00
_DGTNIX_WPAWN = 0x01
_DGTNIX_WROOK = 0x02
_DGTNIX_WKNIGHT = 0x03
_DGTNIX_WBISHOP = 0x04
_DGTNIX_WKING = 0x05
_DGTNIX_WQUEEN = 0x06
_DGTNIX_BPAWN = 0x07
_DGTNIX_BROOK = 0x08
_DGTNIX_BKNIGHT = 0x09
_DGTNIX_BBISHOP = 0x0a
_DGTNIX_BKING = 0x0b
_DGTNIX_BQUEEN = 0x0c
# Clock / query command bytes.
_DGTNIX_CLOCK_MESSAGE = 0x2b
_DGTNIX_SEND_CLK = 0x41
_DGTNIX_SEND_UPDATE = 0x43
_DGTNIX_SEND_UPDATE_BRD = 0x44
_DGTNIX_SEND_SERIALNR = 0x45
_DGTNIX_SEND_BUSADDRESS = 0x46
_DGTNIX_SEND_TRADEMARK = 0x47
_DGTNIX_SEND_VERSION = 0x4d
_DGTNIX_SEND_EE_MOVES = 0x49
_DGTNIX_SEND_RESET = 0x40
_DGTNIX_SIZE_BOARD_DUMP = 67
_DGTNIX_NONE = 0x00
_DGTNIX_BOARD_DUMP = 0x06
_DGTNIX_EE_MOVES = 0x0f
_DGTNIX_BUSADDRESS = 0x10
_DGTNIX_SERIALNR = 0x11
_DGTNIX_TRADEMARK = 0x12
_DGTNIX_VERSION = 0x13
# Dot/semicolon/leading-1 flag bits for the clock display.
DGTNIX_RIGHT_DOT = 0x01
DGTNIX_RIGHT_SEMICOLON = 0x02
DGTNIX_RIGHT_1 = 0x04
DGTNIX_LEFT_DOT = 0x08
DGTNIX_LEFT_SEMICOLON = 0x10
DGTNIX_LEFT_1 = 0x20
# DGT piece code -> FEN character.
piece_map = {
    _DGTNIX_EMPTY : ' ',
    _DGTNIX_WPAWN : 'P',
    _DGTNIX_WROOK : 'R',
    _DGTNIX_WKNIGHT : 'N',
    _DGTNIX_WBISHOP : 'B',
    _DGTNIX_WKING : 'K',
    _DGTNIX_WQUEEN : 'Q',
    _DGTNIX_BPAWN : 'p',
    _DGTNIX_BROOK : 'r',
    _DGTNIX_BKNIGHT : 'n',
    _DGTNIX_BBISHOP : 'b',
    _DGTNIX_BKING : 'k',
    _DGTNIX_BQUEEN : 'q'
}
# Commands accepted by DGTBoard.sendMessageToBoard.
dgt_send_message_list = [_DGTNIX_CLOCK_MESSAGE, _DGTNIX_SEND_CLK, _DGTNIX_SEND_BRD, _DGTNIX_SEND_UPDATE,
                         _DGTNIX_SEND_UPDATE_BRD, _DGTNIX_SEND_SERIALNR, _DGTNIX_SEND_BUSADDRESS, _DGTNIX_SEND_TRADEMARK,
                         _DGTNIX_SEND_VERSION, _DGTNIX_SEND_UPDATE_NICE, _DGTNIX_SEND_EE_MOVES, _DGTNIX_SEND_RESET]
def scan():
    """Return device paths of candidate serial ports (USB/ACM) for DGT boards."""
    patterns = ('/dev/tty.usb*', '/dev/ttyUSB*', '/dev/ttyACM*')
    found = []
    for pattern in patterns:
        found.extend(glob.glob(pattern))
    return found
class Event(object):
    """Bare attribute bag; fields are attached dynamically by DGTBoard.fire."""
    pass
class DGTBoard(object):
def __init__(self, device, virtual = False, send_board = True):
    """Open the serial link to a DGT board.

    device     -- serial device path (e.g. /dev/ttyUSB0)
    virtual    -- if True, skip opening any serial port (no I/O performed)
    send_board -- if True, immediately request a full board dump
    """
    self.board_reversed = False
    self.clock_ack_recv = False
    # self.clock_queue = Queue()
    self.dgt_clock = False
    self.dgt_clock_lock = RLock()
    # self.dgt_clock_ack_lock = RLock()
    # self.dgt_clock_ack_queue = Queue()
    if not virtual:
        self.ser = serial.Serial(device,stopbits=serial.STOPBITS_ONE)
        # ask the board to push incremental ("nice") updates
        self.write(chr(_DGTNIX_SEND_UPDATE_NICE))
        if send_board:
            self.write(chr(_DGTNIX_SEND_BRD))
    self.callbacks = []
def get_board(self):
    """Send the command that requests a full board dump from the device."""
    self.write(chr(_DGTNIX_SEND_BRD))
def subscribe(self, callback):
    """Register *callback* to be invoked by fire() for every event."""
    self.callbacks.append(callback)
def fire(self, **attrs):
    """Build an Event carrying *attrs* (plus .source = self) and invoke
    every subscribed callback with it.

    BUG FIX: dict.iteritems() is Python-2-only; items() behaves the same
    there and keeps the module importable under Python 3.
    """
    event = Event()
    event.source = self
    for key, value in attrs.items():
        setattr(event, key, value)
    for callback in self.callbacks:
        callback(event)
def convertInternalPieceToExternal(self, c):
    """Map a DGT piece code to its FEN character, or None when unknown.

    BUG FIX: dict.has_key() no longer exists in Python 3; dict.get gives
    the same piece-or-None result on both Python versions.
    """
    return piece_map.get(c)
def sendMessageToBoard(self, i):
    """Write the command byte *i* to the board if it is a known DGT command.

    Raises ValueError for unknown commands.
    BUG FIX: the original `raise "string"` form is illegal (string
    exceptions were removed from Python) and used py2-only unichr();
    raise a real exception with the command value instead.
    """
    if i not in dgt_send_message_list:
        raise ValueError("Critical, cannot send - Unknown command: {0}".format(i))
    self.write(i)
def dump_board(self, board):
    """Render a raw board-dump byte string as a crude ASCII diagram.

    Respects the stored orientation (self.board_reversed).
    BUG FIX: xrange() is Python-2-only; range() is equivalent here.
    """
    pattern = '>' + 'B' * len(board)
    buf = unpack(pattern, board)
    if self.board_reversed:
        buf = buf[::-1]
    output = "__" * 8 + "\n"
    for square in range(0, len(board)):
        if square and square % 8 == 0:
            # close the previous rank and start a new divider row
            output += "|\n"
            output += "__" * 8 + "\n"
        output += "|"
        output += self.convertInternalPieceToExternal(buf[square])
    output += "|\n"
    output += "__" * 8
    return output
# Two reverse calls will bring back board to original orientation
def reverse_board(self):
print ("Reversing board!")
self.board_reversed = not self.board_reversed
def extract_base_fen(self, board):
    """Build the piece-placement field of a FEN from 64 piece codes.

    Empty squares are run-length encoded; '/' separates consecutive ranks.
    Returns the FEN as a list of string fragments.
    """
    FEN = []
    empty = 0
    for sq in range(0, 64):
        if board[sq] != 0:
            if empty > 0:
                FEN.append(str(empty))
                empty = 0
            FEN.append(self.convertInternalPieceToExternal(board[sq]))
        else:
            empty += 1
        if (sq + 1) % 8 == 0:
            # end of a rank: flush any pending empty-square count
            if empty > 0:
                FEN.append(str(empty))
                empty = 0
            if sq < 63:
                FEN.append("/")
            empty = 0
    return FEN
def get_fen(self, board, tomove='w'):
    """Convert a raw board dump into a FEN string (*tomove* side to move).

    If the placement matches the start position mirrored, the board is
    assumed to be upside down: the orientation flag is flipped and the
    FEN regenerated.
    """
    pattern = '>'+'B'*len(board)
    board = unpack(pattern, board)
    if self.board_reversed:
        board = board[::-1]
    FEN = self.extract_base_fen(board)# Check if board needs to be reversed
    if ''.join(FEN) == "RNBKQBNR/PPPPPPPP/8/8/8/8/pppppppp/rnbkqbnr":
        self.reverse_board()
        board = board[::-1]
        # Redo FEN generation - should be a fast operation
        FEN = self.extract_base_fen(board)# Check if board needs to be reversed
    FEN.append(' ')
    FEN.append(tomove)
    FEN.append(' ')
    # possible castlings
    # NOTE(review): castling rights are hard-coded to KQkq, en passant to
    # '-', and the trailing counters render as "0 10" (note the extra '0'
    # after '1') — confirm consumers expect exactly this tail.
    FEN.append('K')
    FEN.append('Q')
    FEN.append('k')
    FEN.append('q')
    FEN.append(' ')
    FEN.append('-')
    FEN.append(' ')
    FEN.append('0')
    FEN.append(' ')
    FEN.append('1')
    FEN.append('0')
    return ''.join(FEN)
def read(self, message_length):
    """Blocking read of *message_length* bytes from the serial port."""
    return self.ser.read(message_length)
def write(self, message):
    """Write raw data to the serial port."""
    self.ser.write(message)
# Converts a lowercase ASCII character or digit to DGT Clock representation
@staticmethod
def char_to_lcd_code(c):
if c == '0':
return 0x01 | 0x02 | 0x20 | 0x08 | 0x04 | 0x10
if c == '1':
return 0x02 | 0x04
if c == '2':
return 0x01 | 0x40 | 0x08 | 0x02 | 0x10
if c == '3':
return 0x01 | 0x40 | 0x08 | 0x02 | 0x04
if c == '4':
return 0x20 | 0x04 | 0x40 | 0x02
if c == '5':
return 0x01 | 0x40 | 0x08 | 0x20 | 0x04
if c == '6':
return 0x01 | 0x40 | 0x08 | 0x20 | 0x04 | 0x10
if c == '7':
return 0x02 | 0x04 | 0x01
if c == '8':
return 0x01 | 0x02 | 0x20 | 0x40 | 0x04 | 0x10 | 0x08
if c == '9':
return 0x01 | 0x40 | 0x08 | 0x02 | 0x04 | 0x20
if c == 'a':
return 0x01 | 0x02 | 0x20 | 0x40 | 0x04 | 0x10
if c == 'b':
return 0x20 | 0x04 | 0x40 | 0x08 | 0x10
if c == 'c':
return 0x01 | 0x20 | 0x10 | 0x08
if c == 'd':
return 0x10 | 0x40 | 0x08 | 0x02 | 0x04
if c == 'e':
return 0x01 | 0x40 | 0x08 | 0x20 | 0x10
if c == 'f':
return 0x01 | 0x40 | 0x20 | 0x10
if c == 'g':
return 0x01 | 0x20 | 0x10 | 0x08 | 0x04
if c == 'h':
return 0x20 | 0x10 | 0x04 | 0x40
if c == 'i':
return 0x02 | 0x04
if c == 'j':
return 0x02 | 0x04 | 0x08 | 0x10
if c == 'k':
return 0x01 | 0x20 | 0x40 | 0x04 | 0x10
if c == 'l':
return 0x20 | 0x10 | 0x08
if c == 'm':
return 0x01 | 0x40 | 0x04 | 0x10
if c == 'n':
return 0x40 | 0x04 | 0x10
if c == 'o':
return 0x40 | 0x04 | 0x10 | 0x08
if c == 'p':
return 0x01 | 0x40 | 0x20 | 0x10 | 0x02
if c == 'q':
return 0x01 | 0x40 | 0x20 | 0x04 | 0x02
if c == 'r':
return 0x40 | 0x10
if c == 's':
return 0x01 | 0x40 | 0x08 | 0x20 | 0x04
if c == 't':
return 0x20 | 0x10 | 0x08 | 0x40
if c == 'u':
return 0x08 | 0x02 | 0x20 | 0x04 | 0x10
if c == 'v':
return 0x08 | 0x02 | 0x20
if c == 'w':
return 0x40 | 0x08 | 0x20 | 0x02
if c == 'x':
return 0x20 | 0x10 | 0x04 | 0x40 | 0x02
if c == 'y':
return 0x20 | 0x08 | 0x04 | 0x40 | 0x02
if c == 'z':
return 0x01 | 0x40 | 0x08 | 0x02 | 0x10
return 0
@staticmethod
def compute_dgt_time_string(t):
print ("time : {0}".format(t))
if t < 0:
return " "
t /= 1000
if t < 1200:
#minutes.seconds mode
minutes = t / 60
seconds = t - minutes * 60
if minutes >= 10:
minutes -= 10
# print "seconds : {0}".format(seconds)
return "{0}{1:02d}".format(minutes, int(seconds))
# oss << minutes << setfill ('0') << setw (2) << seconds;
else:
#hours:minutes mode
hours = t / 3600
minutes = (t - (hours * 3600)) / 60
return "{0}{1:02d}".format(hours, minutes)
def print_time_on_clock(self, w_time, b_time, w_blink=True, b_blink=True):
    """Show both players' remaining time (milliseconds) on the DGT clock.

    Builds the 6-character display text plus the dot/semicolon flag byte,
    swapping left/right sides when the board orientation is reversed.
    Blinking of the separators is driven by the module-level
    clock_blink_iterator, which alternates 0/1 on each call.

    NOTE(review): clock_blink_iterator.next() is Python-2-only syntax
    (next(clock_blink_iterator) is the portable form).
    """
    dots = 0
    w_dots = True
    b_dots = True
    # only blink once a side is in hours:minutes mode (>= 20 minutes)
    if w_blink and w_time >= 1200000:
        w_dots = clock_blink_iterator.next()
    if b_blink and b_time >= 1200000:
        b_dots = clock_blink_iterator.next()
    if not self.board_reversed:
        s = self.compute_dgt_time_string(w_time) + self.compute_dgt_time_string(b_time)
        # white occupies the left half of the display
        if w_time < 1200000:  # minutes.seconds mode
            if w_dots:
                dots |= DGTNIX_LEFT_DOT
            if w_time >= 600000:
                dots |= DGTNIX_LEFT_1
        elif w_dots:
            dots |= DGTNIX_LEFT_SEMICOLON  # hours:minutes mode
        #black
        if b_time < 1200000:
            #minutes.seconds mode
            if b_dots:
                dots |= DGTNIX_RIGHT_DOT
            if b_time >= 600000:
                dots |= DGTNIX_RIGHT_1
        elif b_dots:
            dots |= DGTNIX_RIGHT_SEMICOLON  # hours:minutes mode
    else:
        # reversed board: black takes the left half, white the right
        s = self.compute_dgt_time_string(b_time) + self.compute_dgt_time_string(w_time)
        if b_time < 1200000:  # minutes.seconds mode
            if b_dots:
                dots |= DGTNIX_LEFT_DOT
            if b_time >= 600000:
                dots |= DGTNIX_LEFT_1
        elif b_dots:
            dots |= DGTNIX_LEFT_SEMICOLON  # hours:minutes mode
        #black
        if w_time < 1200000:
            #minutes.seconds mode
            if w_dots:
                dots |= DGTNIX_RIGHT_DOT
            if w_time >= 600000:
                dots |= DGTNIX_RIGHT_1
        elif w_dots:
            dots |= DGTNIX_RIGHT_SEMICOLON  # hours:minutes mode
    # (the commented-out dgtnix C++ reference implementation that used to
    # sit here mirrored the logic above and was removed for readability)
    self.send_message_to_clock(s, False, dots)
def send_message_to_clock(self, message, beep, dots, move=False, test_clock=False, max_num_tries = 5):
    """Format *message* for the 6-character LCD and push it to the clock.

    move       -- format as a chess move ("e2 e4 ") instead of
                  right-aligned text
    beep       -- sound the clock buzzer
    dots       -- dot/semicolon flag byte (DGTNIX_LEFT_*/RIGHT_* bits)
    test_clock -- if set and no clock has acked yet (self.dgt_clock is
                  False), resend up to *max_num_tries* times, sleeping
                  one second between attempts; used for clock detection
    """
    # Todo locking?
    print("info string Got message to clock: {0}".format(message))
    if move:
        message = self.format_move_for_dgt(message)
    else:
        message = self.format_str_for_dgt(message)
    with self.dgt_clock_lock:
        # self.clock_ack_recv = False
        # time.sleep(5)
        self._sendMessageToClock(self.char_to_lcd_code(message[0]), self.char_to_lcd_code(message[1]),
                                 self.char_to_lcd_code(message[2]), self.char_to_lcd_code(message[3]),
                                 self.char_to_lcd_code(message[4]), self.char_to_lcd_code(message[5]),
                                 beep, dots, test_clock=test_clock, max_num_tries=max_num_tries)
        # self.clock_ack_recv = False
        if test_clock and not self.dgt_clock:
            tries = 1
            while True:
                time.sleep(1)  # give the clock a moment to ack
                if not self.dgt_clock:
                    tries += 1
                    if tries > max_num_tries:
                        break
                    self._sendMessageToClock(self.char_to_lcd_code(message[0]), self.char_to_lcd_code(message[1]),
                                             self.char_to_lcd_code(message[2]), self.char_to_lcd_code(message[3]),
                                             self.char_to_lcd_code(message[4]), self.char_to_lcd_code(message[5]),
                                             beep, dots, test_clock=test_clock, max_num_tries=max_num_tries)
                else:
                    break
def test_for_dgt_clock(self, message="pic023", wait_time = 1):
    """Probe for a DGT clock by sending a beeping test message.

    Detection is asynchronous: self.dgt_clock is set elsewhere once the
    clock acks; this call just drives the retry loop inside
    send_message_to_clock (*wait_time* retries).
    (The earlier signal/alarm-based timeout code was removed; its remnants
    used to be commented out here.)
    """
    self.send_message_to_clock(message, True, False, test_clock=True, max_num_tries=wait_time)
def dgt_clock_test_post_handler(self, signum, frame):
if self.dgt_clock:
print ("info string Clock found")
# self.dgt_clock = True
else:
print ("info string No DGT Clock found")
# self.dgt_clock = False
def format_str_for_dgt(self, s):
if len(s)>6:
s = s[:6]
if len(s) < 6:
remainder = 6 - len(s)
s = " "*remainder + s
return s
def format_move_for_dgt(self, s):
mod_s = s[:2]+' '+s[2:]
if len(mod_s)<6:
mod_s+=" "
return mod_s
def _sendMessageToClock(self, a, b, c, d, e, f, beep, dots, test_clock = False, max_num_tries = 5):
    """Write one raw 6-character display frame to the clock over serial.

    a..f are LCD codes for display cells 1..6 (already translated by the
    caller via char_to_lcd_code).  *dots* is forwarded verbatim when truthy,
    else 0; *beep* selects 0x03 (audible) vs 0x01 (silent).  test_clock and
    max_num_tries are accepted for caller symmetry but unused here — all
    retrying lives in send_message_to_clock.  Ported from the dgtnix C
    driver (see the retained reference comments).
    """
    # pthread_mutex_lock (&clock_ack_mutex);
    # if(!(g_debugMode == DGTNIX_DEBUG_OFF))
    # {
    #     _debug("Sending message to clock\n");
    #     if(g_descriptorDriverBoard < 0)
    #     {
    #         perror("dgtnix critical:sendMessageToBoard: invalid file descriptor\n");
    #         exit(-1);
    #     }
    # }
    print ("info string Sending Message to Clock..")
    self.ser.write(chr(_DGTNIX_CLOCK_MESSAGE))
    self.ser.write(chr(0x0b))  # NOTE(review): presumably the frame length byte — confirm against the DGT protocol
    self.ser.write(chr(0x03))
    self.ser.write(chr(0x01))
    # Cell bytes are written in the order c,b,a then f,e,d — kept exactly as
    # in the original dgtnix driver this was ported from.
    self.ser.write(chr(c))
    self.ser.write(chr(b))
    self.ser.write(chr(a))
    self.ser.write(chr(f))
    self.ser.write(chr(e))
    self.ser.write(chr(d))
    if dots:
        self.ser.write(chr(dots))
    else:
        self.ser.write(chr(0))
    if beep:
        self.ser.write(chr(0x03))
    else:
        self.ser.write(chr(0x01))
    self.ser.write(chr(0x00))  # final byte of the fixed-size frame
def read_message_from_board(self, head=None):
    """Read one framed message from the board and dispatch it as events.

    head: optional first byte already consumed by the caller (poll()).
    The 3-byte header carries the command id in the low 7 bits of byte 0
    and a payload length split over the low 7 bits of bytes 1 and 2; the
    stored length includes the header, hence the -3 below.  Raises
    Exception when no header can be read.
    """
    # print "acquire"
    # self.dgt_clock_ack_lock.acquire()
    print ("info string got DGT message")
    header_len = 3
    if head:
        header = head + self.read(header_len-1)
    else:
        header = self.read(header_len)
    if not header:
        # raise
        raise Exception("info string Invalid First char in message")
    pattern = '>'+'B'*header_len
    buf = unpack(pattern, header)
    # Strip the top bit to recover the command id.
    command_id = buf[0] & 127
    print ("info string command_id: {0}".format(command_id))
    # 14-bit payload length from two 7-bit bytes, minus the header itself.
    message_length = (buf[1] << 7) + buf[2]
    message_length-=3
    if command_id == _DGTNIX_BOARD_DUMP:
        # Full board dump: publish both the FEN and a printable board.
        print ("info string Received DGTNIX_DUMP message")
        message = self.read(message_length)
        self.fire(type=FEN, message=self.get_fen(message))
        self.fire(type=BOARD, message=self.dump_board(message))
    elif command_id == _DGTNIX_BWTIME:
        # Clock status frame: lever position, display ACKs, button presses.
        print ("info string Received DGTNIX_BWTIME message from the board\n")
        message = self.read(message_length)
        pattern = '>'+'B'*message_length
        buf = unpack(pattern, message)
        if buf:
            if buf[0] == buf[1] == buf[2] == buf[3] == buf[4] == buf[5] == 0:
                # All-zero payload: only the lever byte is meaningful.
                self.fire(type=CLOCK_LEVER, message=buf[6])
            if buf[0] == 10 and buf[1] == 16 and buf[2] == 1 and buf[3] == 10 and not buf[4] and not buf[5] and not buf[6]:
                # Display-message ACK: mark that a clock is present.
                if not self.dgt_clock:
                    self.dgt_clock = True
                self.fire(type=CLOCK_ACK, message='')
            # NOTE(review): the byte-pair ranges below map hardware buttons
            # 0-4; values look reverse-engineered from observed traffic —
            # confirm against the DGT clock protocol documentation.
            if 5 <= buf[4] <= 6 and buf[5] == 49:
                self.fire(type=CLOCK_BUTTON_PRESSED, message=0)
            if 33 <= buf[4] <= 34 and buf[5] == 52:
                self.fire(type=CLOCK_BUTTON_PRESSED, message=1)
            if 17 <= buf[4] <= 18 and buf[5] == 51:
                self.fire(type=CLOCK_BUTTON_PRESSED, message=2)
            if 9 <= buf[4] <= 10 and buf[5] == 50:
                self.fire(type=CLOCK_BUTTON_PRESSED, message=3)
            if 65 <= buf[4] <= 66 and buf[5] == 53:
                self.fire(type=CLOCK_BUTTON_PRESSED, message=4)
    elif command_id == _DGTNIX_EE_MOVES:
        print ("info string Received _DGTNIX_EE_MOVES from the board\n")
    elif command_id == _DGTNIX_BUSADDRESS:
        print ("info string Received _DGTNIX_BUSADDRESS from the board\n")
    elif command_id == _DGTNIX_SERIALNR:
        print ("info string Received _DGTNIX_SERIALNR from the board\n")
        message = self.read(message_length)  # drain payload, unused
    elif command_id == _DGTNIX_TRADEMARK:
        print ("info string Received _DGTNIX_TRADEMARK from the board\n")
        message = self.read(message_length)  # drain payload, unused
    elif command_id == _DGTNIX_VERSION:
        print ("info string Received _DGTNIX_VERSION from the board\n")
    elif command_id == _DGTNIX_FIELD_UPDATE:
        # A square changed; after draining the update, request a fresh dump.
        print("info string Received _DGTNIX_FIELD_UPDATE from the board")
        print("info string message_length : {0}".format(message_length))
        if message_length == 2:
            message = self.read(message_length)
            self.write(chr(_DGTNIX_SEND_BRD))
        else:
            # NOTE(review): non-standard length drains 4 bytes — presumably
            # an add/remove frame (code, column, line, piece); confirm.
            message = self.read(4)
    else:
        # Piece add/update codes arrive outside the regular command range.
        if command_id == DGTNIX_MSG_MV_ADD:
            print("info string Add piece message")
        elif command_id == DGTNIX_MSG_UPDATE:
            print("info string Update piece message")
# Warning, this method must be in a thread
def poll(self):
    """Blocking read loop: pull single bytes off the serial link and hand
    each message start to read_message_from_board(). Never returns."""
    while True:
        received = self.read(1)
        if received:
            self.read_message_from_board(head=received)
def _dgt_observer(self, attrs):
    """Default subscriber: log FEN and board-dump events to stdout."""
    event_type = attrs.type
    if event_type == FEN:
        print("info string FEN: {0}".format(attrs.message))
        return
    if event_type == BOARD:
        print("info string Board: ")
        print("info string" + attrs.message)
class VirtualDGTBoard(DGTBoard):
    """In-memory stand-in for a physical DGT board: positions are injected
    with set_fen() and replayed through the normal event pipeline."""

    def __init__(self, device, virtual = True):
        super(VirtualDGTBoard, self).__init__(device, virtual = virtual)
        self.fen = None        # pending position, consumed by the next read
        self.callbacks = []

    def read(self, bits):
        """Signal data availability whenever a FEN is pending."""
        if self.fen:
            return True

    def read_message_from_board(self, head = None):
        """Consume the pending FEN and publish it as a FEN event."""
        pending = self.fen
        self.fen = None
        return self.fire(type=FEN, message = pending)

    def write(self, message):
        """Acknowledge the two board commands the engine actually sends."""
        notices = {
            chr(_DGTNIX_SEND_UPDATE_NICE): "info string Got Update Nice",
            chr(_DGTNIX_SEND_BRD): "info string Got Send board",
        }
        notice = notices.get(message)
        if notice is not None:
            print(notice)

    def set_fen(self, fen):
        """Queue *fen* for delivery on the next poll cycle."""
        self.fen = fen
def poll_dgt(dgt):
    """Run the board's blocking poll() loop on a background thread."""
    worker = Thread(target=dgt.poll)
    worker.start()
if __name__ == "__main__":
    # Pick a serial device: prefer the first usbmodem port, otherwise keep
    # whatever port the scan finished on.
    for port in scan():
        if port.startswith("/dev/tty.usbmodem"):
            device = port
            break
        device = port
    # NOTE(review): 'device' is unbound when scan() returns nothing — the
    # print below would raise NameError; confirm the intended fallback.
    print("info string device : {0}".format(device))
    # An explicit command-line argument overrides auto-detection.
    if len(sys.argv)> 1:
        device = sys.argv[1]
    # else:
    #     device = "/dev/cu.usbserial-00001004"
    board = DGTBoard(device, send_board=False)
    board.subscribe(board._dgt_observer)
    # poll_dgt(board)
    # if board.test_for_dgt_clock():
    #     print "Clock found!"
    # else:
    #     print "Clock not present"
    # board.send_message_to_clock(['a','y',' ','d','g', 't'], False, False)
    board.poll()
# poll_dgt(board)
|
{"/engine.py": ["/pydgt.py"]}
|
30,757
|
sshivaji/dgt-uci-engine
|
refs/heads/master
|
/engine.py
|
import sys
import threading
import cmd
import chess
from chess import polyglot
import tables
import os
import glob
import platform
# DGT
from pydgt import DGTBoard
from pydgt import FEN
from pydgt import CLOCK_BUTTON_PRESSED
from pydgt import CLOCK_LEVER
from pydgt import CLOCK_ACK
from pydgt import scan as dgt_port_scan
from threading import Thread, RLock
## Some code adapted from https://github.com/alexsyrom/chess-engine
# Absolute directory of this file; opening books and the log resolve here.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# NOTE(review): opened at import time and never written to or closed in this
# file — confirm whether the input log is still wanted.
logfile = open(os.path.join(__location__, 'input.log'), 'w')
# platform.system() return values.
LINUX = "Linux"
MAC = "Darwin"
WINDOWS = "Windows"
# SAN fragment -> spoken text for the macOS `say` command (see speak_move).
SPOKEN_PIECE_SOUNDS = {
    "B": " Bishop ",
    "N": " Knight ",
    "R": " Rook ",
    "Q": " Queen ",
    "K": " King ",
    "O-O": " Castles ",
    "++": " Double Check ",
}
ENGINE_NAME = 'DGT UCI chess engine'
AUTHOR_NAME = 'Shivkumar Shivaji'
# Engine operating mode; only engine-play is implemented in this file.
ENGINE_PLAY = 'engine_play'
# Standard chess starting position.
START_FEN = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
def scan():
    """Return candidate serial device paths for a DGT board (macOS USB,
    macOS DGT bluetooth, and Linux ACM ports, in that order)."""
    patterns = ('/dev/cu.usb*', '/dev/tty.DGT*', '/dev/ttyACM*')
    devices = []
    for pattern in patterns:
        devices.extend(glob.glob(pattern))
    return devices
class KThread(Thread):
    """A subclass of threading.Thread, with a kill()
    method.

    The kill works by installing a sys.settrace hook inside the target
    thread; once `killed` is set, the next traced line raises SystemExit
    in that thread. Tracing adds per-line overhead to the thread's code.
    """
    def __init__(self, *args, **keywords):
        Thread.__init__(self, *args, **keywords)
        self.killed = False  # set by kill(); checked by localtrace
    def start(self):
        """Start the thread."""
        # Swap run() for the traced wrapper so the trace hook is installed
        # inside the new thread, before user code executes.
        self.__run_backup = self.run
        self.run = self.__run # Force the Thread to install our trace.
        Thread.start(self)
    def __run(self):
        """Hacked run function, which installs the
        trace."""
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup
    def globaltrace(self, frame, why, arg):
        # Only attach the line-level tracer on function calls; returning
        # None disables tracing for other event types.
        if why == 'call':
            return self.localtrace
        else:
            return None
    def localtrace(self, frame, why, arg):
        # Once kill() was requested, terminate the thread at its next line.
        if self.killed:
            if why == 'line':
                raise SystemExit()
        return self.localtrace
    def kill(self):
        """Request asynchronous termination of the thread."""
        self.killed = True
class Analyzer(threading.Thread):
    """Worker thread that searches the current position.

    The shell sets `is_working` to start a search and `termination` to shut
    the thread down; results are reported through the injected
    call_if_ready / call_to_inform callbacks.
    """
    MIN_VALUE = -10 * tables.piece[chess.KING]  # score when checkmated
    BETA = tables.piece[chess.ROOK]   # initial aspiration half-window
    ALPHA = -BETA
    MAX_ITER = 2          # aspiration re-search attempts
    MULTIPLIER = 4        # window widening factor on fail-high/low
    MAX_NEGAMAX_ITER = 2
    NEGAMAX_DIVISOR = 3

    def set_default_values(self):
        """Reset per-search options (called after every completed search)."""
        self.infinite = False
        self.possible_first_moves = set()
        self.max_depth = 3
        self.number_of_nodes = 100

    def __init__(self, call_if_ready, call_to_inform, opening_book):
        super(Analyzer, self).__init__()
        if opening_book:
            self.opening_book = polyglot.open_reader(opening_book)
        else:
            self.opening_book = None
        self.debug = False
        self.set_default_values()
        self.board = chess.Board()
        self.is_working = threading.Event()        # set while a search runs
        self.is_working.clear()
        self.is_conscious = threading.Condition()  # liveness pulse for isready
        self.termination = threading.Event()       # set to kill the thread
        self.termination.clear()
        self._call_if_ready = call_if_ready
        self._call_to_inform = call_to_inform
        self._bestmove = chess.Move.null()

    @property
    def bestmove(self):
        return self._bestmove

    class Communicant:
        """Method decorator: honours `termination` and pulses `is_conscious`
        around the wrapped call so do_isready can observe liveness."""
        def __call__(self, func):
            def wrap(instance, *args, **kwargs):
                if instance.termination.is_set():
                    sys.exit()
                with instance.is_conscious:
                    instance.is_conscious.notify()
                result = func(instance, *args, **kwargs)
                with instance.is_conscious:
                    instance.is_conscious.notify()
                if instance.termination.is_set():
                    sys.exit()
                return result
            return wrap

    @property
    def number_of_pieces(self):
        """Count of occupied squares on the current board."""
        number = sum(1 for square in chess.SQUARES
                     if self.board.piece_at(square))
        return number

    def evaluate_material_position(self, phase, color, pieces):
        """Sum piece-square-table values for *pieces* of *color* in *phase*."""
        value = 0
        for piece in pieces:
            squares = self.board.pieces(piece, color)
            for square in squares:
                value += tables.piece_square[phase][color][piece][square]
        return value

    def evaluate_material(self, color):
        """Raw material total for *color*."""
        value = 0
        for piece in chess.PIECE_TYPES:
            squares = self.board.pieces(piece, color)
            value += len(squares) * tables.piece[piece]
        return value

    def evaluate(self):
        """Static evaluation from the side-to-move's perspective; king
        tables are tapered between opening and ending by total material."""
        if self.board.is_checkmate():
            return self.MIN_VALUE
        if self.board.is_stalemate():
            return 0
        colors = list(map(int, chess.COLORS))
        values = [0 for i in tables.PHASES]
        phase = tables.OPENING
        pieces = list(range(1, 6))  # pieces without king
        for color in colors:
            values[phase] += (self.evaluate_material_position
                              (phase, color, pieces)
                              *
                              (-1 + 2 * color))
        values[tables.ENDING] = values[tables.OPENING]
        for phase in tables.PHASES:
            for color in colors:
                values[phase] += (self.evaluate_material_position
                                  (phase, color, (chess.KING,))
                                  *
                                  (-1 + 2 * color))
        material = [0 for i in colors]
        for color in colors:
            material[color] = self.evaluate_material(color)
        material_sum = sum(material)
        for color in colors:
            for phase in tables.PHASES:
                values[phase] += material[color] * (-1 + 2 * color)
        value = ((values[tables.OPENING] * material_sum +
                  values[tables.ENDING] * (tables.PIECE_SUM - material_sum))
                 // tables.PIECE_SUM)
        if self.board.turn == chess.BLACK:
            value *= -1
        return value

    def moves(self, depth):
        """Yield legal moves; at the root, honour `possible_first_moves`."""
        if depth == 0 and self.possible_first_moves:
            for move in self.board.legal_moves:
                if move in self.possible_first_moves:
                    yield move
        else:
            for move in self.board.legal_moves:
                yield move

    def inner_negamax(self, depth, alpha, beta):
        """One alpha-beta pass over the moves at *depth*; records _bestmove
        at the root and returns the best (or fail-high beta) value."""
        best_value = alpha
        for move in self.moves(depth):
            if self.debug:
                self._call_to_inform('currmove {}'.format(move.uci()))
            self.board.push(move)
            value = -self.negamax(depth+1, -beta, -best_value)
            if self.debug:
                self._call_to_inform('string value {}'.format(value))
            self.board.pop()
            if value >= beta:
                if depth == 0:
                    self._bestmove = move
                return beta
            elif value > best_value:
                best_value = value
                if depth == 0:
                    self._bestmove = move
            elif depth == 0 and not bool(self._bestmove):
                self._bestmove = move
        return best_value

    @Communicant()
    def negamax(self, depth, alpha, beta):
        """Negamax with staged left borders; leaf at max_depth or as soon
        as the search has been stopped."""
        if depth == self.max_depth or not self.is_working.is_set():
            return self.evaluate()
        if self.debug:
            self._call_to_inform('depth {}'.format(depth))
            self._call_to_inform('string alpha {} beta {}'.format(alpha, beta))
        value = alpha
        left_borders = [beta - (beta - alpha) // self.NEGAMAX_DIVISOR ** i
                        for i in range(self.MAX_NEGAMAX_ITER, -1, -1)]
        for left in left_borders:
            value = self.inner_negamax(depth, left, beta)
            if value > left:
                break
        return value

    def run(self):
        """Thread main loop: wait for work, try the opening book first,
        otherwise run an aspiration-window negamax, then report readiness."""
        while self.is_working.wait():
            if self.termination.is_set():
                sys.exit()
            self._bestmove = chess.Move.null()
            try:
                if not self.possible_first_moves:
                    entry = self.opening_book.find(self.board)
                    # NOTE(review): newer python-chess exposes `entry.move`
                    # as a property, not a method — confirm pinned version.
                    self._bestmove = entry.move()
                else:
                    for entry in self.opening_book.find_all(self.board):
                        move = entry.move()
                        if move in self.possible_first_moves:
                            self._bestmove = move
                            break
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt (this thread is terminated
                # via sys.exit()). Book misses (no reader configured, no
                # entry found) remain best-effort no-ops.
                pass
            if not bool(self._bestmove):
                middle = self.evaluate()
                alpha = self.ALPHA
                beta = self.BETA
                for i in range(self.MAX_ITER):
                    value = self.negamax(depth=0,
                                         alpha=middle+alpha,
                                         beta=middle+beta)
                    if value >= middle + beta:
                        beta *= self.MULTIPLIER
                    elif value <= middle + alpha:
                        alpha *= self.MULTIPLIER
                    else:
                        break
                self._call_to_inform('pv score cp {}'.format(value))
            else:
                self._call_to_inform('string opening')
            if not self.infinite:
                self._call_if_ready()
            self.set_default_values()
            self.is_working.clear()
class EngineShell(cmd.Cmd):
    """Interactive UCI engine shell.

    Parses UCI commands from stdin, drives the Analyzer search thread, and
    mirrors moves played on an attached DGT e-board (the physical move is
    announced as `bestmove` via dgt_probe/try_dgt_legal_moves).
    """
    intro = ''
    prompt = ''
    file = None
    # Opening books bundled under opening_dir; 'Human' is the default.
    opening_book_list = ['gm2001',
                         'komodo',
                         'Human']
    opening_book = 'Human'
    opening_dir = 'opening'
    opening_book_extension = '.bin'
    go_parameter_list = ['infinite', 'searchmoves', 'depth', 'nodes']

    def __init__(self):
        cmd.Cmd.__init__(self)
        self.postinitialized = False       # postinit() ran (analyzer + board)
        self.dgt_fen = None                # last FEN reported by the board
        self.computer_move_FEN_reached = False
        self.mode = ENGINE_PLAY
        self.bestmove = None               # move detected on the board
        self.moves = []                    # SAN history from 'position'

    @staticmethod
    def get_system():
        """Return platform.system() ('Linux' / 'Darwin' / 'Windows')."""
        return platform.system()

    def discover_usb_devices(self):
        """Return the first USB-modem serial port reported by scan(), if any."""
        for port in scan():
            if port.startswith("/dev/cu.usbmodem"):
                device = port
                print("info string device : {0}".format(device))
                return device

    def discover_bluetooth_devices(self, duration=15):
        """Scan for a DGT bluetooth board and open an RFCOMM socket to it."""
        import bluetooth
        print("info string importing bluetooth")
        nearby_devices = bluetooth.discover_devices(lookup_names=True, duration=duration)
        print("info string found %d devices" % len(nearby_devices))
        for addr, name in nearby_devices:
            print("info string %s - %s" % (addr, name))
            if name.startswith("DGT_"):
                self.dgt_device = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
                # NOTE(review): pybluez connect() usually takes a single
                # (addr, port) tuple — confirm this two-argument call.
                self.dgt_device.connect(addr, 1)
        print("info string Finished")

    def speak_command(self, command, immediate=True):
        """Speak *command* via macOS `say`; no-op on other platforms."""
        if self.get_system() == MAC:
            if immediate:
                os.system("say " + command)

    def speak_move(self, san, immediate=True):
        """Speak a SAN move via macOS `say`, expanding piece letters/symbols."""
        if self.get_system() == MAC:
            spoken_san = san
            # Long castles first so the 'O-O' key below cannot clobber it.
            spoken_san = spoken_san.replace('O-O-O', ' castles long ')
            spoken_san = spoken_san.replace('+', ' check ')
            # FIX: dict.iteritems() is Python 2 only and raised
            # AttributeError under Python 3; use items().
            for k, v in SPOKEN_PIECE_SOUNDS.items():
                spoken_san = spoken_san.replace(k, v)
            spoken_san = spoken_san.replace('x', ' captures ')
            spoken_san = spoken_san.replace('=', ' promotes to ')
            if immediate:
                os.system("say " + spoken_san)

    def try_dgt_legal_moves(self, from_fen, to_fen):
        """If *to_fen* is exactly one legal move away from *from_fen*,
        record and announce that move and return True (else None — callers
        rely on the falsy return)."""
        to_fen_first_tok = to_fen.split()[0]
        temp_board = chess.Board(fen=from_fen)
        for m in temp_board.legal_moves:
            temp_board2 = chess.Board(fen=from_fen)
            temp_board2.push(m)
            cur_fen = temp_board2.fen()
            # Compare piece placement only; counters/clocks may differ.
            cur_fen_first_tok = str(cur_fen).split()[0]
            if cur_fen_first_tok == to_fen_first_tok:
                self.dgt_fen = to_fen
                self.bestmove = str(m)
                san = temp_board.san(m)
                self.speak_move(san)
                self.output_bestmove()
                return True

    def dgt_probe(self, attr, *args):
        """Board-event subscriber: track FENs coming from the DGT board and
        translate a changed position into a bestmove."""
        if attr.type == FEN:
            new_dgt_fen = attr.message
            if self.dgt_fen and new_dgt_fen:
                if new_dgt_fen != self.dgt_fen:
                    if self.mode == ENGINE_PLAY:
                        self.computer_move_FEN_reached = False
                    if not self.try_dgt_legal_moves(self.analyzer.board.fen(), new_dgt_fen):
                        # No single legal move explains the change; check
                        # whether the board now matches the engine position.
                        dgt_fen_start = new_dgt_fen.split()[0]
                        curr_fen_start = self.analyzer.board.fen().split()[0]
                        if curr_fen_start == dgt_fen_start and self.mode == ENGINE_PLAY:
                            self.computer_move_FEN_reached = True
            elif new_dgt_fen:
                self.dgt_fen = new_dgt_fen

    def poll_dgt(self):
        """Start the DGT polling loop on a killable daemon thread."""
        self.dgt_thread = KThread(target=self.dgtnix.poll)
        self.dgt_thread.daemon = True
        self.dgt_thread.start()

    def dgt_board_connect(self, device):
        """Open the DGT board on *device*, subscribe to its events, start
        polling, and probe for a clock."""
        # FIX: was `self.device=""`, which made the failure message below
        # always print an empty device path.
        self.device = device
        self.dgtnix = DGTBoard(device)
        self.dgtnix.subscribe(self.dgt_probe)
        self.poll_dgt()
        self.dgtnix.test_for_dgt_clock()
        self.dgtnix.get_board()
        if not self.dgtnix:
            # FIX: message read "info strong", which UCI GUIs cannot parse.
            print ("info string Unable to connect to the device on {0}".format(self.device))
        else:
            print("info string The board was found")
            self.dgt_connected = True

    def postinit(self):
        """Lazy init on first 'isready': start the analyzer thread and
        attach the DGT board if one is present."""
        opening_book = self.opening_book + self.opening_book_extension
        opening_book = os.path.join(self.opening_dir, opening_book)
        self.analyzer = Analyzer(
            self.output_bestmove,
            self.output_info,
            os.path.join(__location__, opening_book))
        self.analyzer.start()
        device = self.discover_usb_devices()
        if device:
            self.dgt_board_connect(device)
        self.postinitialized = True

    def do_uci(self, arg):
        """Handle 'uci': identify the engine and list options."""
        print('id name {}'.format(ENGINE_NAME) )
        print('id author {}'.format(AUTHOR_NAME))
        print('option name OpeningBook type combo default {} {}'.format(self.opening_book, ' var '.join(self.opening_book_list)))
        print('uciok')

    def do_debug(self, arg):
        """Handle 'debug on|off'."""
        arg = arg.split()
        if arg:
            arg = arg[0]
        else:
            return
        if arg == 'on':
            self.analyzer.debug = True
        elif arg == 'off':
            self.analyzer.debug = False

    def do_isready(self, arg):
        """Handle 'isready': run lazy init, wait for a search heartbeat."""
        if not self.postinitialized:
            self.postinit()
        if self.analyzer.is_working.is_set():
            with self.analyzer.is_conscious:
                self.analyzer.is_conscious.wait()
        print('readyok')

    def do_setoption(self, arg):
        """Handle 'setoption name OpeningBook value <book>'."""
        arg = arg.split()
        try:
            if arg[0] != 'name':
                return
            arg.pop(0)
            if (arg[0] == 'OpeningBook' and
                    arg[1] == 'value' and
                    arg[2] in self.opening_book_list):
                self.opening_book = arg[2]
        except IndexError:
            # Malformed/short option lines are ignored per UCI convention.
            pass

    def do_ucinewgame(self, arg):
        """Handle 'ucinewgame': reset the expected board position."""
        print("info string newgame called")
        self.speak_command("new game started")
        self.dgt_fen = START_FEN

    def do_position(self, arg):
        """Handle 'position [fen ...|startpos] [moves ...]'."""
        arg = arg.split()
        if not arg:
            return
        if self.analyzer.is_working.is_set():
            # Mid-search position changes are ignored per the protocol.
            return
        if arg[0] == 'fen' and len(arg) >= 7:
            self.analyzer.board.set_fen(' '.join(arg[1:7]))
            del arg[:7]
        else:
            if arg[0] == 'startpos':
                arg.pop(0)
            self.analyzer.board.reset()
        if arg and arg[0] == 'moves':
            for move in arg[1:]:
                san = self.analyzer.board.san(chess.Move.from_uci(move))
                self.moves.append(san)
                self.analyzer.board.push_uci(move)
            # FIX: guard — 'moves' with no move list crashed on moves[-1].
            if self.moves:
                print("info string last san {}".format(self.moves[-1]))
                self.speak_move(self.moves[-1])

    def do_go(self, arg):
        """Handle 'go'. Search dispatch is currently disabled; the bestmove
        is produced by dgt_probe() when a move is played on the board."""
        print("info string go called")

    def do_stop(self, arg=None):
        """Handle 'stop': cancel timers, halt the search or emit bestmove."""
        if hasattr(self, 'stop_timer'):
            self.stop_timer.cancel()
        if self.analyzer.is_working.is_set():
            self.analyzer.is_working.clear()
        else:
            self.output_bestmove()

    def do_quit(self, arg):
        """Handle 'quit': tear down the analyzer thread and exit."""
        if hasattr(self, 'analyzer'):
            self.analyzer.termination.set()
            self.analyzer.is_working.set()
            self.analyzer.join()
        sys.exit()

    def output_bestmove(self):
        """Emit the UCI bestmove line (move detected on the DGT board)."""
        print('bestmove {}'.format(self.bestmove))

    def output_info(self, info_string):
        """Emit a UCI info line."""
        print('info {}'.format(info_string))

    def go_infinite(self, arg):
        self.analyzer.infinite = True

    def go_searchmoves(self, arg):
        """Restrict the root to the given UCI moves."""
        self.analyzer.possible_first_moves = set()
        for uci_move in arg:
            try:
                move = chess.Move.from_uci(uci_move)
            except Exception:
                # Stop at the first token that is not a UCI move.
                break
            else:
                self.analyzer.possible_first_moves.add(move)

    def go_depth(self, arg):
        if not self.analyzer.debug:
            return
        try:
            depth = int(arg[0])
        except (IndexError, ValueError):
            pass
        else:
            self.analyzer.max_depth = depth

    def go_nodes(self, arg):
        try:
            number_of_nodes = int(arg[0])
        except (IndexError, ValueError):
            pass
        else:
            # FIX: previously assigned the nonexistent attribute
            # `analyzer.depth`; the node budget lives in number_of_nodes.
            self.analyzer.number_of_nodes = number_of_nodes

    def default(self, arg):
        # Silently ignore unknown UCI commands.
        pass

    def precmd(self, line):
        # Echo every command for the GUI/log transcript.
        print(line)
        return line

    def postcmd(self, stop, line):
        self.stdout.flush()
        return stop
if __name__ == '__main__':
    # print('new start')
    # Run the UCI shell's read-eval loop on stdin/stdout.
    EngineShell().cmdloop()
|
{"/engine.py": ["/pydgt.py"]}
|
30,760
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/migrations/0006_info_googlemapslink.py
|
# Generated by Django 2.2 on 2020-07-14 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Info.googleMapsLink (Google Maps embed URL for the contact map)."""

    dependencies = [
        ('client', '0005_auto_20200714_2040'),
    ]

    operations = [
        migrations.AddField(
            model_name='info',
            name='googleMapsLink',
            # The default back-fills existing rows; preserve_default=False
            # drops it from the model definition afterwards.
            field=models.TextField(default='https://www.google.com/maps/embed?pb=!1m14!1m8!1m3!1d3995.242123767053!2d-17.450418873410932!3d14.704142033889514!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0x55899975fc4381f8!2sImmeuble%20Marega%20Zone%20A%20Grand%20Dakar%20www.dial221.com!5e0!3m2!1sfr!2ssn!4v1594743273319!5m2!1sfr!2ssn'),
            preserve_default=False,
        ),
    ]
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,761
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Category,Food,Reason,Event,Info,Reservation
# Expose all client models in the Django admin, in declaration order.
for model in (Category, Food, Reason, Event, Info, Reservation):
    admin.site.register(model)
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,762
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/forms.py
|
from django.forms import ModelForm
from .models import Reservation
class ReservationForm(ModelForm):
    """ModelForm exposing every Reservation field for the booking widget."""

    class Meta:
        model = Reservation
        fields = '__all__'
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,763
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/migrations/0005_auto_20200714_2040.py
|
# Generated by Django 2.2 on 2020-07-14 20:40
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the audit-timestamp columns from Info (the model no longer
    inherits TimeStampMixin; see client/models.py)."""

    dependencies = [
        ('client', '0004_info'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='info',
            name='created_at',
        ),
        migrations.RemoveField(
            model_name='info',
            name='updated_at',
        ),
    ]
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,764
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/models.py
|
from django.db import models
from datetime import datetime
# Create your models here.
from django.contrib.auth.models import User
class TimeStampMixin(models.Model):
    """Abstract base adding automatic created/updated audit timestamps."""
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    class Meta:
        abstract = True
class Category(TimeStampMixin):
    """Menu category grouping Food items."""
    name = models.CharField(max_length=100)
    description = models.TextField()

    def __str__(self):
        return f'{self.name}'
class Food(TimeStampMixin):
    """A dish on the menu, belonging to exactly one Category."""
    name = models.CharField(max_length=100,unique=True)
    description = models.TextField()
    image = models.ImageField(upload_to='foodsimage')
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    price = models.BigIntegerField()  # NOTE(review): whole-number price — confirm currency/units

    def __str__(self):
        return f'{self.name} => {self.category.name} {self.updated_at}'
class Reason(TimeStampMixin):
    """A 'why choose us' bullet shown on the home page."""
    name = models.CharField(max_length=100)
    description = models.TextField()

    def __str__(self):
        return f'{self.name}'
class Event(TimeStampMixin):
    """A bookable event (e.g. birthday party) with a fixed price and image."""
    name = models.CharField(max_length=100)
    description = models.TextField()
    price = models.BigIntegerField()
    image = models.ImageField(upload_to='eventsimage')

    def __str__(self):
        return f'{self.name}'
class Info(models.Model):
    """Restaurant contact/opening-hours block; the home view reads a single
    row via Info.objects.first()."""
    location = models.CharField(max_length=100)
    dateOuvertes = models.CharField(max_length=100)  # opening days, free text
    heureOuvertes = models.CharField(max_length=100)  # opening hours, free text
    email = models.CharField(max_length=100)
    phoneNumber = models.CharField(max_length=20)
    googleMapsLink = models.TextField()  # Google Maps embed URL

    def __str__(self):
        return f'Les infos '
    #user = models.ForeignKey(User, related_name='following')
class Reservation(TimeStampMixin):
    """A table-booking request submitted from the site's reservation form."""
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=254)
    phoneNumber = models.CharField(max_length=20)
    date = models.DateField()
    time = models.TimeField()
    nbOfPeople = models.IntegerField()
    message = models.TextField()

    def __str__(self):
        return f'{self.name} pour {self.date} à {self.time} et {self.nbOfPeople} pers'
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,765
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/views.py
|
from django.shortcuts import render,HttpResponseRedirect
from django.http import HttpResponse,JsonResponse
# Create your views here.
from client.models import Category, Food ,Event ,Reason,Info,Reservation
from client.forms import ReservationForm
from django.views.decorators.csrf import csrf_exempt
# ---------------------------------------------------------------------------
# Static demo/fallback content. home() now pulls categories, menus, reasons,
# infos and events from the database; the literals below remain as template
# fixtures (TESTIMONIALS, GALERIE and CHEFS are still rendered directly).
# ---------------------------------------------------------------------------
CATEGORIES = [
    {
        'id':1,
        'name' : "Categorie name 1",
        'description' : "Categorie description 1",
    },
    {
        'id':2,
        'name' : "Categorie name 2",
        'description' : "Categorie description 2",
    },
    {
        'id':3,
        'name' : "Categorie name 3",
        'description' : "Categorie description 3",
    }
]
# Sample dishes mirroring the Food model's fields.
MENUS = [
    {
        'name':"Plat 1",
        'description': "Description 1",
        'image' : 'https://s3.amazonaws.com/medias.recettesdici.com/recettes-photos/p/pizza-aux-3-fromages/pizza-aux-3-fromages-1-1200x630.jpg',
        'category': CATEGORIES[0],
        'price': 1501,
    },
    {
        'name':"Plat 2",
        'description': "Description 2",
        'image' : 'https://s3.amazonaws.com/medias.recettesdici.com/recettes-photos/p/pizza-aux-3-fromages/pizza-aux-3-fromages-1-1200x630.jpg',
        'category': CATEGORIES[1],
        'price': 1502,
    },
    {
        'name':"Plat 3",
        'description': "Description 3",
        'image' : 'https://s3.amazonaws.com/medias.recettesdici.com/recettes-photos/p/pizza-aux-3-fromages/pizza-aux-3-fromages-1-1200x630.jpg',
        'category': CATEGORIES[2],
        'price': 1503,
    }
]
REASONS_TO_CHOOSE = [
    {'name' : "Nom 1",'description' : "Description 1"},
    {'name' : "Nom 2",'description' : "Description 2"},
    {'name' : "Nom 3",'description' : "Description 3"},
]
# Customer quotes rendered verbatim on the home page.
TESTIMONIALS = [
    {
        'client':{
            'name':"Richard Bathiebo",
            'profession': "Web developer",
        },
        'description' : "aaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaa aaaaaaaaaaaaaaaaaa",
    },
    {
        'client':{
            'name':"Richard Bathiebo",
            'profession': "Web developer",
        },
        'description' : "bbbbbbbbbbbbbbbbbbbbbbbbbbbbb bbbbbbbbbbbbbbb bbbbbbbbbb",
    },
    {
        'client':{
            'name':"Richard Bathiebo",
            'profession': "Web developer",
        },
        'description' : "ccccccccccccccc cccccccccccccccccccccc ccccccccccccccccccc",
    }
]
# Fallback contact block mirroring the Info model's fields.
INFOS = {
    'location': "Dakar,Senegal Sacré coeur 3",
    'openTime': ["Lundi - Samedi","10h00 - 22h30"],
    'email' : "restaurantly@gmail.com",
    'phoneNumber': "+221 78 159 78 69",
}
EVENTS = [
    {
        'name':"Fêtes d'anniversaire",
        'price':15000,
        'description': "Fêtes d'anniversaireFêtes d'anniversaireFêtes d'anniversaireFêtes d'anniversaireFêtes d'anniversaire",
        'image' : "",
    },
]
# Placeholder gallery cells (8 tiles) for the template grid.
GALERIE = [
    {},{},{},{},
    {},{},{},{},
]
CHEFS = [
    {
        'name':"Richard ",
        'poste':"Chef numéro 1 (plats Africains)"
    },
    {
        'name':"Lucas ",
        'poste':"Chef numéro 2 (plats Européens)"
    }
]
def home(request):
    """Render the landing page: DB-backed menu/event/contact data plus the
    static testimonial, gallery and chef fixtures, and a fresh booking form."""
    context = {
        'CATEGORIES': Category.objects.all(),
        'MENUS': Food.objects.all(),
        'REASONS_TO_CHOOSE': Reason.objects.all(),
        'TESTIMONIALS': TESTIMONIALS,
        'INFOS': Info.objects.first(),
        'EVENTS': Event.objects.all(),
        'GALERIE': GALERIE,
        'CHEFS': CHEFS,
        'ReservationForm': ReservationForm(),
    }
    return render(request, "home.html", context)
@csrf_exempt
def makeReservation(request):
    """AJAX endpoint: validate and persist a Reservation, answering JSON.

    Returns status 201 on success, 400 with per-field errors on invalid
    input, and 405 for non-POST requests (previously fell through and
    returned None, which made Django raise a 500).
    """
    print("Reservation demande")
    if request.method != 'POST':
        return JsonResponse(data={
            "message": "Méthode non autorisée",
            "status": 405,
        })
    form = ReservationForm(request.POST)
    if form.is_valid():
        print(form.cleaned_data)
        # FIX: the original rebuilt a second form from cleaned_data and
        # saved that, re-running validation; a validated ModelForm should
        # be committed directly.
        form.save()
        return JsonResponse(data={
            "message":"Quel succès ! Votre Reservation a bien été prise en compte",
            "status":201,
        })
    return JsonResponse(data={
        "message":"Il y'a des erreurs dans le formulaire",
        "status":400,
        "formError":form.errors
    })
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,766
|
Richard-Cod/django_restaurant
|
refs/heads/master
|
/client/migrations/0004_info.py
|
# Generated by Django 2.2 on 2020-07-14 20:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0003_event_reason'),
]
operations = [
migrations.CreateModel(
name='Info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('location', models.CharField(max_length=100)),
('dateOuvertes', models.CharField(max_length=100)),
('heureOuvertes', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('phoneNumber', models.CharField(max_length=20)),
],
options={
'abstract': False,
},
),
]
|
{"/client/admin.py": ["/client/models.py"], "/client/forms.py": ["/client/models.py"], "/client/views.py": ["/client/models.py", "/client/forms.py"]}
|
30,770
|
alxdmg/Conway-s-Game-of-Life
|
refs/heads/master
|
/GameOfLife_parse.py
|
"""
This module contains the parse implementation of the GameOfLife object
used in Animation.py
"""
import numpy as np
import os
class GameOfLife():
    """
    ---March 2020---
    Sparse implementation of Conway's Game of Life.

    Main attributes:
        living:  list of (row, col) tuples of the currently-alive cells
        figures: names of the seed figures available to new_elem()
    """
    def __init__(self, x_size=50, y_size=50):
        self.living = []
        self.deadnear = []
        self.board_dim = (x_size, y_size)
        self.Board = None
        # Seed figures live in a "figures" folder next to this file.
        # BUG FIX: os.path.join replaces the hard-coded "\\" separator
        # (Windows-only) and the os.chdir() round-trip that mutated the
        # process CWD; figures defaults to [] so list_elems() never
        # raises AttributeError when the folder is missing.
        self.figures = []
        try:
            self.figure_dir = os.path.join(os.getcwd(), "figures")
            for entry in os.listdir(self.figure_dir):
                self.figures.append(entry.split(".")[0])
        except OSError:
            print("No figures found in figures folder, check the file system")
    def list_elems(self):
        """
        Prints a list of available elements that can be passed as strings to new_elem()
        """
        print(self.figures)
    def init_board(self, initializer, pad=0, ones="*", zeros="."):
        """Reset the living-cell list from one of:
            - a list of (x, y) pairs,
            - a 2-D np.ndarray (cells equal to 1 become alive),
            - a figure file name (str) loaded through LoadFromTxt().
        """
        self.living = []
        # initializer can be a list with x,y pair lists or tuples as its elements
        if isinstance(initializer, list):
            for elem in initializer:
                self.living.append((elem[0], elem[1]))
        elif isinstance(initializer, np.ndarray):
            # BUG FIX: original tested isinstance(..., type(np.array)) —
            # the type of a *function* — and then indexed the array with
            # cell values instead of indices; argwhere yields coordinates.
            for row, col in np.argwhere(initializer == 1):
                self.living.append((int(row), int(col)))
        elif isinstance(initializer, str):
            # BUG FIX: pass the caller's ones/zeros through instead of
            # hard-coding the defaults.
            loaded_board = self.LoadFromTxt(figure=initializer, ones=ones, zeros=zeros)
            for row, col in np.argwhere(loaded_board == 1):
                self.living.append((int(row), int(col)))
    def new_elem(self, figure=None, top_left_x=0, top_left_y=0, x_dir=1, y_dir=1):
        """
        This method adds any available element to the board, by first clearing the area.
        Parameters are the x and y coordinates of the corner, and the figure can be flipped
        on either axis with x_dir and y_dir parameters.
        Available figures are listed through the list_elems() method.
        """
        loaded_board = self.LoadFromTxt(figure=figure, ones="*", zeros=".")
        # Clear existing cells to introduce the figure (neighbouring cells aren't cleared!)
        for i in range(loaded_board.shape[0]):
            row = top_left_x + x_dir * i
            for j in range(loaded_board.shape[1]):
                # BUG FIX: the column offset used x_dir instead of y_dir,
                # so vertical flips never worked.
                col = top_left_y + y_dir * j
                if self.__isvalidcell_(row, col):
                    if loaded_board[i][j] == 1:
                        # BUG FIX: guard against duplicate entries, which
                        # would corrupt num_neighbours() counts.
                        if (row, col) not in self.living:
                            self.living.append((row, col))
                    else:  # If the loaded value is 0, try to remove from living
                        try:
                            self.living.remove((row, col))
                        except ValueError:
                            pass
    def next_gen(self):
        """
        This method takes no parameters, and simply returns a np.array of ints containing
        the next generation of the board
        """
        next_living = []
        checked_dead = []
        for cell in self.living:
            if self.num_neighbours(cell, self.living, cell_status=1) in [2, 3]:
                next_living.append(cell)
            else:  # If the living cell doesn't have 2 or 3 neighbours, it doesn't survive
                pass
            # loop through all neighbouring cells
            for i in range(cell[0] - 1, cell[0] + 2):
                for j in range(cell[1] - 1, cell[1] + 2):
                    # Check if cell is on the board, and if it isn't the "center" living cell
                    if self.__isvalidcell_(i, j) and (i, j) != cell:
                        # Check if it has been checked already this round, and if it is dead
                        if ((i, j) not in checked_dead) and ((i, j) not in self.living):
                            checked_dead.append((i, j))
                            if self.num_neighbours((i, j), self.living, cell_status=0) == 3:
                                next_living.append((i, j))
        self.living = next_living
        return self.ListToNumpy(self.living)
    def num_neighbours(self, cell, living_list, cell_status=0):
        """
        Returns the number of living neighbours of *cell* given the list of
        living cells. cell_status=1 means the cell itself is alive, so its
        own membership is subtracted from the count.
        """
        num = 0
        for i in range(cell[0] - 1, cell[0] + 2):
            for j in range(cell[1] - 1, cell[1] + 2):
                if (i, j) in living_list:
                    num = num + 1
        if cell_status == 1:
            return num - 1
        else:
            return num
    def __isvalidcell_(self, i, j):
        """
        Checks if a set of indexes is within the board dimensions
        """
        if i in range(self.board_dim[0]) and j in range(self.board_dim[1]):
            return True
        else:
            return False
    def getBoard(self):
        """Return the current generation as a dense np.uint8 matrix."""
        return self.ListToNumpy(self.living)
    def LoadFromTxt(self, figure, ones="*", zeros="."):
        """Load figures/<figure>.txt into a np.uint8 matrix (1 = alive).

        BUG FIX: reads via an absolute path instead of the original
        os.chdir() round-trip, so the process CWD is never mutated.
        """
        aux_list = []
        path = os.path.join(self.figure_dir, figure + ".txt")
        with open(path, "r") as f:
            for num_rows, line in enumerate(f, 1):
                clean_line = line.strip("\n")
                clean_line = clean_line.replace(ones, "1")
                clean_line = clean_line.replace(zeros, "0")
                num_cols = len(clean_line)
                for char in clean_line:
                    aux_list.append(np.uint8(char))
        aux_mat = np.asarray(aux_list, dtype=np.uint8)
        aux_mat = np.reshape(aux_mat, (num_rows, num_cols))
        return aux_mat
    def ListToNumpy(self, list_to_convert):
        """Convert a list of (row, col) tuples into a dense uint8 matrix."""
        aux_mat = np.zeros(self.board_dim, dtype=np.uint8)
        for elem in list_to_convert:
            aux_mat[elem[0], elem[1]] = 1
        return aux_mat
if __name__ == "__main__":
    # Smoke test: requires a figures/glider.txt seed file next to this
    # module; prints four successive generations of a 12x12 board.
    Test_obj = GameOfLife(12, 12)
    print("-----")
    Test_obj.list_elems()
    print("-----")
    Test_obj.new_elem(figure="glider", top_left_x=3, top_left_y=3)
    print(Test_obj.getBoard())
    print("Gen 1")
    print("-----")
    print(Test_obj.next_gen())
    print("Gen 2")
    print("-----")
    print(Test_obj.next_gen())
    print("Gen 3")
    print("-----")
    print(Test_obj.next_gen())
    print("Gen 4")
    print("-----")
|
{"/Animations.py": ["/GameOfLife_parse.py"]}
|
30,771
|
alxdmg/Conway-s-Game-of-Life
|
refs/heads/master
|
/GameOfLife_dense.py
|
"""
This module contains the dense implementation of the GameOfLife object
used in Animation.py
"""
import numpy as np
import os
class GameOfLife():
    """
    ---March 2020---
    Dense implementation of Conway's Game of Life.

    Main attributes:
        Board:   np.uint8 matrix with the current generation (1 = alive)
        figures: names of the seed figures available to new_elem()
    """
    def __init__(self, x_size=50, y_size=50):
        self.Board = np.zeros((x_size, y_size), dtype=np.uint8)
        # Seed figures live in a "figures" folder next to this file.
        # BUG FIX: os.path.join replaces the hard-coded "\\" separator
        # (Windows-only) and the os.chdir() round-trip that mutated the
        # process CWD; figures defaults to [] so list_elems() is safe.
        self.figures = []
        try:
            self.figure_dir = os.path.join(os.getcwd(), "figures")
            for entry in os.listdir(self.figure_dir):
                self.figures.append(entry.split(".")[0])
        except OSError:
            print("No figures found in figures folder, check the file system")
    def list_elems(self):
        """
        Prints a list of available elements that can be passed as strings to new_elem()
        """
        print(self.figures)
    def init_board(self, initializer, pad=0, ones="*", zeros="."):
        """Set the board from a list of (x, y) pairs, a 2-D ndarray or a
        figure file name, then optionally zero-pad on all four sides."""
        # initializer can be a n x 2 sized python list
        if isinstance(initializer, list):
            for elem in initializer:
                self.Board[elem[0], elem[1]] = 1
        elif isinstance(initializer, np.ndarray):
            # BUG FIX: original tested isinstance(..., type(np.array)) —
            # the type of a *function* — so ndarray initializers were
            # silently ignored.
            self.Board = np.copy(initializer.astype(np.uint8))
        elif isinstance(initializer, str):
            # BUG FIX: pass the caller's ones/zeros through instead of
            # hard-coding the defaults.
            self.Board = self.LoadFromTxt(figure=initializer, ones=ones, zeros=zeros)
        # BUG FIX: "pad is not 0" was an identity comparison on an int;
        # use a value comparison.
        if pad != 0:
            self.Board = np.pad(self.Board, ((pad, pad), (pad, pad)), mode='constant', constant_values=0)
    def new_elem(self, figure=None, top_left_x=0, top_left_y=0, x_dir=1, y_dir=1):
        """
        This method adds any available element to the board, by first clearing the area.
        Parameters are the x and y coordinates of the corner, and the figure can be flipped
        on either axis with x_dir and y_dir parameters.
        Available figures are listed through the list_elems() method.
        """
        loaded_board = self.LoadFromTxt(figure=figure, ones="*", zeros=".")
        # Clear existing cells to introduce the figure (neighbouring cells aren't cleared!)
        for i in range(loaded_board.shape[0]):
            for j in range(loaded_board.shape[1]):
                self.Board[top_left_x + x_dir * i, top_left_y + y_dir * j] = loaded_board[i][j]
    def next_gen(self):
        """
        Advance one generation, updating self.Board in place and returning it.
        """
        temp_old_gen = np.copy(self.Board)
        for i in range(temp_old_gen.shape[0]):
            for j in range(temp_old_gen.shape[1]):
                neighbours = self.num_neighbours(i, j, temp_old_gen)
                self.Board[i, j] = self.new_cell_value(temp_old_gen[i][j], neighbours)
        return self.Board
    def num_neighbours(self, x_orig, y_orig, board=None):
        """
        Returns the number of living neighbours of (x_orig, y_orig) on the
        given board (self.Board when None). The cell's own value is
        excluded from the count.
        """
        if isinstance(board, type(None)):
            tmp = self.Board  # default board is self.Board
        else:
            tmp = board
        num = 0
        for i in range(x_orig - 1, x_orig + 2):
            for j in range(y_orig - 1, y_orig + 2):
                if self.__isvalidcell_(i, j):
                    if tmp[i, j] == 1:
                        num = num + 1
        if tmp[x_orig, y_orig] == 1:
            return num - 1
        else:
            return num
    def __isvalidcell_(self, i, j):
        """
        Checks if a set of indexes is within the range of self.Board
        """
        if i in range(self.Board.shape[0]) and j in range(self.Board.shape[1]):
            return True
        else:
            return False
    def new_cell_value(self, curr_cell_status, num):
        """
        Returns the updated cell status according to Conway's rules
        """
        # if cell is alive
        if curr_cell_status == 1:
            if num == 2 or num == 3:
                return 1
            else:
                return 0
        # if cell is dead
        elif curr_cell_status == 0:
            if num == 3:
                return 1
            else:
                return 0
        else:
            raise Exception("Cell isn't 1 or 0")
    def getBoard(self):
        """Return the current generation matrix."""
        return self.Board
    def LoadFromTxt(self, figure, ones="*", zeros="."):
        """Load figures/<figure>.txt into a np.uint8 matrix (1 = alive).

        BUG FIX: reads via an absolute path instead of the original
        os.chdir() round-trip, so the process CWD is never mutated.
        """
        aux_list = []
        path = os.path.join(self.figure_dir, figure + ".txt")
        with open(path, "r") as f:
            for num_rows, line in enumerate(f, 1):
                clean_line = line.strip("\n")
                clean_line = clean_line.replace(ones, "1")
                clean_line = clean_line.replace(zeros, "0")
                num_cols = len(clean_line)
                for char in clean_line:
                    aux_list.append(np.uint8(char))
        aux_mat = np.asarray(aux_list, dtype=np.uint8)
        aux_mat = np.reshape(aux_mat, (num_rows, num_cols))
        return aux_mat
if __name__ == "__main__":
    # Smoke test: requires a figures/acorn.txt seed file next to this
    # module; prints two generations of a 12x12 board.
    Test_obj = GameOfLife(12, 12)
    print("-----")
    Test_obj.list_elems()
    print("-----")
    Test_obj.new_elem(figure="acorn", top_left_x=3, top_left_y=3)
    print(Test_obj.getBoard())
    print("Gen 1")
    print("-----")
    print(Test_obj.next_gen())
    print("Gen 2")
    print("-----")
    # NOTE(review): exit() below makes the remaining generations dead
    # code — presumably a debugging leftover; remove it to print Gen 3/4.
    exit()
    print(Test_obj.next_gen())
    print("Gen 3")
    print("-----")
    print(Test_obj.next_gen())
    print("Gen 4")
    print("-----")
|
{"/Animations.py": ["/GameOfLife_parse.py"]}
|
30,772
|
alxdmg/Conway-s-Game-of-Life
|
refs/heads/master
|
/Animations.py
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from GameOfLife_parse import GameOfLife
# from GameOfLife_dense import GameOfLife
# Render an animation of the "acorn" seed with the sparse implementation.
game = GameOfLife(130, 150)
print("available figures:")
# NOTE(review): list_elems() prints and returns None, so this prints an
# extra "None" line — harmless but confirm before cleaning up.
print(game.list_elems())
game.new_elem(figure="acorn", top_left_x=70, top_left_y=100)
NumGen = 500  # Number of generations of the game
fig = plt.figure(dpi=150)
plt.axis('off')
plt.title(f"Acorn seed evolution")
ims = []
# Collect one imshow artist per generation (scaled to 0/255 for contrast).
for i in range(NumGen):
    im = plt.imshow(255 * game.next_gen(), animated=True)
    ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=30, blit=True)
# ani.save('Acorn_500gen_150dpi.gif', dpi=150, writer='imagemagick')
# ani.save('dynamic_images.mp4')
plt.show()
|
{"/Animations.py": ["/GameOfLife_parse.py"]}
|
30,774
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/src/utils.py
|
from contextlib import contextmanager
import os
from pathlib import Path
import random
import time
import numpy as np
import torch
import cv2
@contextmanager
def timer(message: str):
    """Announce the start of *message* and report elapsed minutes on exit."""
    print(f'[{message}] start.')
    started = time.time()
    yield
    minutes = (time.time() - started) / 60
    print(f'[{message}] done in {minutes:.1f} min.')
def set_seed(seed: int = 42):
    """Seed python, numpy and torch RNGs (and cudnn) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
def init_logger(log_file='train.log'):
    """Return a logger that writes plain messages to stdout and *log_file*.

    BUG FIX: getLogger() returns the same object on every call, so the
    original attached two extra handlers per call and duplicated every
    log line; handlers are now attached only once.
    """
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    if not logger.handlers:
        stream_handler = StreamHandler()
        stream_handler.setFormatter(Formatter("%(message)s"))
        file_handler = FileHandler(filename=log_file)
        file_handler.setFormatter(Formatter("%(message)s"))
        logger.addHandler(stream_handler)
        logger.addHandler(file_handler)
    return logger
def transform_image_plot(img_path, transform, figsize=(8, 5)):
    """Read an image, apply an albumentations-style *transform* and plot it.

    Args:
        img_path: path to the image file (read with cv2, converted to RGB)
        transform: callable invoked as transform(image=...) returning a
            dict with the augmented image under "image"
        figsize: matplotlib figure size
    """
    # BUG FIX: matplotlib is never imported at module level in this file,
    # so the original raised NameError on plt; import it locally here.
    import matplotlib.pyplot as plt
    image = cv2.imread(img_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Augment an image
    transformed = transform(image=image)
    transformed_image = transformed["image"]
    plt.figure(figsize=figsize)
    plt.imshow(transformed_image)
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,775
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/create_mixup_images.py
|
# ====================================================
# MixUP画像作成スクリプト
# ====================================================
import cv2
import pandas as pd
import numpy as np
from src.mixup_generator import MixupGenerator
import os
import matplotlib.pyplot as plt
from PIL import Image
import shutil
import keras
from tqdm import tqdm
import random
# Folder that receives the generated mixup images
folder = "./mixup_alpha_1/"
alpha = 1.0  # per the documentation, around 0.5 is said to work well
batch_size = 32
end_count = 10  # batch_size * end_count images are produced per call
num_classes = 5
# Load the training csv
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv' , nrows = 12000)
data_root = '../input/cassava-leaf-disease-classification/train_images/'
# Create the output folder
os.makedirs(folder, exist_ok=True)
# Label correction
noisy_label = pd.read_csv("./src/data/noisy_label.csv")
# Replace labels with the ones guessed by the clean-label model
train["label"] = noisy_label["guess_label"]
print("train label clean change")
def get_mixup_data(train, label_id):
    """Generate MixUp images for one class and return their metadata.

    Loads every image referenced by *train*, runs MixupGenerator over the
    batch, writes the blended images to *folder* and returns a DataFrame
    with image_id / label columns.

    NOTE(review): labels fed to the generator are pseudo-random, and the
    returned df sets label to *label_id* rather than the mixed
    label_names — confirm this is intentional.
    """
    train_X =[]
    train_y = []
    j_count = 0
    for index, row in tqdm(train.iterrows()):
        file_name = row['image_id']
        file_path = f'{data_root}/{file_name}'
        image = cv2.imread(file_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        train_X.append(image)
        # assign a pseudo-random label
        train_y.append(random.randrange(num_classes))
    # convert to numpy
    train_X = np.array(train_X)
    train_y = np.array(train_y)
    train_y = keras.utils.to_categorical(train_y, num_classes)
    # run MixUp
    generator = MixupGenerator(train_X, train_y, alpha=alpha, batch_size=batch_size )()
    # store images as soon as they are generated
    images_names = []
    label_names = []
    for batch_index, (x, y) in enumerate(generator):
        if(end_count < batch_index):
            break
        y = np.argmax(y, axis = 1)
        for index in range(0, len(y)):
            # write the image file
            j_name = "mix_1_" + str(label_id) + "_"+ str(j_count) + ".jpg"
            path = folder + j_name
            cv2.imwrite(path, x[index])
            # record the file name and label
            images_names.append(j_name)
            label_names.append(y[index])
            j_count +=1
    # build the dataframe
    df = pd.DataFrame()
    df["image_id"] = images_names
    df["label"] = label_id
    return df
# Generate the images, one class at a time
df_0 = train[train['label'] == 0][0:2000]
df_0 = get_mixup_data(df_0, 0)
df_1 = train[train['label'] == 1][0:1400]
df_1 = get_mixup_data(df_1, 1)
df_2 = train[train['label'] == 2][0:1400]
df_2 = get_mixup_data(df_2, 2)
df_3 = train[train['label'] == 3][0:100]
df_3 = get_mixup_data(df_3, 3)
df_4 = train[train['label'] == 4][0:1400]
df_4 = get_mixup_data(df_4, 4)
df = pd.concat([df_0, df_1, df_2, df_3, df_4])
df = df.reset_index(drop=True)
# write the metadata csv
#df = pd.DataFrame()
#df["image_id"] = images_names
#df["label"] = label_names
df.to_csv("mix_train.csv", index = False)
# zip the output folder
shutil.make_archive(folder, 'zip', root_dir=folder)
# remove the original (unzipped) folder
shutil.rmtree(folder)
# show the label distribution
# NOTE(review): nunique() prints the number of distinct labels, not
# their proportions as the original comment suggested — possibly
# value_counts() was intended.
vc = df['label'].nunique()
print(vc)
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,776
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/src/learning.py
|
# 訓練と評価
import time
from tqdm import tqdm
import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler
import numpy as np
import pandas as pd
from src.data_set import TestDataset, LABEL_NUM
from src.model.train_model import CassvaImgClassifier, LabelSmoothingLoss, TaylorCrossEntropyLoss, CutMixCriterion, TaylorSmoothedLoss
import os
from fmix import sample_mask
def get_criterion(config, criterion_name=""):
    """Build the loss function selected by config["criterion"].

    NOTE(review): several branches reference names that are never
    imported in this module (FocalLoss, FocalCosineLoss,
    SymmetricCrossEntropy, BiTemperedLogisticLoss, device, CFG) and would
    raise NameError if selected. An unrecognised criterion leaves
    `criterion` unbound (UnboundLocalError on return), and the 'CutMix'
    branch passes a string where a config dict is expected — confirm
    before enabling those paths.
    """
    if config["criterion"] =='CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss()
    elif config["criterion"] =='LabelSmoothing':
        criterion = LabelSmoothingLoss(classes=config['target_size'], smoothing=config['smoothing'])
    elif config["criterion"] =='FocalLoss':
        criterion = FocalLoss().to(device)
    elif config["criterion"] =='FocalCosineLoss':
        criterion = FocalCosineLoss()
    elif config["criterion"] =='SymmetricCrossEntropyLoss':
        criterion = SymmetricCrossEntropy().to(device)
    elif config["criterion"] =='BiTemperedLoss':
        criterion = BiTemperedLogisticLoss(t1=CFG.t1, t2=CFG.t2, smoothing=CFG.smoothing)
    elif config["criterion"] =='TaylorCrossEntropyLoss':
        criterion = TaylorCrossEntropyLoss(smoothing=config['smoothing'])
    elif config["criterion"] =='TaylorSmoothedLoss':
        criterion = TaylorSmoothedLoss(smoothing=config['smoothing'])
    elif criterion_name == 'CutMix':
        criterion = CutMixCriterion(get_criterion(config["criterion"]))
    return criterion
def rand_bbox(size, lam):
    """Sample a random CutMix bounding box.

    Args:
        size: tensor size (N, C, W, H); the last two dims bound the box
        lam: mixing coefficient in [0, 1]; the box covers roughly
            (1 - lam) of the image area
    Returns:
        (bbx1, bby1, bbx2, bby2) corner coordinates clipped to the image.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform random center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def cutmix_single(data, target, alpha):
    """Apply CutMix to one batch: paste a random patch from a shuffled
    copy of the batch into each sample.

    Returns the mixed batch and the tuple (target, shuffled_target, lam),
    where lam is adjusted to the exact pixel ratio of the pasted patch.
    """
    perm = torch.randperm(data.size(0))
    shuffled_target = target[perm]
    # lam is restricted to [0.3, 0.4], as in the original implementation
    lam = np.clip(np.random.beta(alpha, alpha), 0.3, 0.4)
    bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), lam)
    mixed = data.clone()
    mixed[:, :, bby1:bby2, bbx1:bbx2] = data[perm, :, bby1:bby2, bbx1:bbx2]
    # adjust lambda to exactly match the pixel ratio of the patch
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (data.size()[-1] * data.size()[-2]))
    return mixed, (target, shuffled_target, lam)
def fmix(device, data, targets, alpha, decay_power, shape, max_soft=0.0, reformulate=False):
    """Apply FMix: blend each sample with a shuffled sample through a
    binary mask produced by fmix.sample_mask().

    Returns the mixed batch and the tuple (targets, shuffled_targets, lam).
    """
    lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate)
    indices = torch.randperm(data.size(0))
    shuffled_data = data[indices]
    shuffled_targets = targets[indices]
    # mask selects pixels from the original batch, (1-mask) from the shuffle
    x1 = torch.from_numpy(mask).to(device)*data
    x2 = torch.from_numpy(1-mask).to(device)*shuffled_data
    targets=(targets, shuffled_targets, lam)
    return (x1+x2), targets
def cutmix(batch):
    """CutMix collate function over a raw batch of (image, label, filename)
    samples (marked "unfinished" in the project config).

    NOTE(review): return_targets is an int64 tensor, so storing lam (a
    float in [0, 1]) at [0, 2] truncates it to 0 — confirm the consumer
    expects this. Also, `data[indices]` indexes a numpy array with a
    torch tensor; verify this works with the project's torch/numpy pair.
    """
    #print(batch[0])
    #print(batch[0][2])
    img_size = 512  # hard-coded
    batch_size = len(batch)
    data = np.zeros((batch_size, 3, img_size, img_size))
    targets = np.zeros((batch_size))
    file_names = [""] * batch_size
    for i in range(batch_size):
        data[i,:,:,:] = batch[i][0]
        targets[i] = batch[i][1]
        file_names[i] = batch[i][2]
    indices = torch.randperm(batch_size)
    shuffled_data = data[indices]
    shuffled_targets = targets[indices]
    lam = np.random.beta(1 , 1)
    image_h, image_w = data.shape[2:]
    # sample a random box whose area is (1 - lam) of the image
    cx = np.random.uniform(0, image_w)
    cy = np.random.uniform(0, image_h)
    w = image_w * np.sqrt(1 - lam)
    h = image_h * np.sqrt(1 - lam)
    x0 = int(np.round(max(cx - w / 2, 0)))
    x1 = int(np.round(min(cx + w / 2, image_w)))
    y0 = int(np.round(max(cy - h / 2, 0)))
    y1 = int(np.round(min(cy + h / 2, image_h)))
    data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]
    return_targets = torch.zeros((batch_size,3),dtype=torch.int64)
    return_targets[:,0] = torch.from_numpy(targets)
    return_targets[:,1] = torch.from_numpy(shuffled_targets)
    return_targets[0,2] = lam
    #print(return_targets)
    #return_filename = torch.zeros((batch_size,3),dtype=torch.int64)
    #return_filename[:,0] = torch.from_numpy(file_names)
    #return_filename[:,1] = torch.from_numpy(shuffled_file_names)
    #return_filename[0,2] = lam
    # file_names is a dummy (not shuffled alongside the data)
    return torch.from_numpy(data), return_targets, file_names
class CutMixCollator:
    """DataLoader collate_fn wrapper that applies CutMix to each batch."""

    def __call__(self, batch):
        # default_collate is intentionally skipped: cutmix() builds the
        # tensors itself from the raw (image, label, filename) samples.
        return cutmix(batch)
#https://www.kaggle.com/takiyu/pytorch-efficientnet-baseline-train-amp-aug
#訓練
def train_one_epoch(epoch, config, model, loss_fn, optimizer, train_loader, device, scheduler=None, schd_batch_update=False):
    """Train *model* for one epoch with AMP and gradient accumulation.

    Args:
        epoch: epoch index (for the progress-bar description)
        config: dict with at least 'accum_iter', 'verbose_step' and
            optionally 'use_cutmix'
        model, loss_fn, optimizer, train_loader, device: usual torch objects
        scheduler: optional LR scheduler; stepped per batch when
            schd_batch_update is True, otherwise once per epoch
    """
    model.train()
    t = time.time()
    running_loss = None
    scaler = GradScaler()
    pbar = tqdm(enumerate(train_loader), total=len(train_loader))
    for step, (imgs, image_labels, file_names) in pbar:
        imgs = imgs.to(device).float()
        image_labels = image_labels.to(device).long()
        # CutMix / FMix handling: with probability 0.25 each, mix the batch
        use_cutmix = False
        if("use_cutmix" in config and config["use_cutmix"] == True):
            mix_decision = np.random.rand()
            #mix_decision = 0.1
            if(mix_decision < 0.25):
                # NOTE(review): this rebinds `t` (the epoch start time) to a
                # debug string — harmless since t is otherwise unused.
                t = "use_cutmix step:%d" % step
                #print(t)
                imgs, image_labels = cutmix_single(imgs, image_labels, 1.)
                use_cutmix = True
            elif(mix_decision >=0.25 and mix_decision < 0.5):
                t = "use_fmix step:%d" % step
                #print(t)
                imgs, image_labels = fmix(device, imgs, image_labels, alpha=1., decay_power=5., shape=(512,512))
                use_cutmix = True
        #print(image_labels.shape, exam_label.shape)
        with autocast():
            image_preds = model(imgs.float())   #output = model(input)
            #print(image_preds.shape)
            #loss = loss_fn(image_preds, image_labels)
            if(use_cutmix == True):
                # mixed-label loss: weighted sum of the two target losses
                loss = loss_fn(image_preds, image_labels[0]) * image_labels[2] + loss_fn(image_preds, image_labels[1]) * (1. - image_labels[2])
            else:
                loss = loss_fn(image_preds, image_labels)
        scaler.scale(loss).backward()
        # exponential moving average of the loss for the progress bar
        if running_loss is None:
            running_loss = loss.item()
        else:
            running_loss = running_loss * .99 + loss.item() * .01
        if ((step + 1) % config['accum_iter'] == 0) or ((step + 1) == len(train_loader)):
            # may unscale_ here if desired (e.g., to allow clipping unscaled gradients)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            if scheduler is not None and schd_batch_update:
                scheduler.step()
        if ((step + 1) % config['verbose_step'] == 0) or ((step + 1) == len(train_loader)):
            description = f'epoch {epoch} loss: {running_loss:.4f}'
            pbar.set_description(description)
    if scheduler is not None and not schd_batch_update:
        scheduler.step()
#https://www.kaggle.com/takiyu/pytorch-efficientnet-baseline-train-amp-aug
# 評価
def valid_one_epoch(epoch, config, model,loss_fn, val_loader, device, scheduler=None, schd_loss_update=False):
    """Evaluate *model* over the validation loader and return accuracy.

    NOTE(review): no torch.no_grad() wrapper here, so gradients are
    tracked during validation — works but wastes memory; confirm before
    changing since the training script relies on the current behavior.
    """
    model.eval()
    t = time.time()
    loss_sum = 0
    sample_num = 0
    image_preds_all = []
    image_targets_all = []
    pbar = tqdm(enumerate(val_loader), total=len(val_loader))
    for step, (imgs, image_labels, file_names) in pbar:
        imgs = imgs.to(device).float()
        image_labels = image_labels.to(device).long()
        image_preds = model(imgs)   #output = model(input)
        #print(image_preds.shape, exam_pred.shape)
        image_preds_all += [torch.argmax(image_preds, 1).detach().cpu().numpy()]
        image_targets_all += [image_labels.detach().cpu().numpy()]
        loss = loss_fn(image_preds, image_labels)
        # accumulate the loss weighted by batch size for a true mean
        loss_sum += loss.item()*image_labels.shape[0]
        sample_num += image_labels.shape[0]
        if ((step + 1) % config['verbose_step'] == 0) or ((step + 1) == len(val_loader)):
            description = f'epoch {epoch} loss: {loss_sum/sample_num:.4f}'
            pbar.set_description(description)
    image_preds_all = np.concatenate(image_preds_all)
    image_targets_all = np.concatenate(image_targets_all)
    accuracy = (image_preds_all==image_targets_all).mean()
    print('validation multi-class accuracy = {:.4f}'.format(accuracy))
    if scheduler is not None:
        if schd_loss_update:
            scheduler.step(loss_sum/sample_num)
        else:
            scheduler.step()
    return accuracy
#推論
def inference_one_epoch(model, data_loader, device):
    """Run *model* over *data_loader* once and return the concatenated
    softmax probabilities as a numpy array."""
    model.eval()
    batch_probs = []
    pbar = tqdm(enumerate(data_loader), total=len(data_loader))
    for step, imgs in pbar:
        imgs = imgs.to(device).float()
        logits = model(imgs)
        batch_probs.append(torch.softmax(logits, 1).detach().cpu().numpy())
    return np.concatenate(batch_probs, axis=0)
def inference_single(model_name, model_root_path, param, transform):
    """Fold-aware inference.

    Args:
        model_name: model architecture name
        model_root_path: root path holding the trained weight files
        param: configuration dict ('fold_num', 'fold_limit', 'valid_bs',
            'num_workers', 'device', 'used_epochs', 'weights', 'tta')
        transform: callable returning the test-time augmentation Compose
    Returns:
        Predictions averaged over folds, epochs and TTA rounds.
    """
    folds = param["fold_num"]
    tst_preds = []
    for fold in range(folds):
        # we'll train fold 0 first
        if param["fold_limit"] <= fold:
            break
        print('Inference fold {} started'.format(fold))
        test = pd.DataFrame()
        test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
        test_ds = TestDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transform=transform())
        tst_loader = torch.utils.data.DataLoader(
            test_ds,
            batch_size=param['valid_bs'],
            num_workers=param['num_workers'],
            shuffle=False,
            pin_memory=False,
        )
        device = torch.device(param['device'])
        model = CassvaImgClassifier(model_name, LABEL_NUM).to(device)
        #tst_preds = []
        for i, epoch in enumerate(param['used_epochs']):
            load_path = model_root_path + '{}_fold_{}_{}'.format(model_name, fold, epoch)
            model.load_state_dict(torch.load(load_path))
            with torch.no_grad():
                for _ in range(param['tta']):
                    #print(model)
                    # each TTA round contributes weights[i]/sum(weights)/tta of the mix
                    tst_preds += [param['weights'][i]/sum(param['weights'])/param['tta']*inference_one_epoch(model, tst_loader, device)]
        #tst_preds = np.mean(tst_preds, axis=0)
        del model
        torch.cuda.empty_cache()
    tst_preds = np.mean(tst_preds, axis=0)
    return tst_preds
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,777
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/train_main.py
|
# ====================================================
# メイン処理
# ====================================================
package_path = './FMix-master'
import sys; sys.path.append(package_path)
"""
import sys
package_path = '../input/pytorch-image-models/pytorch-image-models-master' #'../input/efficientnet-pytorch-07/efficientnet_pytorch-0.7.0'
sys.path.append(package_path)
"""
from src.utils import set_seed
from src.data_set import prepare_dataloader
from src.model.train_model import CassvaImgClassifier
from src.learning import train_one_epoch, valid_one_epoch, inference_single, get_criterion, CutMixCollator, cutmix_single
from sklearn.model_selection import GroupKFold, StratifiedKFold
import torch
from torch import nn
import os
import torch.nn.functional as F
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import timm
import cv2
import pandas as pd
import numpy as np
from torch.cuda.amp import autocast, GradScaler
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,ToGray,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize
)
from albumentations.pytorch import ToTensorV2
import matplotlib.pyplot as plt
#import sys
#sys.path.insert(0,"fmix")
# Configuration
CFG = {
    'fold_num': 5,
    'fold_limit': 2,  # folds actually run; must be <= fold_num
    'seed': 42,
    'model_arch': 'resnext50_32x4d',  # alternatives: tf_efficientnet_b4_ns; b7 does not fit in memory, b5 seems to fit
    'img_size': 512,
    'epochs': 10,  # number of epochs
    'train_bs': 32,
    'valid_bs': 32,
    "drop_rate" : 0.2222,  #dropout
    'T_0': 10,
    'lr': 1e-4,
    'min_lr': 1e-6,
    'weight_decay':1e-6,
    #'num_workers': 4,
    'num_workers': 0,  # local-PC setting
    'accum_iter': 2,  # suppoprt to do batch accumulation for backprop with effectively larger batch size
    'verbose_step': 1,
    #'device': 'cuda:0'
    'device': 'cpu',  # setting for a local (CPU-only) PC
    'debug': True,
    'train_mode' :True,
    'collate' :None,  # used for cutmix
    'use_cutmix':True,  # whether to use cutmix (cutmix is unfinished)
    'inference_mode' :True,  # note: cannot submit with internet ON
    'inference_model_path' : "./",  # model path at inference time
    'tta': 4,  # for inference: number of TTA rounds
    'used_epochs': [4, 5, 6],  # for inference: which epochs to use (0-based)
    'weights': [1,1,1] ,  # for inference: blend weights
    "noisy_label_csv" :"./src/data/noisy_label.csv",  # csv with noisy-label fixes (leave empty to skip correction)
    "append_data":"", # "../input/cassava_append_data",
    "criterion":'TaylorSmoothedLoss', # ['CrossEntropyLoss', LabelSmoothing', 'FocalLoss' 'FocalCosineLoss', 'SymmetricCrossEntropyLoss', 'BiTemperedLoss', 'TaylorCrossEntropyLoss',"TaylorSmoothedLoss"] loss-function algorithm
    "smoothing": 0.05,  # LabelSmoothing value
    "target_size":5,  # number of labels (classes)
}
def get_train_transforms():
    """Return the albumentations augmentation pipeline used for training."""
    return Compose([
        RandomResizedCrop(CFG['img_size'], CFG['img_size']),
        Transpose(p=0.5),  # transpose rows/columns
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(p=0.5),  # random affine: translate, scale, rotate
        HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),  # jitter hue/saturation/value
        RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),  # brightness/contrast
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),  # divide by 255, subtract per-channel mean, divide by per-channel std
        #CoarseDropout(p=0.5),  # coarse dropout
        CoarseDropout(max_holes=12, max_height=int(0.11*CFG['img_size']), max_width=int(0.11*CFG['img_size']),
                      min_holes=1, min_height=int(0.03*CFG['img_size']), min_width=int(0.03*CFG['img_size']),
                      always_apply=False, p=0.5),
        #RandomCrop(height= CFG.HEIGHT, width = CFG.WIDTH,always_apply=True, p=1.0)
        Cutout(p=0.5),
        ToGray(p=0.01),  # enabling this improved the score by ~0.001
        ToTensorV2(p=1.0),
    ], p=1.)
# 参考に0.9を叩き出したもの
# https://www.kaggle.com/takiyu/cassava-leaf-disease-tpu-v2-pods-inference/
#Pixel-level transforms, Crops(画像の中央領域をトリミング)
# ここから過去のコンペのナレッジ
# https://www.kaggle.com/stonewst98/what-a-pity-only-0-0001-away-from-0-77/notebook
# ToGray
def get_valid_transforms():
    """Return the deterministic pipeline used for validation batches."""
    return Compose([
        CenterCrop(CFG['img_size'], CFG['img_size'], p=1.),
        Resize(CFG['img_size'], CFG['img_size']),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.)
#推論で使うもの(こちらのほうが、get_valid_transformsよりもスコアが0.005も高い)
#https://www.kaggle.com/takiyu/cassava-resnext50-32x4d-inference?scriptVersionId=52803745
def get_inference_transforms():
    """Return the randomized pipeline used for test-time augmentation
    (each TTA round sees a different crop/flip of the same image)."""
    return Compose([
        RandomResizedCrop(CFG['img_size'], CFG['img_size']),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.)
#以下のパターンも試す
# ものすごくスコアが悪くなった
#def get_test_transforms():
# return A.Compose([
# A.Resize(height=img_size, width=img_size, p=1.0),
# ToTensorV2(p=1.0),
# ], p=1.0)
if __name__ == '__main__':
    # Fix all RNG seeds for reproducibility.
    # NOTE(review): inference_main.py calls set_seed(CFG['seed']); confirm this
    # file's set_seed has a default seed argument.
    set_seed()
    # Load the training table (small sample in debug mode).
    if(CFG["debug"] == True):
        train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv' , nrows = 30)
    else:
        train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
    print(train)
    # Load noisy-label corrections, if configured.
    if(len(CFG["noisy_label_csv"]) > 0):
        noisy_label = pd.read_csv(CFG["noisy_label_csv"])
        # Replace labels with the "clean" guessed labels.
        train["label"] = noisy_label["guess_label"]
        print("train label clean change")
    # Load additional (synthetic mixup) images, if configured.
    append_data_dict = None
    if(len(CFG["append_data"]) > 0):
        # Append the extra rows to the training table.
        p = CFG["append_data"] + "/" + "mix_train.csv"
        append_df = pd.read_csv(p)
        print(append_df)
        train = pd.concat([train, append_df])
        train = train.reset_index(drop=True)
        # Redirect info consumed by TrainDataset: image_path, exist_name.
        append_data_dict = {}
        append_data_dict['image_path'] = CFG["append_data"] + "/" + "mixup_alpha_1"
        append_data_dict['exist_name'] = "mix"
    if(CFG["train_mode"] == True):
        # Stratify the folds on the label distribution.
        folds = StratifiedKFold(n_splits=CFG['fold_num'], shuffle=True, random_state=CFG['seed']).split(np.arange(train.shape[0]), train.label.values)
        print(folds)
        # Select the compute device from config.
        device = torch.device(CFG['device'])
        for fold, (trn_idx, val_idx) in enumerate(folds):
            # we'll train fold 0 first; fold_limit caps how many folds run
            if CFG["fold_limit"] <= fold:
                break
            print('Training with {} started'.format(fold))
            print(len(trn_idx), len(val_idx))
            # Build the loss functions (same criterion for train and validation).
            criterion = get_criterion(CFG)
            val_criterion = criterion
            """
            if(CFG["use_cutmix"] == True):
                #cutmixの設定(未完成)
                CFG["collator"] = CutMixCollator()
                criterion = get_criterion(CFG, 'CutMix')
                val_criterion = get_criterion(CFG)
            else:
                criterion = get_criterion(CFG)
                val_criterion = criterion
            """
            print(f'Criterion: {criterion}')
            loss_tr = criterion.to(device)
            loss_fn = val_criterion.to(device)
            #loss_tr = nn.CrossEntropyLoss().to(device) #MyCrossEntropyLoss().to(device)
            #loss_fn = nn.CrossEntropyLoss().to(device)
            # Build the train/validation data loaders.
            train_loader, val_loader = prepare_dataloader(train, trn_idx, val_idx, CFG, get_train_transforms, get_valid_transforms, data_root='../input/cassava-leaf-disease-classification/train_images/', append_data_dict = append_data_dict)
            # Image-preview debugging snippet (normally disabled).
            """
            train_iter = iter(train_loader)
            images, label, file_name = train_iter.next()
            image = images[0]
            img = image[:,:,0]
            plt.imshow(img)
            plt.imsave(file_name[0], img)
            """
            ###########################
            # Build the model.
            ###########################
            model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique(), pretrained=True, drop_rate=CFG["drop_rate"]).to(device)
            # Mixed-precision gradient scaler for AMP training.
            scaler = GradScaler()
            optimizer = torch.optim.Adam(model.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'])
            #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=0.1, step_size=CFG['epochs']-1)
            scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1, eta_min=CFG['min_lr'], last_epoch=-1)
            #scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=25,
            #                                                max_lr=CFG['lr'], epochs=CFG['epochs'], steps_per_epoch=len(train_loader))
            best_accuracy = 0
            for epoch in range(CFG['epochs']):
                t = "train_one_epoch fold:%s epoch:%s" % ( str(fold), str(epoch))
                print(t)
                train_one_epoch(epoch, CFG, model ,loss_tr, optimizer, train_loader, device, scheduler=scheduler, schd_batch_update=False)
                with torch.no_grad():
                    accuracy = valid_one_epoch(epoch, CFG, model,loss_fn, val_loader, device, scheduler=None, schd_loss_update=False)
                print("accuracy")
                print(accuracy)
                # Keep the best checkpoint per fold.
                if(best_accuracy < accuracy):
                    t = "best_accuracy_update accuracy:%s fold:%s epoch:%s" % (str(accuracy), str(fold), str(epoch))
                    print(t)
                    best_accuracy = accuracy
                    torch.save(model.state_dict(),'{}_fold_{}'.format(CFG['model_arch'], fold))
                # Also snapshot every epoch (inference ensembles specific epochs).
                torch.save(model.state_dict(),'{}_fold_{}_{}'.format(CFG['model_arch'], fold, epoch))
            #torch.save(model.cnn_model.state_dict(),'{}/cnn_model_fold_{}_{}'.format(CFG['model_path'], fold, CFG['tag']))
            del model, optimizer, train_loader, val_loader, scaler, scheduler
            torch.cuda.empty_cache()
    if(CFG["inference_mode"] == True):
        # Inference mode: ensemble predictions and write the submission file.
        tst_preds = inference_single("resnext50_32x4d", CFG["inference_model_path"], CFG, get_inference_transforms)
        #tst_preds = inference_single("tf_efficientnet_b4_ns", "../input/cassava-tf-efficientnet-b4-ns-train/")
        test = pd.DataFrame()
        test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
        test['label'] = np.argmax(tst_preds, axis=1)
        test.to_csv('submission.csv', index=False)
        test.head()
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,778
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/src/feature.py
|
# 特徴量
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import pandas as pd
# Use Numpy [may cause Out-of-Memory (OOM) error]
def rolling_window(a, shape):
    """Strided rolling-window view over a 2D array (no data copy).

    `shape` is the (rows, cols) window; the column axis of size 1 produced
    when shape[1] == a.shape[1] is squeezed out.
    """
    out_rows = a.shape[0] - shape[0] + 1
    out_cols = a.shape[1] - shape[1] + 1
    view = np.lib.stride_tricks.as_strided(
        a,
        shape=(out_rows, out_cols) + shape,
        strides=a.strides * 2,
    )
    return np.squeeze(view, axis=1)
def median_fillna(df:pd.DataFrame):
    """Fill NaNs with each column's median (features have many NaNs).

    Returns (filled_df, medians) so the same medians can be applied to
    validation/test data.
    https://www.kaggle.com/wongguoxuan/eda-pca-xgboost-classifier-for-beginners
    """
    medians = df.median()
    filled = df.fillna(medians)
    return filled, medians
def feature_pca(train_x:pd.DataFrame, n_components = 50 , scaler= None):
    """Standardize features, then project onto the top PCA components.

    Features must be normalised to zero mean / unit variance before PCA:
    https://www.kaggle.com/wongguoxuan/eda-pca-xgboost-classifier-for-beginners

    Args:
        train_x: feature table.
        n_components: number of principal components to keep.
        scaler: already-fitted StandardScaler to reuse; when None a new one
            is fitted on `train_x`.

    Returns:
        (transformed ndarray, fitted scaler) — pass the scaler back in when
        transforming validation/test data so the same normalisation is used.
    """
    if scaler is None:  # `is None` is the correct identity test (was `== None`)
        scaler = StandardScaler()
        scaler.fit(train_x)
    train_x_norm = scaler.transform(train_x)
    pca = PCA(n_components=n_components).fit(train_x_norm)
    train_x_transform = pca.transform(train_x_norm)
    return train_x_transform, scaler
# We impute the missing values with the medians
def fillna_npwhere(array, values):
    """Replace NaNs in a NumPy array with `values`; no-op when no NaN present.

    https://www.kaggle.com/wongguoxuan/eda-pca-xgboost-classifier-for-beginners
    """
    has_nan = np.isnan(array.sum())
    if not has_nan:
        return array
    return np.where(np.isnan(array), values, array)
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,779
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/inference_main.py
|
# ====================================================
# 推論メイン処理
# ====================================================
"""
import sys
package_path = '../input/pytorch-image-models/pytorch-image-models-master' #'../input/efficientnet-pytorch-07/efficientnet_pytorch-0.7.0'
sys.path.append(package_path)
sys.path.append("../input/cassava-script")
"""
from src.utils import set_seed
from src.data_set import prepare_dataloader, TestDataset
from src.model.train_model import CassvaImgClassifier
from src.learning import train_one_epoch, valid_one_epoch
from sklearn.model_selection import GroupKFold, StratifiedKFold
import torch
from torch import nn
import os
import torch.nn.functional as F
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import pandas as pd
import numpy as np
from torch.cuda.amp import autocast, GradScaler
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize
)
from albumentations.pytorch import ToTensorV2
from tqdm import tqdm
#設定
# Configuration for inference (paths/devices tuned for a local PC).
CFG = {
    'fold_num': 5,
    'seed': 42,
    'model_arch': 'tf_efficientnet_b4_ns',
    'img_size': 512,
    'epochs': 10,
    'train_bs': 16,
    'valid_bs': 32,
    'T_0': 10,
    'lr': 1e-4,
    'min_lr': 1e-6,
    'weight_decay':1e-6,
    #'num_workers': 4,
    'num_workers': 0,  # setting for a local PC
    'accum_iter': 2,  # suppoprt to do batch accumulation for backprop with effectively larger batch size
    'verbose_step': 1,
    #'device': 'cuda:0'
    'device': 'cpu',  # setting for a local PC
    # __main__ reads CFG["debug"]; this key was missing and raised KeyError.
    'debug': False,
    'tta': 4,  # inference: number of TTA passes per checkpoint
    'used_epochs': [4, 5, 6],  # inference: which epoch checkpoints to ensemble
    'weights': [1, 1, 1],  # inference: ensemble weight per used epoch
}
def get_inference_transforms():
    """Stochastic TTA pipeline (random crop / flips / color jitter) for inference."""
    side = CFG['img_size']
    augmentations = [
        RandomResizedCrop(side, side),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ]
    return Compose(augmentations, p=1.)
if __name__ == '__main__':
    # for training only, need nightly build pytorch
    # Intent: re-check validation performance of the trained checkpoints.
    set_seed(CFG['seed'])
    # Load the training table (small sample in debug mode).
    # NOTE(review): this requires a 'debug' key in CFG — raises KeyError if absent.
    if(CFG["debug"] == True):
        train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv' , nrows = 50)
    else:
        train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
    print(train)
    # NOTE(review): no shuffle/random_state here, unlike the training script's
    # split — fold membership only matches if training also used the defaults.
    folds = StratifiedKFold(n_splits=CFG['fold_num']).split(np.arange(train.shape[0]), train.label.values)
    for fold, (trn_idx, val_idx) in enumerate(folds):
        # we'll train fold 0 first
        if fold > 0:
            break
        print('Inference fold {} started'.format(fold))
        # Build the validation dataset for this fold.
        valid_ = train.loc[val_idx,:].reset_index(drop=True)
        #valid_ds = CassavaDataset(valid_, '../input/cassava-leaf-disease-classification/train_images/', transforms=get_inference_transforms(), output_label=False)
        #__init__(self, df, data_root, transform=None):
        valid_ds = TestDataset(valid_, '../input/cassava-leaf-disease-classification/train_images/', transform=get_inference_transforms())
        test = pd.DataFrame()
        test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
        #test_ds = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms(), output_label=False)
        test_ds = TestDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transform=get_inference_transforms())
        val_loader = torch.utils.data.DataLoader(
            valid_ds,
            batch_size=CFG['valid_bs'],
            num_workers=CFG['num_workers'],
            shuffle=False,
            pin_memory=False,
        )
        tst_loader = torch.utils.data.DataLoader(
            test_ds,
            batch_size=CFG['valid_bs'],
            num_workers=CFG['num_workers'],
            shuffle=False,
            pin_memory=False,
        )
        device = torch.device(CFG['device'])
        model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique()).to(device)
        val_preds = []
        tst_preds = []
        #for epoch in range(CFG['epochs']-3):
        # Ensemble the configured epoch checkpoints with TTA.
        for i, epoch in enumerate(CFG['used_epochs']):
            model.load_state_dict(torch.load('../input/cassava-efficientnet-model/{}_fold_{}_{}'.format(CFG['model_arch'], fold, epoch)))
            with torch.no_grad():
                for _ in range(CFG['tta']):
                    #print(model)
                    # NOTE(review): inference_one_epoch is neither defined nor
                    # imported in this file — this raises NameError as written.
                    val_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, val_loader, device)]
                    tst_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, tst_loader, device)]
        # Average the weighted per-pass predictions.
        val_preds = np.mean(val_preds, axis=0)
        tst_preds = np.mean(tst_preds, axis=0)
        print('fold {} validation loss = {:.5f}'.format(fold, log_loss(valid_.label.values, val_preds)))
        print('fold {} validation accuracy = {:.5f}'.format(fold, (valid_.label.values==np.argmax(val_preds, axis=1)).mean()))
        del model
        torch.cuda.empty_cache()
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,780
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/src/data_set.py
|
# ====================================================
# Dataset
# ====================================================
import torch
from torch.utils.data import Dataset,DataLoader
import cv2
import numpy as np
#Mixup, Cutmix, FMix Visualisations
#from fmix.fmix import sample_mask, make_low_freq_image, binarise_mask
#ラベルの最大数
LABEL_NUM = 5
class TrainDataset(Dataset):
    """Labelled cassava image dataset.

    When `append_data_dict` is given, files whose name contains
    append_data_dict['exist_name'] are loaded from
    append_data_dict['image_path'] instead of `data_root` (synthetic/mixup
    images stored in a separate directory).
    """

    def __init__(self, df, data_root, append_data_dict, transform=None):
        self.df = df
        self.file_names = df['image_id'].values
        self.labels = df['label'].values
        self.transform = transform
        self.data_root = data_root
        self.append_data_dict = append_data_dict

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        name = self.file_names[idx]
        path = f'{self.data_root}/{name}'
        redirect = self.append_data_dict
        # Redirect appended (synthetic) images to their own directory.
        if redirect is not None and redirect['exist_name'] in name:
            path = redirect["image_path"] + "/" + name
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if self.transform:
            img = self.transform(image=img)['image']
        target = torch.tensor(self.labels[idx]).long()
        return img, target, name
class TestDataset(Dataset):
    """Unlabelled image dataset for inference: yields transformed RGB images only."""

    def __init__(self, df, data_root, transform=None):
        self.df = df
        self.file_names = df['image_id'].values
        self.transform = transform
        self.data_root = data_root

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        name = self.file_names[idx]
        img = cv2.imread(f'{self.data_root}/{name}')
        # OpenCV loads BGR; convert to the RGB order the models expect.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if self.transform:
            img = self.transform(image=img)['image']
        return img
def prepare_dataloader(df, trn_idx, val_idx, param:dict, get_train_transforms, get_valid_transforms, data_set_mode = 1, data_root='../input/cassava-leaf-disease-classification/train_images/', append_data_dict:dict=None):
    """Split `df` into train/val subsets and wrap them in DataLoaders.

    Args:
        df: training table with 'image_id' and 'label' columns.
        trn_idx / val_idx: row indices for the train / validation split.
        param: config dict; reads 'train_bs', 'valid_bs', 'num_workers' and
            'collate'.
        get_train_transforms / get_valid_transforms: zero-arg factories
            returning augmentation pipelines.
        data_set_mode: 1 -> TrainDataset; otherwise CassavaDataset
            (FMix/CutMix variant — needs the fmix imports commented out at
            module top).
        data_root: image directory.
        append_data_dict: optional redirect info for appended synthetic images.

    Returns:
        (train_loader, val_loader)
    """
    #from catalyst.data.sampler import BalanceClassSampler
    train_ = df.loc[trn_idx,:].reset_index(drop=True)
    valid_ = df.loc[val_idx,:].reset_index(drop=True)
    if(data_set_mode == 1):
        train_ds = TrainDataset(train_, data_root, append_data_dict,transform = get_train_transforms())
        valid_ds = TrainDataset(valid_, data_root, append_data_dict,transform = get_valid_transforms())
    else:
        # Requires the (currently commented-out) fmix import.
        train_ds = CassavaDataset(train_, data_root, param, transforms=get_train_transforms(), output_label=True, one_hot_label=False, do_fmix=False, do_cutmix=False)
        valid_ds = CassavaDataset(valid_, data_root, param, transforms=get_valid_transforms(), output_label=True)
    train_loader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=param['train_bs'],
        pin_memory=False,
        drop_last=False,
        shuffle=True,
        num_workers=param['num_workers'],
        # Added for cutmix. NOTE(review): raises KeyError when the caller's
        # config has no 'collate' key — confirm the caller's CFG defines it.
        collate_fn=param['collate'],
        #sampler=BalanceClassSampler(labels=train_['label'].values, mode="downsampling")
    )
    val_loader = torch.utils.data.DataLoader(
        valid_ds,
        batch_size=param['valid_bs'],
        num_workers=param['num_workers'],
        shuffle=False,
        pin_memory=False,
    )
    return train_loader, val_loader
#from
#https://www.kaggle.com/khyeh0719/pytorch-efficientnet-baseline-train-amp-aug
def rand_bbox(size, lam):
    """Sample a random CutMix bounding box covering roughly (1 - lam) of the area.

    From https://www.kaggle.com/khyeh0719/pytorch-efficientnet-baseline-train-amp-aug

    Args:
        size: (W, H) of the image.
        lam: mixing coefficient in [0, 1]; larger lam -> smaller box.

    Returns:
        (bbx1, bby1, bbx2, bby2) corner coordinates, clipped to the image.
    """
    W = size[0]
    H = size[1]
    cut_rat = np.sqrt(1. - lam)
    # np.int was deprecated and removed in NumPy 1.24; builtin int truncates identically.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform random box center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def get_img(path):
    """Read an image from disk and return it in RGB channel order.

    cv2.imread yields BGR; reversing the last axis flips it to RGB.
    """
    return cv2.imread(path)[:, :, ::-1]
class CassavaDataset(Dataset):
    """Image dataset with optional FMix / CutMix blending (ported from a public kernel).

    NOTE(review): the FMix/CutMix branches contain latent bugs (flagged
    inline) that fire only when do_fmix/do_cutmix are enabled; the default
    configuration never enters them.
    """
    def __init__(self, df, data_root, param,
                 transforms=None,
                 output_label=True,
                 one_hot_label=False,
                 do_fmix=False,
                 do_cutmix=False,
                 cutmix_params={
                     'alpha': 1,
                 }
                 ):
        super().__init__()
        # NOTE(review): the trailing comma below makes fmix_params a 1-tuple
        # containing the dict, so self.fmix_params['alpha'] in __getitem__
        # raises TypeError if do_fmix is ever enabled.
        fmix_params={
            'alpha': 1.,
            'decay_power': 3.,
            'shape': (param['img_size'], param['img_size']),
            'max_soft': True,
            'reformulate': False
        },
        self.df = df.reset_index(drop=True).copy()
        self.transforms = transforms
        self.data_root = data_root
        self.do_fmix = do_fmix
        self.fmix_params = fmix_params
        self.do_cutmix = do_cutmix
        self.cutmix_params = cutmix_params
        self.output_label = output_label
        self.one_hot_label = one_hot_label
        if output_label == True:
            self.labels = self.df['label'].values
            #print(self.labels)
            if one_hot_label is True:
                # Expand integer labels to one-hot rows.
                self.labels = np.eye(self.df['label'].max()+1)[self.labels]
                #print(self.labels)

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index: int):
        # get labels
        # NOTE(review): `target` stays unbound when output_label is False but a
        # mix branch runs — those branches read `target`.
        if self.output_label:
            target = self.labels[index]
        img = get_img("{}/{}".format(self.data_root, self.df.loc[index]['image_id']))
        if self.transforms:
            img = self.transforms(image=img)['image']
        # FMix: blend with a random second image under a low-frequency mask.
        if self.do_fmix and np.random.uniform(0., 1., size=1)[0] > 0.5:
            with torch.no_grad():
                #lam, mask = sample_mask(**self.fmix_params)
                lam = np.clip(np.random.beta(self.fmix_params['alpha'], self.fmix_params['alpha']),0.6,0.7)
                # NOTE(review): make_low_freq_image / binarise_mask come from the
                # fmix import that is commented out at module top — NameError as-is.
                mask = make_low_freq_image(self.fmix_params['decay_power'], self.fmix_params['shape'])
                mask = binarise_mask(mask, lam, self.fmix_params['shape'], self.fmix_params['max_soft'])
                fmix_ix = np.random.choice(self.df.index, size=1)[0]
                fmix_img = get_img("{}/{}".format(self.data_root, self.df.iloc[fmix_ix]['image_id']))
                if self.transforms:
                    fmix_img = self.transforms(image=fmix_img)['image']
                mask_torch = torch.from_numpy(mask)
                # mix image
                img = mask_torch*img+(1.-mask_torch)*fmix_img
                # mix target in proportion to the masked area
                # NOTE(review): CFG is not defined in this module (should be the
                # `param` passed to __init__, which is not stored) — NameError.
                rate = mask.sum()/CFG['img_size']/CFG['img_size']
                target = rate*target + (1.-rate)*self.labels[fmix_ix]
        # CutMix: paste a random box from a second image.
        if self.do_cutmix and np.random.uniform(0., 1., size=1)[0] > 0.5:
            with torch.no_grad():
                cmix_ix = np.random.choice(self.df.index, size=1)[0]
                cmix_img = get_img("{}/{}".format(self.data_root, self.df.iloc[cmix_ix]['image_id']))
                if self.transforms:
                    cmix_img = self.transforms(image=cmix_img)['image']
                lam = np.clip(np.random.beta(self.cutmix_params['alpha'], self.cutmix_params['alpha']),0.3,0.4)
                # NOTE(review): same undefined-CFG problem as the FMix branch.
                bbx1, bby1, bbx2, bby2 = rand_bbox((CFG['img_size'], CFG['img_size']), lam)
                img[:, bbx1:bbx2, bby1:bby2] = cmix_img[:, bbx1:bbx2, bby1:bby2]
                # Mix target in proportion to the unreplaced area.
                rate = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (CFG['img_size'] * CFG['img_size']))
                target = rate*target + (1.-rate)*self.labels[cmix_ix]
        # do label smoothing
        if self.output_label == True:
            return img, target
        else:
            return img
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,781
|
jshirius/kaggle_cassava
|
refs/heads/main
|
/src/model/train_model.py
|
# 訓練モデル
#import efficientnet.tfkeras as efn
import tensorflow as tf
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers, Sequential, losses, metrics, Model
from tensorflow.keras.callbacks import EarlyStopping
import torch
from torch import nn
import timm
# ====================================================
# Label Smoothing
# ====================================================
# From
# https://www.kaggle.com/piantic/train-cassava-starter-using-various-loss-funcs
class LabelSmoothingLoss(nn.Module):
    """Cross entropy against uniformly label-smoothed targets.

    The true class gets probability (1 - smoothing); the remaining mass is
    spread evenly over the other (classes - 1) labels.
    From https://www.kaggle.com/piantic/train-cassava-starter-using-various-loss-funcs
    """

    def __init__(self, classes=5, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed target distribution: uniform floor plus confidence spike.
            smooth_targets = torch.full_like(log_probs, self.smoothing / (self.cls - 1))
            smooth_targets.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-smooth_targets * log_probs, dim=self.dim))
# ====================================================
# TaylorCrossEntropyLoss
# ====================================================
# From
# https://www.kaggle.com/piantic/train-cassava-starter-using-various-loss-funcs
class TaylorSoftmax(nn.Module):
    """Softmax with exp replaced by its truncated Taylor expansion (autograd version).

    From https://www.kaggle.com/piantic/train-cassava-starter-using-various-loss-funcs
    """

    def __init__(self, dim=1, n=2):
        super(TaylorSoftmax, self).__init__()
        # An even order keeps the polynomial approximation positive.
        assert n % 2 == 0
        self.dim = dim
        self.n = n

    def forward(self, x):
        """Usage similar to nn.Softmax:

        >>> mod = TaylorSoftmax(dim=1, n=4)
        >>> inten = torch.randn(1, 32, 64, 64)
        >>> out = mod(inten)
        """
        # Accumulate 1 + x + x^2/2! + ... + x^n/n!
        expansion = torch.ones_like(x)
        factorial = 1.
        for order in range(1, self.n + 1):
            factorial *= order
            expansion = expansion + x.pow(order) / factorial
        # Normalise so entries along `dim` sum to one.
        return expansion / expansion.sum(dim=self.dim, keepdims=True)
class TaylorCrossEntropyLoss(nn.Module):
    """Label-smoothed cross entropy computed over a Taylor-softmax distribution.

    From https://www.kaggle.com/piantic/train-cassava-starter-using-various-loss-funcs
    """

    def __init__(self, n=2, ignore_index=-1, reduction='mean', smoothing=0.05):
        super(TaylorCrossEntropyLoss, self).__init__()
        assert n % 2 == 0
        self.taylor_softmax = TaylorSoftmax(dim=1, n=n)
        self.reduction = reduction
        self.ignore_index = ignore_index
        # The competition has exactly 5 labels, so the class count is hard-coded.
        self.lab_smooth = LabelSmoothingLoss(5, smoothing=smoothing)

    def forward(self, logits, labels):
        log_probs = self.taylor_softmax(logits).log()
        return self.lab_smooth(log_probs, labels)
class TaylorSmoothedLoss(nn.Module):
    """Same as TaylorCrossEntropyLoss but with heavier default smoothing (0.2)."""

    def __init__(self, n=2, ignore_index=-1, reduction='mean', smoothing=0.2):
        super(TaylorSmoothedLoss, self).__init__()
        assert n % 2 == 0
        self.taylor_softmax = TaylorSoftmax(dim=1, n=n)
        self.reduction = reduction
        self.ignore_index = ignore_index
        # The competition has exactly 5 labels, so the class count is hard-coded.
        self.lab_smooth = LabelSmoothingLoss(5, smoothing=smoothing)

    def forward(self, logits, labels):
        log_probs = self.taylor_softmax(logits).log()
        return self.lab_smooth(log_probs, labels)
#From
#https://www.kaggle.com/capiru/cassavanet-cutmix-implementation-cv-0-9
class CutMixCriterion(nn.Module):
    """Wrap a base criterion for CutMix targets packed as columns (target_a, target_b, lam).

    From https://www.kaggle.com/capiru/cassavanet-cutmix-implementation-cv-0-9
    """

    def __init__(self, criterion):
        super(CutMixCriterion, self).__init__()
        self.criterion = criterion

    def forward(self, preds, targets):
        target_a = targets[:, 0]
        target_b = targets[:, 1]
        # lam is replicated per-row; read it from the first row.
        lam = targets[0, 2]
        loss_a = self.criterion.forward(preds, target_a)
        loss_b = self.criterion.forward(preds, target_b)
        return lam * loss_a + (1 - lam) * loss_b
# ====================================================
# MODEL ResNext
# ====================================================
#https://www.kaggle.com/takiyu/cassava-resnext50-32x4d-starter-training
#現状、CassvaImgClassifierとほぼ同じ処理なので、以下の関数は利用しなくて良い
class CustomResNext(nn.Module):
    """ResNeXt classifier with the fc head replaced.

    Kept for reference only — CassvaImgClassifier handles the same case.
    From https://www.kaggle.com/takiyu/cassava-resnext50-32x4d-starter-training
    """
    def __init__(self, model_name='resnext50_32x4d', pretrained=False):
        super().__init__()
        self.model = timm.create_model(model_name, pretrained=pretrained)
        n_features = self.model.fc.in_features
        # NOTE(review): CFG is not defined in this module — instantiating this
        # class raises NameError; the class count should be passed in instead.
        self.model.fc = nn.Linear(n_features, CFG.target_size)

    def forward(self, x):
        x = self.model(x)
        return x
#有力
#model_archでモデル(ReXNet,EfficientNetなど)を指定できる
#https://pypi.org/project/timm/
#https://www.kaggle.com/takiyu/pytorch-efficientnet-baseline-train-amp-aug/edit
class CassvaImgClassifier(nn.Module):
    """timm-backed image classifier; `model_arch` selects the backbone.

    ResNeXt-family models expose their head as `.fc`, other timm models
    (e.g. EfficientNet) as `.classifier`; either is swapped for a fresh
    Linear layer with `n_class` outputs.
    https://pypi.org/project/timm/
    https://www.kaggle.com/takiyu/pytorch-efficientnet-baseline-train-amp-aug/edit
    """

    def __init__(self, model_arch, n_class, pretrained=False, drop_rate=0.0):
        super().__init__()
        self.model = timm.create_model(model_arch, pretrained=pretrained, drop_rate=drop_rate)
        if "resnext" in model_arch:
            head_in = self.model.fc.in_features
            self.model.fc = nn.Linear(head_in, n_class)
        else:
            head_in = self.model.classifier.in_features
            self.model.classifier = nn.Linear(head_in, n_class)

    def forward(self, x):
        return self.model(x)
#https://www.kaggle.com/takiyu/cassava-leaf-disease-training-with-tpu-v2-pods
#EfficientNetB4 tensorflow
def model_fn(input_shape, N_CLASSES):
    """Build a Keras EfficientNetB4 classifier with a frozen backbone.

    From https://www.kaggle.com/takiyu/cassava-leaf-disease-training-with-tpu-v2-pods

    NOTE(review): `efn` (efficientnet.tfkeras) is commented out at the top of
    this module — calling this function raises NameError as written.
    """
    inputs = L.Input(shape=input_shape, name='input_image')
    base_model = efn.EfficientNetB4(input_tensor=inputs,
                                    include_top=False,
                                    weights='noisy-student',
                                    pooling='avg')
    # Freeze the backbone; only the new head trains.
    base_model.trainable = False
    x = L.Dropout(.5)(base_model.output)
    output = L.Dense(N_CLASSES, activation='softmax', name='output')(x)
    model = Model(inputs=inputs, outputs=output)
    return model
|
{"/src/learning.py": ["/src/data_set.py", "/src/model/train_model.py"], "/train_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"], "/inference_main.py": ["/src/utils.py", "/src/data_set.py", "/src/model/train_model.py", "/src/learning.py"]}
|
30,784
|
philimon-reset/AirBnB_clone_good
|
refs/heads/master
|
/models/state.py
|
#!/usr/bin/python3
"""
contains state class to represent a state
"""
from models.base_model import BaseModel, Base
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
class State(BaseModel, Base):
    """State class — maps to the `states` table; groups City records."""
    __tablename__ = "states"
    name = Column(String(128), nullable=False)
    # Keep the ORM relationship under a private name: the original bound both
    # the relationship and the property below to `cities`, so the property
    # shadowed the relationship and `self.cities` inside it recursed into the
    # property itself (RecursionError on access).
    _cities = relationship("City", backref="state")

    @property
    def cities(self):
        """Return the City objects linked to this State."""
        result = []
        for city in self._cities:
            if city.state_id == self.id:
                result.append(city)
        return result
|
{"/models/review.py": ["/models/place.py"], "/models/place.py": ["/models/user.py"]}
|
30,785
|
philimon-reset/AirBnB_clone_good
|
refs/heads/master
|
/models/review.py
|
#!/usr/bin/python3
from models.base_model import BaseModel, Base
from models.place import Place
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine, Column, Integer, String
class Review(BaseModel, Base):
    """
    Review class — maps to the `reviews` table.
    """
    __tablename__ = "reviews"
    # NOTE(review): the commented-out columns misspell ForeignKey and pass it
    # as an extra positional-looking keyword — fix both before enabling.
    # place_id = Column(String(60), nullable=False, ForigenKey(Place.id))
    # user_id = Column(String(60), nullable=False, ForigenKey(User.id))
    text = Column(String(1024), nullable=False)
|
{"/models/review.py": ["/models/place.py"], "/models/place.py": ["/models/user.py"]}
|
30,786
|
philimon-reset/AirBnB_clone_good
|
refs/heads/master
|
/models/user.py
|
#!/usr/bin/python3
"""
module containing user class
"""
from models.base_model import BaseModel, Base
# from models.review import Review
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine, Column, Integer, String
class User(BaseModel, Base):
    """
    User class — maps to the `users` table.
    """
    __tablename__ = "users"
    email = Column(String(128), nullable=False)       # login identifier
    password = Column(String(128), nullable=False)    # stored as-is here; hashing handled elsewhere, if at all — TODO confirm
    first_name = Column(String(128), nullable=True)   # optional
    last_name = Column(String(128), nullable=True)    # optional
    # reviews = relationship("Review", backref="user")
|
{"/models/review.py": ["/models/place.py"], "/models/place.py": ["/models/user.py"]}
|
30,787
|
philimon-reset/AirBnB_clone_good
|
refs/heads/master
|
/models/place.py
|
#!/usr/bin/python3
"""
module containing place
"""
from models.base_model import BaseModel, Base
from models.city import City
from models.user import User
from sqlalchemy import Column, String, Integer, Float
from sqlalchemy.sql.schema import ForeignKey
class Place(BaseModel, Base):
    """
    Place class — maps to the `places` table; a rentable listing that belongs
    to a City and an owning User.
    """
    __tablename__ = "places"
    city_id = Column(String(60), ForeignKey(City.id), nullable=False)
    user_id = Column(String(60), ForeignKey(User.id), nullable=False)
    name = Column(String(128), nullable=False)
    description = Column(String(1024), nullable=False)
    number_rooms = Column(Integer, nullable=False, default=0)
    number_bathrooms = Column(Integer, nullable=False, default=0)
    max_guest = Column(Integer, nullable=False, default=0)
    price_by_night = Column(Integer, nullable=False, default=0)
    latitude = Column(Float, nullable=True)
    longitude = Column(Float, nullable=True)
    # Plain class attribute, not a mapped column — presumably used by the
    # file-storage engine to hold Amenity ids; TODO confirm against callers.
    amenity_ids = []
|
{"/models/review.py": ["/models/place.py"], "/models/place.py": ["/models/user.py"]}
|
30,815
|
linhduongtuan/sesemi
|
refs/heads/master
|
/models/__init__.py
|
from .sesemi import SESEMI
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,816
|
linhduongtuan/sesemi
|
refs/heads/master
|
/models/timm.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import torch
import torch.nn as nn
PYTORCH_IMAGE_MODELS_REPO = 'rwightman/pytorch-image-models'
class PyTorchImageModels(nn.Module):
    """Feature extractor backed by timm models fetched via torch.hub.

    The classifier head is removed (num_classes=0) so forward() returns
    pooled backbone features.
    """
    def __init__(self, backbone='resnet50d', pretrained=True, global_pool='avg'):
        super(PyTorchImageModels, self).__init__()
        try:
            self.encoder = torch.hub.load(
                PYTORCH_IMAGE_MODELS_REPO,
                backbone,
                pretrained,
                num_classes=0,
                global_pool=global_pool,
            )
        except RuntimeError:
            # A stale/corrupt hub cache can raise RuntimeError; retry with a
            # forced re-download of the repo.
            self.encoder = torch.hub.load(
                PYTORCH_IMAGE_MODELS_REPO,
                backbone,
                pretrained,
                num_classes=0,
                global_pool=global_pool,
                force_reload=True,
            )
        self.in_features = self.encoder.num_features
        if global_pool == 'catavgmax':
            # Concatenated avg+max pooling doubles the feature dimension.
            self.in_features *= 2

    def forward(self, x):
        return self.encoder(x)
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,817
|
linhduongtuan/sesemi
|
refs/heads/master
|
/dataset.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import os, errno
import torch
from torchvision import datasets, transforms
from utils import GammaCorrection, validate_paths
channel_mean = [0.485, 0.456, 0.406]
channel_std = [0.229, 0.224, 0.225]
def train_transforms(random_resized_crop=True, resize=256, crop_dim=224, scale=(0.2, 1.0), interpolation=2,
                     gamma_range=(0.5, 1.5), p_hflip=0.5, norms=(channel_mean, channel_std), p_erase=0.5):
    """Training augmentation pipeline.

    Either a RandomResizedCrop (default) or Resize + RandomCrop head, followed
    by gamma correction, horizontal flip, tensorization, normalization and
    random erasing.
    """
    if random_resized_crop:
        cropping = [transforms.RandomResizedCrop(crop_dim, scale, interpolation=interpolation)]
    else:
        cropping = [
            transforms.Resize(resize, interpolation),
            transforms.RandomCrop(crop_dim),
        ]
    tail = [
        GammaCorrection(gamma_range),
        transforms.RandomHorizontalFlip(p_hflip),
        transforms.ToTensor(),
        transforms.Normalize(*norms),
        transforms.RandomErasing(p=p_erase, value='random'),
    ]
    return transforms.Compose(cropping + tail)
def center_crop_transforms(resize=256, crop_dim=224, interpolation=2, norms=(channel_mean, channel_std)):
    """Deterministic eval pipeline: resize, center-crop, tensorize, normalize."""
    steps = [
        transforms.Resize(resize, interpolation),
        transforms.CenterCrop(crop_dim),
        transforms.ToTensor(),
        transforms.Normalize(*norms),
    ]
    return transforms.Compose(steps)
def multi_crop_transforms(resize=256, crop_dim=224, num_crop=5, interpolation=2,
                          norms=(channel_mean, channel_std)):
    """Five/ten-crop TTA pipeline; the output is a stacked tensor of crops."""
    if num_crop == 5:
        multi_crop = transforms.FiveCrop
    elif num_crop == 10:
        multi_crop = transforms.TenCrop
    else:
        raise NotImplementedError('Number of crops should be integer of 5 or 10')
    to_tensor = transforms.ToTensor()
    normalize = transforms.Normalize(*norms)
    return transforms.Compose([
        transforms.Resize(resize, interpolation),
        multi_crop(crop_dim),  # yields a tuple of PIL images
        transforms.Lambda(lambda crops: torch.stack([to_tensor(crop) for crop in crops])),
        transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])),
    ])
class UnlabeledDataset(torch.utils.data.Dataset):
    """Unlabeled image set: the 'label' returned is just the subdirectory index."""

    def __init__(self, img_dir, transformations):
        # `img_dir` must have one or more subdirs containing images
        self.dataset = datasets.ImageFolder(img_dir, transformations)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        image, subdir_index = self.dataset[index]
        return (image, subdir_index)
class RotationTransformer():
    """Collate function expanding each image into its four 90-degree rotations.

    Given B (tensor, label) pairs it returns a stacked tensor of 4*B images
    and a LongTensor of rotation labels in {0, 1, 2, 3}; the incoming labels
    are discarded.
    """

    def __init__(self):
        # One pseudo-class per multiple of 90 degrees.
        self.num_rotation_labels = 4

    def __call__(self, batch):
        rotated, rotation_labels = [], []
        for image, _ in batch:
            for quarter_turns in range(self.num_rotation_labels):
                if quarter_turns == 0:
                    view = image
                else:
                    # Rotate in the spatial (H, W) plane of a (C, H, W) tensor.
                    view = torch.rot90(image, quarter_turns, dims=[1, 2])
                rotated.append(view)
                rotation_labels.append(torch.LongTensor([quarter_turns]))
        return (torch.stack(rotated, dim=0), torch.cat(rotation_labels, dim=0))
if __name__ == '__main__':
    # Visualization utility: apply the training augmentations to the first
    # `--head` images of a folder dataset and save the results to `--out-dir`.
    import argparse
    from tqdm import trange
    parser = argparse.ArgumentParser(description='Dataset Visualization',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--img-dir', required=True,
                        help='`img_dir` must have one or more subdirs containing images')
    parser.add_argument('--head', default=10, type=int,
                        help='visualize first k images')
    parser.add_argument('--hflip', action='store_true',
                        help='apply random horizontal flip')
    parser.add_argument('--erase', action='store_true',
                        help='apply random erase')
    parser.add_argument('--gamma', action='store_true',
                        help='apply random luminance and gamma correction')
    parser.add_argument('--normalize', action='store_true',
                        help='apply channel-wise mean-std normalization')
    parser.add_argument('--visualize-rotations', action='store_true',
                        help='visualize rotation transformations')
    parser.add_argument('--out-dir', default='./sample_dataset_vis',
                        help='directory to save images for visualization')
    args = parser.parse_args()
    validate_paths([args.img_dir])
    os.makedirs(args.out_dir, exist_ok=True)
    print('args:')
    for key, val in args.__dict__.items():
        print(' {:20} {}'.format(key, val))
    # Disabled augmentations collapse to identity parameters so the same
    # train_transforms() builder can be reused.
    p_hflip = 0.5 if args.hflip else 0.0
    p_erase = 0.5 if args.erase else 0.0
    gamma_range = (0.5, 1.5) if args.gamma else (1.0, 1.0)
    (mean, std) = (channel_mean, channel_std) \
        if args.normalize else ([0., 0., 0.], [1., 1., 1.])
    transformations = train_transforms(
        gamma_range=gamma_range, p_hflip=p_hflip, norms=(mean, std), p_erase=p_erase
    )
    dataset = datasets.ImageFolder(args.img_dir, transformations)
    print('transformations:\n', transformations)
    print('dataset size: {}'.format(len(dataset)))
    to_pil_image = transforms.ToPILImage()
    rotate = RotationTransformer()
    for i in trange(args.head):
        fpath = dataset.imgs[i][0]
        fname = fpath.split('/')[-1]
        x, dummy_label = dataset[i]
        if args.visualize_rotations:
            # Expand the single image into its four rotations; save each one.
            tensors, indices = rotate([(x, dummy_label)])
            for x, ind in zip(*(tensors, indices)):
                image = to_pil_image(x)
                image.save(os.path.join(args.out_dir, f'rotated_{ind}_' + fname))
        else:
            image = to_pil_image(x)
            image.save(os.path.join(args.out_dir, fname))
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,818
|
linhduongtuan/sesemi
|
refs/heads/master
|
/models/sesemi.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .timm import PyTorchImageModels
from utils import sigmoid_rampup, adjust_polynomial_lr
import logging
import pytorch_lightning as pl
from pytorch_lightning.trainer.states import TrainerState, RunningStage
from torchmetrics.classification.accuracy import Accuracy
from torchmetrics.average import AverageMeter
# Accepted values for the --backbone flag; membership is enforced by the
# assert in SESEMI.__init__ before the timm feature extractor is built.
SUPPORTED_BACKBONES = (
    # The following backbones strike a balance between accuracy and model size, with optional
    # pretrained ImageNet weights. For a summary of their ImageNet performance, see
    # <https://github.com/rwightman/pytorch-image-models/blob/master/results/results-imagenet.csv>.
    # Compared with the defaults, the "d" variants (e.g., resnet50d, resnest50d)
    # replace the 7x7 conv in the input stem with three 3x3 convs.
    # And in the downsampling block, a 2x2 avg_pool with stride 2 is added before conv,
    # whose stride is changed to 1. Described in `Bag of Tricks <https://arxiv.org/abs/1812.01187>`.
    # ResNet models.
    'resnet18', 'resnet18d', 'resnet34', 'resnet34d', 'resnet50', 'resnet50d', 'resnet101d', 'resnet152d',
    # ResNeXt models.
    'resnext50_32x4d', 'resnext50d_32x4d', 'resnext101_32x8d',
    # Squeeze and Excite models.
    'seresnet50', 'seresnet152d', 'seresnext26d_32x4d', 'seresnext26t_32x4d', 'seresnext50_32x4d',
    # ResNeSt models.
    'resnest14d', 'resnest26d', 'resnest50d', 'resnest101e', 'resnest200e', 'resnest269e',
    'resnest50d_1s4x24d', 'resnest50d_4s2x40d',
    # ResNet-RS models.
    'resnetrs50', 'resnetrs101', 'resnetrs152', 'resnetrs200',
    # DenseNet models.
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    # Inception models.
    'inception_v3', 'inception_v4', 'inception_resnet_v2',
    # Xception models.
    'xception', 'xception41', 'xception65', 'xception71',
    # EfficientNet models.
    'tf_efficientnet_b0', 'tf_efficientnet_b1', 'tf_efficientnet_b2',
    'tf_efficientnet_b3', 'tf_efficientnet_b4', 'tf_efficientnet_b5',
    'tf_efficientnet_b6', 'tf_efficientnet_b7',
    # EfficientNet models trained with noisy student.
    'tf_efficientnet_b0_ns', 'tf_efficientnet_b1_ns', 'tf_efficientnet_b2_ns',
    'tf_efficientnet_b3_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5_ns',
    'tf_efficientnet_b6_ns', 'tf_efficientnet_b7_ns',
)
class SESEMI(pl.LightningModule):
    """Image classifier with an auxiliary self-supervised rotation head.

    The labeled head (``fc_labeled``) is trained with cross-entropy on real
    labels; the unlabeled head (``fc_unlabeled``) classifies image rotations
    on unlabeled data, and its loss is added with a sigmoid-ramped weight.
    """

    def __init__(self, hparams):
        super(SESEMI, self).__init__()
        self.save_hyperparameters(hparams)
        assert self.hparams.backbone in SUPPORTED_BACKBONES, f'--backbone must be one of {SUPPORTED_BACKBONES}'
        self.feature_extractor = PyTorchImageModels(self.hparams.backbone, self.hparams.pretrained, self.hparams.global_pool)
        if self.hparams.pretrained:
            logging.info(f'Initialized with pretrained {self.hparams.backbone} backbone')
        if self.hparams.freeze_backbone:
            # Put every backbone module in eval mode and stop gradient flow.
            logging.info(f'Freezing {self.hparams.backbone} backbone')
            for m in self.feature_extractor.modules():
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
        self.in_features = self.feature_extractor.in_features
        self.dropout = nn.Dropout(self.hparams.dropout_rate)
        # Supervised head over real classes; self-supervised head over
        # rotation pseudo-classes.
        self.fc_labeled = nn.Linear(self.in_features, self.hparams.num_labeled_classes)
        self.fc_unlabeled = nn.Linear(self.in_features, self.hparams.num_unlabeled_classes)
        # Registered as buffers so these scalars follow device moves and are
        # persisted in checkpoints.
        self.register_buffer(
            'current_learning_rate',
            torch.tensor(self.hparams.warmup_lr, dtype=torch.float32, device=self.device))
        self.register_buffer(
            'best_validation_top1_accuracy',
            torch.tensor(0., dtype=torch.float32, device=self.device))
        self.training_accuracy = Accuracy(top_k=1, dist_sync_on_step=True)
        self.validation_top1_accuracy = Accuracy(top_k=1)
        self.validation_average_loss = AverageMeter()

    def forward(self, x):
        """Inference path: class probabilities from the labeled head only."""
        features = self.feature_extractor(x)
        logits = self.fc_labeled(features)
        return F.softmax(logits, dim=-1)

    def forward_train(self, x_labeled, x_unlabeled=None):
        """Training path: logits for the labeled (and optional unlabeled) batch.

        Returns ``(output_labeled, output_unlabeled)``; the second item is
        None when no unlabeled batch is given (fully-supervised mode).
        """
        # Compute output for labeled input
        x_labeled = self.feature_extractor(x_labeled)
        if self.hparams.dropout_rate > 0.0:
            x_labeled = self.dropout(x_labeled)
        output_labeled = self.fc_labeled(x_labeled)
        if x_unlabeled is not None:
            # Compute output for unlabeled input and return both outputs
            x_unlabeled = self.feature_extractor(x_unlabeled)
            output_unlabeled = self.fc_unlabeled(x_unlabeled)
            return output_labeled, output_unlabeled
        return output_labeled, None

    def optimizer_step(
        self,
        epoch,
        batch_idx,
        optimizer,
        optimizer_idx,
        optimizer_closure,
        **kwargs,
    ):
        # Step first, then apply the warmup + polynomial LR schedule manually
        # and mirror the resulting LR into the registered buffer for logging.
        optimizer.step(closure=optimizer_closure)
        # `optimizer.optimizer` unwraps Lightning's optimizer wrapper so the
        # schedule mutates the underlying torch optimizer's param groups.
        self.current_learning_rate = torch.tensor(adjust_polynomial_lr(
            optimizer.optimizer, self.global_step,
            warmup_iters=self.hparams.warmup_iters,
            warmup_lr=self.hparams.warmup_lr,
            lr=self.hparams.lr,
            lr_pow=self.hparams.lr_pow,
            max_iters=self.hparams.max_iters),
            dtype=self.current_learning_rate.dtype,
            device=self.current_learning_rate.device)

    def configure_optimizers(self):
        """Build the SGD or Adam optimizer over trainable parameters only."""
        if self.hparams.optimizer.lower() == 'sgd':
            optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.parameters()),
                lr=self.hparams.lr, momentum=self.hparams.momentum, nesterov=True,
                weight_decay=self.hparams.weight_decay)
        elif self.hparams.optimizer.lower() == 'adam':
            optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.parameters()),
                lr=self.hparams.lr, betas=(self.hparams.momentum, 0.999), weight_decay=0.0)
        else:
            raise NotImplementedError()
        return optimizer

    def training_step(self, batch, batch_index):
        """Combined supervised + (optional) rotation pretext loss for one step."""
        inputs_t, targets_t = batch['supervised']
        # The rotation branch is absent in fully-supervised runs.
        inputs_u, targets_u = batch.get('unsupervised_rotation', (None, None))
        # Forward pass
        outputs_t, outputs_u = self.forward_train(inputs_t, inputs_u)
        loss_t = F.cross_entropy(outputs_t, targets_t, reduction='mean')
        if outputs_u is not None:
            loss_u = F.cross_entropy(outputs_u, targets_u, reduction='mean')
        else:
            loss_u = 0.
        # Ramp the self-supervised loss weight up over `stop_rampup` steps.
        loss_weight = self.hparams.initial_loss_weight * sigmoid_rampup(
            self.global_step, self.hparams.stop_rampup)
        loss = loss_t + loss_u * loss_weight
        self.log('train/loss_labeled', loss_t)
        self.log('train/loss_unlabeled', loss_u)
        self.log('train/loss_unlabeled_weight', loss_weight)
        self.log('train/loss', loss)
        self.log('train/learning_rate', self.current_learning_rate)
        return {'loss': loss, 'probs': F.softmax(outputs_t, dim=-1), 'targets': targets_t}

    def training_step_end(self, outputs):
        # Update epoch-level train accuracy and surface acc/lr on the
        # progress bar without writing them to the logger.
        self.training_accuracy(outputs['probs'], outputs['targets'])
        self.log('acc', self.training_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=False)
        self.log('lr', self.current_learning_rate, on_step=True, on_epoch=False, prog_bar=True, logger=False)
        loss = outputs['loss'].mean()
        return loss

    def validation_step(self, batch, batch_index):
        """Per-batch validation: probabilities, targets and per-sample loss."""
        inputs_t, targets_t = batch
        outputs_t = self.fc_labeled(self.feature_extractor(inputs_t))
        probs_t = F.softmax(outputs_t, dim=-1)
        # reduction='none' keeps per-sample losses for the AverageMeter.
        loss_t = F.cross_entropy(outputs_t, targets_t, reduction='none')
        return probs_t, targets_t, loss_t

    def validation_step_end(self, outputs):
        outputs_t, targets_t, loss_t = outputs
        self.validation_top1_accuracy.update(outputs_t, targets_t)
        self.validation_average_loss.update(loss_t)

    def validation_epoch_end(self, outputs):
        """Aggregate epoch metrics, track the best top-1, and log/print them."""
        top1 = self.validation_top1_accuracy.compute()
        loss = self.validation_average_loss.compute()
        self.validation_top1_accuracy.reset()
        self.validation_average_loss.reset()
        # Skip metric bookkeeping during Lightning's sanity-check pass.
        if self.trainer.state.stage != RunningStage.SANITY_CHECKING:
            if top1 > self.best_validation_top1_accuracy:
                self.best_validation_top1_accuracy = torch.tensor(
                    float(top1),
                    dtype=self.best_validation_top1_accuracy.dtype,
                    device=self.best_validation_top1_accuracy.device)
            self.log('val/top1', top1)
            self.log('val/loss', loss)
            # Only the rank-0 process prints the epoch summary.
            if self.global_rank == 0:
                print()
                logging.info(
                    'Epoch {:03d} =====> '
                    'Valid Loss: {:.4f} '
                    'Valid Acc: {:.4f} [Best {:.4f}]'.format(
                        self.trainer.current_epoch,
                        loss,
                        top1,
                        self.best_validation_top1_accuracy)
                )
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,819
|
linhduongtuan/sesemi
|
refs/heads/master
|
/inference.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import os
import argparse
import numpy as np
from tqdm import trange
import logging
# Configure root logging once at import time so all messages from this
# script carry a timestamp and level tag.
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] %(message)s',
    level=logging.INFO
)
import torch
from torchvision import datasets
from models import SESEMI
from utils import validate_paths
from dataset import center_crop_transforms, multi_crop_transforms
# CLI definition; parsing happens inside predict(), not at import time.
parser = argparse.ArgumentParser(description='Perform inference on test data',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Run arguments
parser.add_argument('--checkpoint-path', default='',
                    help='path to saved checkpoint')
parser.add_argument('--no-cuda', action='store_true',
                    help='disable cuda')
# Data loading arguments
parser.add_argument('--data-dir', default='',
                    help='path to test dataset with one or more subdirs containing images')
parser.add_argument('--batch-size', default=16, type=int,
                    help='mini-batch size')
parser.add_argument('--workers', default=6, type=int,
                    help='number of data loading workers')
# Inference arguments
parser.add_argument('--oversample', action='store_true',
                    help='enable test-time augmentation')
parser.add_argument('--ncrops', default=5, type=int,
                    help='number of crops to oversample')
parser.add_argument('--topk', default=1, type=int,
                    help='return topk predictions')
parser.add_argument('--resize', default=256, type=int,
                    help='resize smaller edge to this resolution while maintaining aspect ratio')
parser.add_argument('--crop-dim', default=224, type=int,
                    help='dimension for center or multi cropping')
parser.add_argument('--outfile', default='inference_results.csv',
                    help='write prediction results to file')
class Classifier():
    """Thin inference wrapper around a saved SESEMI checkpoint."""

    def __init__(self, model_path, args):
        self.args = args
        self.model_path = model_path
        # Fall back to CPU when CUDA is unavailable or explicitly disabled.
        self.device = torch.device(
            'cpu' if args.no_cuda or not torch.cuda.is_available() else 'cuda'
        )
        self.init_model()

    def init_model(self):
        """Load the checkpoint, wrap in DataParallel, switch to eval mode."""
        self.model = SESEMI.load_from_checkpoint(self.model_path, map_location=self.device)
        logging.info(f'Model checkpoint loaded from {self.model_path}')
        self.model = torch.nn.DataParallel(self.model).to(self.device)
        # Class names were saved into the checkpoint's hyperparameters.
        self.classes = np.array(self.model.module.hparams.classes)
        self.model.eval()

    def predict(self, x, ncrops, topk=1):
        """Return (labels, scores): top-k class names and probabilities.

        ``x`` is a batch of images, optionally with an extra crops dimension
        (e.g. from FiveCrop/TenCrop); crops are folded into the batch before
        the forward pass and their outputs averaged afterwards.
        """
        with torch.no_grad():
            x = x.to(self.device)
            batch_size = x.size(0)
            # Reversed slice of the last three dims yields (w, h, c).
            w, h, c = x.shape[-1:-4:-1]
            outputs = self.model(x.view(-1, c, h, w))  # fuse batch size and ncrops
            outputs = outputs.view(batch_size, ncrops, -1).mean(1)  # avg over crops
            scores, indices = torch.topk(outputs, k=topk, largest=True, sorted=True)
            scores = scores.cpu().numpy()
            indices = indices.cpu().numpy()
            labels = self.classes[indices]
            return (labels, scores)
def predict():
    """CLI entry point: run (optionally multi-crop) inference over a folder.

    Writes one CSV row per image to ``--outfile``: the image id, the
    space-joined top-k labels, and their space-joined scores.
    """
    args = parser.parse_args()
    classifier = Classifier(args.checkpoint_path, args)
    # Data loading
    validate_paths([args.data_dir])
    if args.oversample:
        # Test-time augmentation: average predictions over 5 or 10 crops.
        ncrops = args.ncrops
        test_transformations = multi_crop_transforms(
            args.resize, args.crop_dim, ncrops, interpolation=3
        )
    else:
        ncrops = 1
        test_transformations = center_crop_transforms(
            args.resize, args.crop_dim, interpolation=3
        )
    dataset = datasets.ImageFolder(args.data_dir, test_transformations)
    dataset_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size,
        shuffle=False, num_workers=args.workers,
        pin_memory=True, drop_last=False)
    # Write prediction results to file
    # Start a fresh file; subsequent writes below are per-batch appends.
    if os.path.exists(args.outfile):
        os.remove(args.outfile)
    with open(args.outfile, 'a') as f:
        header = ','.join(['Id', 'Category', 'Score'])
        f.write(header + '\n')
    index = 0
    dataset_iterator = iter(dataset_loader)
    for _ in trange(len(dataset_loader), desc=f'Inferencing on {len(dataset.imgs)} files', position=1):
        inputs, _ = next(dataset_iterator)
        labels, scores = classifier.predict(inputs, ncrops, args.topk)
        # Write prediction results to file
        with open(args.outfile, 'a') as f:
            for label, score in zip(labels, scores):
                # shuffle=False keeps loader order aligned with dataset.imgs,
                # so `index` maps each prediction back to its file path.
                img_path = dataset.imgs[index][0]
                img_id = os.path.splitext(os.path.basename(img_path))[0]
                label = ' '.join(label)
                score = [f'{s:.6f}' for s in score]
                score = ' '.join(score)
                f.write(','.join([img_id, label, score]) + '\n')
                index += 1


if __name__ == '__main__':
    predict()
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,820
|
linhduongtuan/sesemi
|
refs/heads/master
|
/open_sesemi.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import os
import argparse
import numpy as np
import torch
from torchvision import datasets
import pytorch_lightning as pl
from pytorch_lightning import callbacks
from pytorch_lightning.callbacks import ModelCheckpoint
from omegaconf import OmegaConf
from models import SESEMI
from utils import validate_paths, assert_same_classes, load_checkpoint
from dataset import (
UnlabeledDataset, RotationTransformer,
train_transforms, center_crop_transforms
)
import logging
# Configure root logging once at import time with timestamps and level tags.
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] %(message)s',
    level=logging.INFO
)
# CLI definition for training/evaluation; parsed inside open_sesemi().
parser = argparse.ArgumentParser(description='Supervised and Semi-Supervised Image Classification',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Run arguments
parser.add_argument('--run-id', default='run01',
                    help='experiment ID to name checkpoints and logs')
parser.add_argument('--log-dir', default='./logs',
                    help='directory to output checkpoints and metrics')
parser.add_argument('--resume-from-checkpoint', default='',
                    help='path to saved checkpoint')
parser.add_argument('--pretrained-checkpoint', default='',
                    help='path to pretrained model weights')
parser.add_argument('--num-gpus', default=1, type=int,
                    help='the number of GPUs to use')
parser.add_argument('--no-cuda', action='store_true',
                    help='disable CUDA')
# Data loading arguments
parser.add_argument('--data-dir', nargs='+', default=[],
                    help='path(s) to dataset containing "train" and "val" subdirs')
parser.add_argument('--unlabeled-dir', nargs='+', default=[],
                    help='path(s) to unlabeled dataset with one or more subdirs containing images')
parser.add_argument('--batch-size', default=16, type=int,
                    help='mini-batch size')
parser.add_argument('--workers', default=6, type=int,
                    help='number of data loading workers')
parser.add_argument('--resize', default=256, type=int,
                    help='resize smaller edge to this resolution while maintaining aspect ratio')
parser.add_argument('--crop-dim', default=224, type=int,
                    help='dimension for center or multi cropping')
# Training arguments
parser.add_argument('--backbone', default='resnet50d',
                    help='choice of backbone architecture')
parser.add_argument('--global-pool', default='avg',
                    choices=['avg', 'max', 'avgmax', 'catavgmax'],
                    help='global pooling method to aggregate backbone features')
parser.add_argument('--freeze-backbone', action='store_true',
                    help='freeze backbone weights from updating')
parser.add_argument('--pretrained', action='store_true',
                    help='use backbone architecture with pretrained ImageNet weights')
# NOTE(review): choices are lowercased but the default is 'SGD'; passing
# '--optimizer SGD' explicitly is rejected by argparse -- users must type
# 'sgd'/'adam'. Downstream code lowercases the value before use.
parser.add_argument('--optimizer', default='SGD',
                    choices=['SGD'.lower(), 'Adam'.lower()],
                    help='optimizer to use')
parser.add_argument('--lr', default=1e-2, type=float,
                    help='initial learning rate')
parser.add_argument('--epochs', default=30, type=int,
                    help='number of total epochs to run')
parser.add_argument('--lr-pow', default=0.5, type=float,
                    help='power to drop LR in polynomial scheduler')
parser.add_argument('--warmup-lr', default=1e-6, type=float,
                    help='initial learning rate for warmup')
parser.add_argument('--warmup-epochs', default=0, type=int,
                    help='number of warmup epochs')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='momentum parameter in SGD or beta1 parameter in Adam')
parser.add_argument('--weight-decay', default=5e-4, type=float,
                    help='weight decay')
parser.add_argument('--fully-supervised', action='store_true',
                    help='fully supervised training without unlabeled data')
parser.add_argument('--evaluate-only', action='store_true',
                    help='evaluate model on validation set and exit')
def open_sesemi():
    """CLI entry point: train or evaluate a SESEMI model.

    Builds labeled/unlabeled/validation datasets from the CLI arguments,
    assembles the hyperparameter config, and either evaluates a checkpoint
    (--evaluate-only) or fits with PyTorch Lightning.
    """
    args = parser.parse_args()
    run_dir = os.path.join(args.log_dir, args.run_id)
    os.makedirs(run_dir, exist_ok=True)
    # Data loading: collect the 'train' and 'val' subdirs of every data dir.
    traindir, valdir = [], []
    for datadir in args.data_dir:
        for d in os.scandir(datadir):
            if d.is_dir():
                if d.name == 'train':
                    # BUG FIX: the original `os.path.join(datadir, d)` joined
                    # the parent dir with the DirEntry, which fspath()s to its
                    # FULL path -- producing 'datadir/datadir/train'.
                    # DirEntry.path is already the correctly joined path.
                    traindir.append(d.path)
                elif d.name == 'val':
                    valdir.append(d.path)
                else:
                    continue
    data_dirs = traindir + valdir
    if args.unlabeled_dir:
        data_dirs.extend(args.unlabeled_dir)
    validate_paths(data_dirs)
    train_transformations = train_transforms(
        random_resized_crop=True, resize=args.resize, crop_dim=args.crop_dim, scale=(0.2, 1.0), p_erase=0.0, interpolation=3
    )
    test_transformations = center_crop_transforms(resize=args.resize, crop_dim=args.crop_dim, interpolation=3)
    train_dataset = torch.utils.data.ConcatDataset([
        datasets.ImageFolder(datadir, train_transformations) for datadir in traindir
    ])
    val_dataset = torch.utils.data.ConcatDataset([
        datasets.ImageFolder(datadir, test_transformations) for datadir in valdir
    ])
    # NOTE(review): the rotation pretext task draws from train + val +
    # unlabeled dirs; confirm that including 'val' images here is intentional.
    unlabeled_dataset = torch.utils.data.ConcatDataset([
        UnlabeledDataset(datadir, train_transformations) for datadir in data_dirs
    ])
    # All labeled splits must agree on the class->index mapping.
    for ds in [train_dataset, val_dataset]:
        assert_same_classes(ds.datasets)
    rotate = RotationTransformer()
    # The rotation collate_fn expands each unlabeled image into 4 rotations.
    unlabeled_loader = torch.utils.data.DataLoader(
        unlabeled_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True,
        worker_init_fn=lambda x: np.random.seed(),
        collate_fn=rotate, drop_last=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True, num_workers=args.workers, pin_memory=True,
        worker_init_fn=lambda x: np.random.seed(), drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True, drop_last=False)
    # Initialize hyper-parameters derived from the datasets and loaders.
    num_unlabeled_classes = rotate.num_rotation_labels
    args.classes = train_dataset.datasets[0].classes
    args.iters_per_epoch = len(train_loader) if args.fully_supervised else len(unlabeled_loader)
    args.warmup_iters = args.warmup_epochs * args.iters_per_epoch
    args.max_iters = args.epochs * args.iters_per_epoch
    args.stop_rampup = int(0.0 * args.max_iters)  # try 0.1-0.5
    args.loss_weight = 1.0
    hparams = OmegaConf.create(dict(
        backbone=args.backbone,
        pretrained=args.pretrained,
        freeze_backbone=args.freeze_backbone,
        num_labeled_classes=len(args.classes),
        num_unlabeled_classes=num_unlabeled_classes if not args.fully_supervised else 0,
        classes=args.classes,
        dropout_rate=0.5 if not args.fully_supervised else 0.0,
        global_pool=args.global_pool,
        optimizer=args.optimizer,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        initial_loss_weight=args.loss_weight,
        stop_rampup=args.stop_rampup,
        warmup_iters=args.warmup_iters,
        warmup_lr=args.warmup_lr,
        lr=args.lr,
        lr_pow=args.lr_pow,
        max_iters=args.max_iters,
    ))
    # Model loading and training
    model = SESEMI(hparams)
    model_checkpoint_callback = ModelCheckpoint(
        monitor='val/top1',
        mode='max',
        save_top_k=1,
        save_last=True)
    trainer = pl.Trainer(
        gpus=0 if args.no_cuda else args.num_gpus,
        accelerator='dp',
        max_steps=args.max_iters,
        default_root_dir=run_dir,
        resume_from_checkpoint=args.resume_from_checkpoint or None,
        callbacks=[model_checkpoint_callback])
    if not args.resume_from_checkpoint and args.pretrained_checkpoint:
        # Load checkpoint for finetuning or evaluation
        logging.info(f'Loading checkpoint {args.pretrained_checkpoint}')
        load_checkpoint(model, args.pretrained_checkpoint)
    if args.evaluate_only:
        # Evaluate model on validation set and exit
        trainer.validate(model, val_loader)
        return
    if args.fully_supervised:
        loaders = dict(supervised=train_loader)
    else:
        loaders = dict(supervised=train_loader, unsupervised_rotation=unlabeled_loader)
    trainer.fit(model, loaders, val_loader)


if __name__ == '__main__':
    open_sesemi()
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,821
|
linhduongtuan/sesemi
|
refs/heads/master
|
/utils.py
|
# Copyright 2021, Flyreel. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import numpy as np
import os, errno
import torch
import torchvision.transforms.functional as TF
from itertools import combinations
def sigmoid_rampup(curr_iter, rampup_iters):
    """Exponential rampup from <https://arxiv.org/abs/1610.02242>.

    Returns a weight in (0, 1] that ramps from exp(-5) at iteration 0 to 1.0
    at ``rampup_iters``; a zero-length rampup always yields 1.0.
    """
    if rampup_iters == 0:
        return 1.0
    progress = np.clip(curr_iter, 0.0, rampup_iters)
    remaining = 1.0 - progress / rampup_iters
    return float(np.exp(-5.0 * remaining * remaining))
class GammaCorrection():
    """Random gamma adjustment: draws a gamma uniformly from ``r`` per image."""

    def __init__(self, r=(0.5, 2.0)):
        # (low, high) range the gamma factor is sampled from.
        self.gamma_range = r

    def __call__(self, x):
        # Fresh random gamma each call; gain is fixed at 1.
        g = np.random.uniform(*self.gamma_range)
        return TF.adjust_gamma(x, g, gain=1)

    def __repr__(self):
        return self.__class__.__name__ + '(r={})'.format(self.gamma_range)
def adjust_polynomial_lr(optimizer, curr_iter, *, warmup_iters, warmup_lr, lr, lr_pow, max_iters):
    """Polynomial LR decay with a linear warmup phase.

    Writes the computed learning rate into every param group of ``optimizer``
    and returns it.
    """
    if curr_iter < warmup_iters:
        # Linear interpolation from warmup_lr up to lr over warmup_iters.
        running_lr = warmup_lr + (lr - warmup_lr) * (curr_iter / warmup_iters)
    else:
        progress = (float(curr_iter) - warmup_iters) / (max_iters - warmup_iters)
        running_lr = lr * max((1.0 - progress), 0.) ** lr_pow
    for group in optimizer.param_groups:
        group['lr'] = running_lr
    return running_lr
def assert_same_classes(datasets):
    """Assert that every dataset shares an identical class->index mapping.

    Args:
        datasets: sequence of ImageFolder-like datasets, each exposing a
            ``class_to_idx`` dict.

    Returns:
        True when all mappings agree. (Fix: previously only the
        single-dataset fast path returned True; the multi-dataset success
        path implicitly returned None.)

    Raises:
        AssertionError: when any pair of datasets disagrees. Note `assert`
        is stripped under ``python -O``; kept for caller compatibility.
    """
    if len(datasets) == 1:
        return True
    same_classes = [x.class_to_idx == y.class_to_idx for x, y in combinations(datasets, r=2)]
    assert all(same_classes), \
        f'The following have mismatched subdirectory names. Check the `Root location`.\n{datasets}'
    return True
def validate_paths(paths):
    """Raise FileNotFoundError for the first path in ``paths`` that is absent."""
    missing = [p for p in paths if not os.path.exists(p)]
    if missing:
        raise FileNotFoundError(
            errno.ENOENT, os.strerror(errno.ENOENT), missing[0]
        )
def load_checkpoint(model, checkpoint_path):
    """Load pretrained SESEMI weights into ``model`` for finetuning/eval.

    The checkpoint's backbone must match the model's. Classifier heads whose
    shapes disagree with the current model (e.g. a different class count) are
    dropped so the backbone still transfers; any resulting missing/unexpected
    keys are printed for inspection.
    """
    with open(checkpoint_path, 'rb') as f:
        checkpoint = torch.load(f)
    pretrained_backbone = checkpoint['hyper_parameters']['backbone']
    model_backbone = model.hparams.backbone
    assert pretrained_backbone == model_backbone, \
        f'Checkpoint backbone `{pretrained_backbone}` is different from model backbone `{model_backbone}`. ' \
        'Specify the correct model backbone to match the pretrained backbone.'
    pretrained_state_dict = checkpoint['state_dict']
    # These buffers are run-specific bookkeeping; never transfer them.
    pretrained_state_dict.pop('current_learning_rate', None)
    pretrained_state_dict.pop('best_validation_top1_accuracy', None)
    current_state_dict = model.state_dict()
    # Drop the self-supervised head when absent from the current model or
    # when its shape disagrees (e.g. trained with a different pretext task).
    if 'fc_unlabeled.weight' in pretrained_state_dict:
        if 'fc_unlabeled.weight' not in current_state_dict or (
                pretrained_state_dict['fc_unlabeled.weight'].shape != current_state_dict['fc_unlabeled.weight'].shape):
            pretrained_state_dict.pop('fc_unlabeled.weight')
            pretrained_state_dict.pop('fc_unlabeled.bias')
    # Same for the supervised head (different number of target classes).
    if 'fc_labeled.weight' in pretrained_state_dict:
        if 'fc_labeled.weight' not in current_state_dict or (
                pretrained_state_dict['fc_labeled.weight'].shape != current_state_dict['fc_labeled.weight'].shape):
            pretrained_state_dict.pop('fc_labeled.weight')
            pretrained_state_dict.pop('fc_labeled.bias')
    # strict=False tolerates the keys dropped above.
    incompatible_keys = model.load_state_dict(pretrained_state_dict, strict=False)
    if incompatible_keys.missing_keys:
        print('missing keys:')
        print('---')
        print('\n'.join(incompatible_keys.missing_keys))
        print()
    if incompatible_keys.unexpected_keys:
        print('unexpected keys:')
        print('---')
        print('\n'.join(incompatible_keys.unexpected_keys))
        print()
|
{"/models/__init__.py": ["/models/sesemi.py"], "/dataset.py": ["/utils.py"], "/models/sesemi.py": ["/models/timm.py", "/utils.py"], "/inference.py": ["/models/__init__.py", "/utils.py", "/dataset.py"], "/open_sesemi.py": ["/models/__init__.py", "/utils.py", "/dataset.py"]}
|
30,836
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/model.py
|
import os
import tensorflow as tf
import tensorflow_hub as hub
from tfseg.silence_tensorflow import silence_tensorflow
# Quiet TensorFlow logging before the model is loaded at import time.
silence_tensorflow()
# The bundled SavedModel directory lives next to this file.
MODEL_PATH = os.path.join(os.path.dirname(__file__), 'chn_seg_albert')
# Loaded once at import; shared by the package-level cut()/lcut() helpers.
MODEL = hub.load(MODEL_PATH)
def cut_func(model, sent: str, use_pos=True):
    """Segment a Chinese sentence with a BERT-style sequence tagger.

    Args:
        model: callable SavedModel mapping a batch of token strings to a
            batch of byte-string tags (first char is the chunk tag, the rest
            is the POS tag, e.g. b'Bns').
        sent: the sentence to segment; truncated to 510 characters so the
            [CLS]/[SEP]-wrapped sequence fits a 512 window.
        use_pos: when True return (words, poses); otherwise words only.
            Note: an empty ``sent`` returns [] in both modes.
    """
    if len(sent) <= 0:
        return []
    elif len(sent) > 510:
        # Leave room for the two special tokens below.
        sent = sent[:510]
    tokens = ['[CLS]'] + list(sent) + ['[SEP]']
    # Pad with empty strings up to the smallest supported bucket length.
    for lens in (32, 64, 128, 256, 512):
        if len(tokens) <= lens:
            tokens += [''] * (lens - len(tokens))
            break
    inputs = tf.constant([tokens])
    pred = model(inputs)
    words = []
    poses = []
    last_word = ''
    last_pos = ''
    # NOTE(review): raw characters are zipped against pred[0] from index 0,
    # which assumes the model output is already aligned with the characters
    # (no [CLS] offset) -- confirm against the SavedModel's signature.
    pred_iter = zip(
        sent,
        pred.numpy()[0]
    )
    for w, x in pred_iter:
        x = x.decode('utf-8')
        # Tag layout: x[0] is the chunk tag, the remainder is the POS tag.
        pos = x[1:]
        if x[0] == 'B' or x[0] == 'S':
            # A 'B'/'S' tag starts a new word: flush the one in progress.
            if len(last_word):
                words.append(last_word)
                poses.append(last_pos)
                last_word = ''
                last_pos = ''
            last_word += w
            last_pos = pos
        else:
            # Continuation tag: extend the current word, keep its POS.
            last_word += w
    if len(last_word):
        words.append(last_word)
        poses.append(last_pos)
    if use_pos:
        return words, poses
    return words
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,837
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/test_cut.py
|
from tfseg import cut, lcut
from tfseg import posseg
from tfseg.pair import pair
def test_cut():
    """cut() yields each segmented word as a plain str."""
    ret = cut('我爱北京天安门')
    for x in ret:
        assert isinstance(x, str)
def test_lcut():
    """lcut() returns a non-empty list of str."""
    ret = lcut('我爱北京天安门')
    assert isinstance(ret, list)
    assert isinstance(ret[0], str)
def test_posseg_cut():
    """posseg.cut() yields pair objects carrying str word and flag."""
    ret = posseg.cut('我爱北京天安门')
    for x in ret:
        assert isinstance(x, pair)
        assert isinstance(x.word, str)
        assert isinstance(x.flag, str)
def test_posseg_lcut():
    """posseg.lcut() returns a non-empty list of pair objects."""
    ret = posseg.lcut('我爱北京天安门')
    assert isinstance(ret, list)
    assert isinstance(ret[0], pair)
    assert isinstance(ret[0].word, str)
    assert isinstance(ret[0].flag, str)
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,838
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/silence_tensorflow.py
|
"""Copy from silence-tensorflow
https://github.com/LucaCappelletti94/silence_tensorflow/blob/aa02373647db93f92ec824a55f37b6ae175d7227/silence_tensorflow/silence_tensorflow.py#L5
"""
import os
import logging
def silence_tensorflow():
    """Silence every warning of notice from tensorflow."""
    # Python-side TF logger.
    logging.getLogger('tensorflow').setLevel(logging.ERROR)
    os.environ["KMP_AFFINITY"] = "noverbose"
    # '3' is the most aggressive TF_CPP_MIN_LOG_LEVEL setting for the C++ core.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Imported here, after the env vars are set, so they take effect when
    # TensorFlow initializes.
    import tensorflow as tf
    tf.get_logger().setLevel('ERROR')
    tf.autograph.set_verbosity(3)
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,839
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/pair.py
|
"""Copy from jieba
https://github.com/fxsjy/jieba/blob/d703bce30236f278818d7f346b2d746256871380/jieba/posseg/__init__.py#L44
"""
class pair(object):
    """A (word, part-of-speech flag) record produced by the segmenter."""

    def __init__(self, word, flag):
        # The segmented token and its part-of-speech flag.
        self.word = word
        self.flag = flag

    def __unicode__(self):
        return '%s/%s' % (self.word, self.flag)

    def __repr__(self):
        return 'pair(%r, %r)' % (self.word, self.flag)

    def __str__(self):
        # Delegate so str() and the legacy __unicode__ always agree.
        return self.__unicode__()

    def __iter__(self):
        # Allow tuple-style unpacking: word, flag = pair(...)
        yield self.word
        yield self.flag

    def __lt__(self, other):
        return self.word < other.word

    def __eq__(self, other):
        if not isinstance(other, pair):
            return False
        return (self.word, self.flag) == (other.word, other.flag)

    def __hash__(self):
        # Hash on the word only, mirroring the ordering defined by __lt__.
        return hash(self.word)

    def encode(self, arg):
        return self.__unicode__().encode(arg)
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,840
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/__init__.py
|
from tfseg.model import MODEL, cut_func
def cut(sent: str):
    """Lazily segment *sent*, yielding one word (str) at a time."""
    yield from cut_func(MODEL, sent, use_pos=False)
def lcut(sent: str):
    """Segment *sent* and return all words at once."""
    words = cut_func(MODEL, sent, use_pos=False)
    return words
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,841
|
xspring14/tfseg
|
refs/heads/main
|
/tfseg/posseg.py
|
from tfseg.model import MODEL, cut_func
from tfseg.pair import pair
def cut(sent: str):
    """Lazily segment *sent*, yielding pair(word, flag) objects."""
    words, flags = cut_func(MODEL, sent, use_pos=True)
    for word, flag in zip(words, flags):
        yield pair(word, flag)
def lcut(sent: str):
    """Segment *sent* and return a list of pair(word, flag) objects."""
    words, flags = cut_func(MODEL, sent, use_pos=True)
    return [pair(word, flag) for word, flag in zip(words, flags)]
|
{"/tfseg/model.py": ["/tfseg/silence_tensorflow.py"], "/tfseg/test_cut.py": ["/tfseg/__init__.py", "/tfseg/pair.py"], "/tfseg/__init__.py": ["/tfseg/model.py"], "/tfseg/posseg.py": ["/tfseg/model.py", "/tfseg/pair.py"]}
|
30,847
|
codingmedved/tickets
|
refs/heads/master
|
/events/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-07-29 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the events app (auto-generated by Django 1.10)."""

    initial = True

    dependencies = [
        # Ticket.locations points at the locations app's Location model.
        ('locations', '0001_initial'),
        # Review.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='EventLabel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('rating', models.DecimalField(decimal_places=2, max_digits=10)),
                ('is_active', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128)),
                ('subtitle', models.CharField(blank=True, max_length=128)),
                ('review_nmb', models.IntegerField()),
                ('rating', models.DecimalField(decimal_places=2, max_digits=10)),
                ('time_start', models.TimeField()),
                ('time_end', models.TimeField()),
                ('time_best_start', models.TimeField()),
                ('time_best_end', models.TimeField()),
                ('highlights', models.TextField()),
                ('description', models.TextField()),
                ('how_to_use', models.TextField()),
                ('additional_info', models.TextField()),
                ('insider_tip', models.TextField()),
                # "lables" is the (misspelled) field name used by events.models.
                ('lables', models.ManyToManyField(to='events.EventLabel')),
                ('locations', models.ManyToManyField(to='locations.Location')),
            ],
        ),
        migrations.CreateModel(
            name='TicketCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='TicketFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='TicketPrice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('is_active', models.BooleanField(default=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.TicketCategory')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Ticket')),
            ],
        ),
        migrations.AddField(
            model_name='ticket',
            name='ticket_feature',
            field=models.ManyToManyField(to='events.TicketFeature'),
        ),
        migrations.AddField(
            model_name='review',
            name='ticket',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Ticket'),
        ),
        migrations.AddField(
            model_name='review',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,848
|
codingmedved/tickets
|
refs/heads/master
|
/orders/models.py
|
from django.db import models
from events.models import Ticket, TicketPrice
from django.contrib.auth.models import User
class TicketStatus(models.Model):
    """Named status for purchased tickets; referenced by TicketPurchased.status."""
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)
class TicketNumbers(models.Model):
    """Per-date inventory counters for a ticket."""
    # NOTE(review): the field is named ticket_price but targets Ticket, not
    # TicketPrice — confirm which model was intended.
    ticket_price = models.ForeignKey(Ticket)
    date = models.DateField()
    nmb_initial = models.IntegerField()
    nmb_current = models.IntegerField()  # initially equals nmb_initial
class Order(models.Model):
    """A user's order with its total price and payment state."""
    user = models.ForeignKey(User)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    is_paid = models.BooleanField(default=False)
    # Set once at creation time (auto_now_add), never updated after.
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
class TicketPurchased(models.Model):
    """One ticket line inside an Order."""
    order = models.ForeignKey(Order)
    ticket_price = models.ForeignKey(TicketPrice)
    date = models.DateField()  # the date the ticket is bought for
    price = models.DecimalField(max_digits=10, decimal_places=2)
    nmb = models.IntegerField()
    price_total = models.DecimalField(max_digits=10, decimal_places=2)  # price * nmb
    # Business rule from the original author: if the line sits more than 10
    # minutes in the "new" status, cancel it and return self.nmb back to
    # nmb_current on the TicketNumbers model.
    status = models.ForeignKey(TicketStatus)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)

    def __str__(self):
        return "%s %s" % (self.ticket_price.ticket.title, self.ticket_price.category.name)
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,849
|
codingmedved/tickets
|
refs/heads/master
|
/locations/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-07-29 18:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the locations app (auto-generated by Django 1.10)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('coordinates', models.CharField(max_length=128)),
                ('address', models.CharField(max_length=128)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='LocationImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, default=None, null=True, upload_to='/images/')),
                ('image_medium', models.ImageField(blank=True, default=None, null=True, upload_to='/images_medium/')),
                ('image_small', models.ImageField(blank=True, default=None, null=True, upload_to='/images_small/')),
                ('is_active', models.BooleanField(default=True)),
                ('is_main', models.BooleanField(default=True)),
                ('city', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='locations.City')),
                ('country', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='locations.Country')),
                ('location', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='locations.Location')),
            ],
        ),
    ]
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,850
|
codingmedved/tickets
|
refs/heads/master
|
/events/migrations/0004_auto_20170729_2119.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-07-29 18:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow NULL on Ticket's time fields (0002 made them blank but not null)."""

    dependencies = [
        ('events', '0003_auto_20170729_2117'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='time_best_end',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_best_start',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_end',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_start',
            field=models.TimeField(blank=True, null=True),
        ),
    ]
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,851
|
codingmedved/tickets
|
refs/heads/master
|
/events/views.py
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from .models import *
from django.contrib import messages
def home(request):
    """Render the landing page listing every ticket."""
    tickets = Ticket.objects.all()
    # locals() hands both `request` and `tickets` to the template context.
    context = locals()
    return render(request, 'events/home.html', context)
def ticket(request, ticket_id):
    """Ticket detail page; a POST with a ``rating`` field updates the rating.

    Renders events/ticket.html with the ticket, the five highest-rated other
    tickets, and a purchase flag (currently hard-coded to True).
    """
    # TODO: raises Ticket.DoesNotExist (HTTP 500) for unknown ids — consider
    # get_object_or_404.
    ticket = Ticket.objects.get(id=ticket_id)
    top_five_tickets = Ticket.objects.all().exclude(id=ticket_id).order_by("-rating")[:5]
    # TODO: derive from the current user's orders
    # (ticket.get_is_ticket_purchased(user) is stubbed out in the model).
    is_ticket_purchased = True
    # Check the request method, not the truthiness of request.POST, so a POST
    # with an empty body is still treated as a POST.
    if request.method == "POST":
        rating = request.POST.get("rating")
        # Only persist when a rating was actually submitted; the original
        # saved unconditionally and would write None on a missing field.
        if rating is not None:
            ticket.rating = rating
            ticket.save(force_update=True)
    return render(request, 'events/ticket.html', locals())
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,852
|
codingmedved/tickets
|
refs/heads/master
|
/events/models.py
|
from django.db import models
from django.contrib.auth.models import User
from locations.models import Location
# Create your models here.
# class Label(models.Model):
# user = models.OneToOneField(User)
# city = models.ForeignKey(City)
# date_birth = models.DateField()
# age = models.IntegerField()
# description = models.TextField(blank=True, null=True)
# avatar = models.ImageField(upload_to='avatars/')
"""
comment
"""
class EventLabel(models.Model):
    """A label/tag that can be attached to a Ticket (see Ticket.lables)."""
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)
class TicketFeature(models.Model):
    """A feature flag attachable to a Ticket (see Ticket.ticket_feature)."""
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)
class Ticket(models.Model):
    """An event/ticket offering shown on the site."""
    locations = models.ManyToManyField(Location)
    title = models.CharField(max_length=128)
    subtitle = models.CharField(max_length=128, blank=True)
    review_nmb = models.IntegerField(default=0)  # number of reviews
    rating = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    # NOTE: "lables" is a typo for "labels", kept because the migrations
    # (events 0001/0002) reference this exact field name.
    lables = models.ManyToManyField(EventLabel, blank=True)
    # overview section
    time_start = models.TimeField(blank=True, null=True)
    time_end = models.TimeField(blank=True, null=True)
    time_best_start = models.TimeField(blank=True, null=True)
    time_best_end = models.TimeField(blank=True, null=True)
    highlights = models.TextField()
    description = models.TextField()
    # tickets
    ticket_feature = models.ManyToManyField(TicketFeature, blank=True)
    how_to_use = models.TextField()
    additional_info = models.TextField()
    insider_tip = models.TextField()

    def __str__(self):
        return "%s" % self.title

    # def get_is_ticket_purchased(self, user):
class Review(models.Model):
    """A user's text review and numeric rating for a Ticket."""
    ticket = models.ForeignKey(Ticket)
    user = models.ForeignKey(User)
    text = models.TextField()
    rating = models.DecimalField(max_digits=10, decimal_places=2)
    is_active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
class TicketCategory(models.Model):
    """A pricing category (see TicketPrice.category)."""
    name = models.CharField(max_length=128)
    description = models.TextField()
    is_active = models.BooleanField(default=True)
class TicketPrice(models.Model):
    """The price of a Ticket within a given TicketCategory."""
    ticket = models.ForeignKey(Ticket)
    category = models.ForeignKey(TicketCategory)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return "%s %s" % (self.ticket.title, self.category.name)
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,853
|
codingmedved/tickets
|
refs/heads/master
|
/events/urls.py
|
from django.conf.urls import url, include
from django.contrib import admin
from . import views
# URL routes for the events app: the landing page and the ticket detail page.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^ticket/(?P<ticket_id>\w+)/$', views.ticket, name='ticket'),
]
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,854
|
codingmedved/tickets
|
refs/heads/master
|
/events/migrations/0002_auto_20170729_2116.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-07-29 18:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add defaults and blank=True to Ticket fields (null added later in 0004)."""

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='lables',
            field=models.ManyToManyField(blank=True, to='events.EventLabel'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='rating',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='review_nmb',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_best_end',
            field=models.TimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_best_start',
            field=models.TimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_end',
            field=models.TimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='time_start',
            field=models.TimeField(blank=True),
        ),
    ]
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,855
|
codingmedved/tickets
|
refs/heads/master
|
/locations/models.py
|
from django.db import models
# Create your models here.
class Country(models.Model):
    """A country; LocationImage may reference it."""
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return "%s" % self.name
class City(models.Model):
    """A city; LocationImage may reference it."""
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return "%s" % self.name
class Location(models.Model):
    """A concrete venue an events.Ticket can point at."""
    name = models.CharField(max_length=128)
    description = models.TextField()
    # Original author: "altitude and latitude in Charfield" — a single
    # free-form CharField; format (e.g. "lat,lng") is not enforced anywhere.
    coordinates = models.CharField(max_length=128)
    address = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return "%s" % self.name
class LocationImage(models.Model):
    """An image in three sizes, optionally tied to a Country, City or Location."""
    country = models.ForeignKey(Country, blank=True, null=True, default=None)
    city = models.ForeignKey(City, blank=True, null=True, default=None)
    location = models.ForeignKey(Location, blank=True, null=True, default=None)
    # NOTE(review): absolute upload_to paths ("/images/") are unusual for
    # Django (normally relative to MEDIA_ROOT) — confirm intended layout.
    image = models.ImageField(upload_to="/images/", blank=True, null=True, default=None)
    image_medium = models.ImageField(upload_to="/images_medium/", blank=True, null=True, default=None)
    image_small = models.ImageField(upload_to="/images_small/", blank=True, null=True, default=None)
    is_active = models.BooleanField(default=True)
    is_main = models.BooleanField(default=True)

    def __str__(self):
        return "%s" % self.id
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,856
|
codingmedved/tickets
|
refs/heads/master
|
/payments/models.py
|
from django.db import models
from orders.models import Order
# Create your models here.
class Payment(models.Model):
    """A payment record tied one-to-one to an Order."""
    order = models.OneToOneField(Order)
    amount = models.DecimalField(max_digits=10, decimal_places=2)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,857
|
codingmedved/tickets
|
refs/heads/master
|
/events/admin.py
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose every events model in the Django admin site with default options.
admin.site.register(EventLabel)
admin.site.register(TicketFeature)
admin.site.register(Ticket)
admin.site.register(Review)
admin.site.register(TicketCategory)
admin.site.register(TicketPrice)
|
{"/orders/models.py": ["/events/models.py"], "/events/views.py": ["/events/models.py"], "/events/models.py": ["/locations/models.py"], "/payments/models.py": ["/orders/models.py"], "/events/admin.py": ["/events/models.py"]}
|
30,860
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/english_recognition.py
|
import speech_recognition as sr
import urllib.request as req
from xml.dom import minidom
from os import path
import os
import deep_speech as ds
def recognize_yandex(audio):
    """Recognize English speech via the Yandex SpeechKit HTTP API.

    Returns the top recognition variant as a str, or None (implicitly)
    when the service reports success='0'.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # configuration/environment before publishing.
    key = 'abc41255-8098-4fb0-8f6f-45be137bfc05'
    uuid = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab'
    data = audio.get_wav_data()
    request = req.Request(url='https://asr.yandex.net/asr_xml?uuid=' + uuid + '&key=' + key + '&topic=queries&lang=en-US',
                          headers={'Content-Type': 'audio/x-wav', 'Content-Length': len(data)})
    response = req.urlopen(url=request, data=data)
    xmldoc = minidom.parseString(response.read())
    if xmldoc.getElementsByTagName('recognitionResults')[0].attributes['success'].value == '1':
        '''for variant in xmldoc.getElementsByTagName('variant'):
            print(variant.attributes['confidence'].value + ' ' + variant.childNodes[0].nodeValue)'''
        # The first <variant> element is the service's best hypothesis.
        return xmldoc.getElementsByTagName('variant')[0].childNodes[0].nodeValue
# Directory with the test WAV files, located next to this script.
AUDIO_DIR = path.join(path.dirname(path.realpath(__file__)), 'audio')
files = os.listdir(AUDIO_DIR)
for file in files:
    AUDIO_FILE = AUDIO_DIR + '/' + file
    print()
    print('File: ' + file)
    # use the audio file as the audio source
    r = sr.Recognizer()
    with sr.AudioFile(AUDIO_FILE) as source:
        audio = r.record(source)  # read the entire audio file
    # recognize speech using Sphinx
    try:
        print("PocketSphinx: " + r.recognize_sphinx(audio))
    except sr.UnknownValueError:
        print("Sphinx could not understand audio")
    except sr.RequestError as e:
        print("Sphinx error; {0}".format(e))
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        print("Google Speech Recognition: " + r.recognize_google(audio))
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    # NOTE(review): the two calls below are not wrapped in try/except — a
    # network or decode failure here aborts the loop for remaining files.
    print("Yandex SpeechKit: " + recognize_yandex(audio))
    print("Mozilla DeepSpeech: " + ds.recognize_deepspeech(AUDIO_FILE))
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,861
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/openseq2seq_recognizer.py
|
import tensorflow as tf
from open_seq2seq.utils.utils import deco_print, get_base_config, check_logdir, \
create_logdir, create_model, get_interactive_infer_results
# Define the command line arguments that one would pass to run.py here
# A simpler version of what run.py does. It returns the created model and its saved checkpoint
def get_model(args, scope):
    """Build an OpenSeq2Seq model under variable scope *scope*.

    A simpler version of what run.py does: parses the CLI-style *args*,
    locates the checkpoint, and constructs the model.
    Returns (model, checkpoint_path).
    """
    with tf.variable_scope(scope):
        args, base_config, base_model, config_module = get_base_config(args)
        checkpoint = check_logdir(args, base_config)
        model = create_model(args, base_config, config_module, base_model, None)
    return model, checkpoint
class OpenSeq2Seq:
    """Speech-to-text recognizer backed by an OpenSeq2Seq checkpoint.

    *model_path* must contain config.py plus the checkpoint files used as
    the logdir for interactive inference.
    """
    def __init__(self, model_path):
        # Equivalent of the run.py CLI flags for interactive inference.
        self.args_S2T = ["--config_file=" + model_path + "/config.py",
                         "--mode=interactive_infer",
                         "--logdir=" + model_path + "/",
                         "--batch_size_per_gpu=1",
                         ]
        self.model_S2T, checkpoint_S2T = get_model(self.args_S2T, "S2T")
        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=sess_config)
        # Collect only the variables living under the "S2T" scope and strip
        # the scope prefix so their names match the checkpoint's names.
        vars_S2T = {}
        # vars_T2S = {}
        for v in tf.get_collection(tf.GraphKeys.VARIABLES):
            if "S2T" in v.name:
                vars_S2T["/".join(v.op.name.split("/")[1:])] = v
        '''if "T2S" in v.name:
            vars_T2S["/".join(v.op.name.split("/")[1:])] = v'''
        saver_S2T = tf.train.Saver(vars_S2T)
        saver_S2T.restore(self.sess, checkpoint_S2T)

    def recognize(self, wav_file):
        """Run inference on *wav_file* and return the recognized text (str)."""
        results = get_interactive_infer_results(self.model_S2T, self.sess, model_in=[wav_file])
        english_recognized = results[0][0]
        return english_recognized
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,862
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/deepspeech_recognizer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import argparse
import numpy as np
import shlex
import subprocess
import sys
import wave
from deepspeech import Model
from timeit import default_timer as timer
try:
from shhlex import quote
except ImportError:
from pipes import quote
# These constants control the beam search decoder
# Beam width used in the CTC decoder when building candidate transcriptions
BEAM_WIDTH = 500
# The alpha hyperparameter of the CTC decoder. Language Model weight
LM_ALPHA = 0.75
# The beta hyperparameter of the CTC decoder. Word insertion bonus.
LM_BETA = 1.85
# These constants are tied to the shape of the graph used (changing them changes
# the geometry of the first layer), so make sure you use the same constants that
# were used during training
# Number of MFCC features to use
N_FEATURES = 26
# Size of the context window used for producing timesteps in the input vector
N_CONTEXT = 9
def convert_samplerate(audio_path):
    """Resample *audio_path* to 16 kHz mono signed 16-bit PCM using SoX.

    Returns a (16000, samples) tuple where samples is a numpy int16 array.
    Raises RuntimeError when SoX exits non-zero and OSError when the SoX
    binary cannot be found.
    """
    sox_cmd = ('sox {} --type raw --bits 16 --channels 1 --rate 16000 '
               '--encoding signed-integer --endian little --compression 0.0 '
               '--no-dither - ').format(quote(audio_path))
    try:
        raw = subprocess.check_output(shlex.split(sox_cmd), stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        raise RuntimeError('SoX returned non-zero status: {}'.format(e.stderr))
    except OSError as e:
        raise OSError(e.errno, 'SoX not found, use 16kHz files or install it: {}'.format(e.strerror))
    return 16000, np.frombuffer(raw, np.int16)
class VersionAction(argparse.Action):
    """argparse action that exits immediately (version printing is disabled)."""
    def __init__(self, *args, **kwargs):
        # nargs=0: the flag consumes no command-line values.
        super(VersionAction, self).__init__(nargs=0, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        #printVersions()
        exit(0)
class DeepSpeech:
    """Wrapper around a Mozilla DeepSpeech acoustic model + language model.

    *model_path* must contain output_graph.pbmm, alphabet.txt, lm.binary
    and trie as produced by DeepSpeech export.
    """
    def __init__(self, model_path):
        # Expected file layout inside the model directory.
        self.model = model_path + '/output_graph.pbmm'
        self.alphabet = model_path + '/alphabet.txt'
        self.lm = model_path + '/lm.binary'
        self.trie = model_path + '/trie'
        #print('Loading model from file {}'.format(self.model), file=sys.stderr)
        #model_load_start = timer()
        self.ds = Model(self.model, N_FEATURES, N_CONTEXT, self.alphabet, BEAM_WIDTH)
        #model_load_end = timer() - model_load_start
        #print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr)
        # Both paths are always non-empty strings here, so the LM-based
        # decoder branch is always taken.
        if self.lm and self.trie:
            #print('Loading language model from files {} {}'.format(self.lm, self.trie), file=sys.stderr)
            #lm_load_start = timer()
            self.ds.enableDecoderWithLM(self.alphabet, self.lm, self.trie, LM_ALPHA, LM_BETA)
            #lm_load_end = timer() - lm_load_start
            #print('Loaded language model in {:.3}s.'.format(lm_load_end), file=sys.stderr)

    def recognize(self, wav_file):
        """Transcribe *wav_file* (WAV) and return the decoded text.

        Files not already at 16 kHz are resampled through SoX first.
        """
        '''parser = argparse.ArgumentParser(description='Running DeepSpeech inference.')
        parser.add_argument('--model', required=True,
                            help='Path to the model (protocol buffer binary file)')
        parser.add_argument('--alphabet', required=True,
                            help='Path to the configuration file specifying the alphabet used by the network')
        parser.add_argument('--lm', nargs='?',
                            help='Path to the language model binary file')
        parser.add_argument('--trie', nargs='?',
                            help='Path to the language model trie file created with native_client/generate_trie')
        parser.add_argument('--audio', required=True,
                            help='Path to the audio file to run (WAV format)')
        parser.add_argument('--version', action=VersionAction,
                            help='Print version and exits')
        args = parser.parse_args()'''
        fin = wave.open(wav_file, 'rb')
        fs = fin.getframerate()
        if fs != 16000:
            #print('Warning: original sample rate ({}) is different than 16kHz. Resampling might produce erratic speech recognition.'.format(fs), file=sys.stderr)
            fs, audio = convert_samplerate(wav_file)
        else:
            audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)
        #audio_length = fin.getnframes() * (1/16000)
        fin.close()
        #print('Running inference.', file=sys.stderr)
        #inference_start = timer()
        return self.ds.stt(audio, fs)
        #inference_end = timer() - inference_start
        #print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr)
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,863
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/main_recognition.py
|
import os
from deepspeech_recognizer import DeepSpeech
from kaldiasr.nnet3 import KaldiNNet3OnlineModel, KaldiNNet3OnlineDecoder
from openseq2seq_recognizer import OpenSeq2Seq
from wer import wer
# Model directories / checkpoints for each recognizer under test.
DEEPSPEECH_MODEL = './deepspeech/models'
KALDI_MODEL = './kaldi/models/kaldi-generic-en-tdnn_sp-r20180815'
OPENSEQ2SEQ_MODEL = './OpenSeq2Seq/Infer_S2T W2L'
# Instantiate the engines up front (loads all models into memory).
deepspeech = DeepSpeech(DEEPSPEECH_MODEL)
kaldi_model = KaldiNNet3OnlineModel(KALDI_MODEL, acoustic_scale=1.0, beam=7.0, frame_subsampling_factor=3)
kaldi_decoder = KaldiNNet3OnlineDecoder(kaldi_model)
openseq2seq = OpenSeq2Seq(OPENSEQ2SEQ_MODEL)
def kaldi_recognize(wav_file):
    """Decode *wav_file* with the module-level Kaldi decoder; return the text."""
    if not kaldi_decoder.decode_wav_file(wav_file):
        return "***ERROR: decoding of %s failed." % wav_file
    text, likelihood = kaldi_decoder.get_decoded_string()
    return text
def append_to_file(file, line):
    """Append *line* plus a trailing newline to *file*, creating it if absent."""
    with open(file, 'a') as out:
        out.write('%s\n' % line)
AUDIO_DIR = 'audio'
# files.csv rows look like "<wav filename>,<reference transcript>".
with open(AUDIO_DIR + '/' + 'files.csv', 'r') as files:
    for file in files.readlines():
        splitter = file.split(',')
        # Strip the trailing newline from the transcript column.
        if splitter[1][-1] == '\n':
            splitter[1] = splitter[1][0:-1]
        audio_file = AUDIO_DIR + '/' + splitter[0]
        recognized_text = deepspeech.recognize(audio_file)
        print(recognized_text)
        # w = number of wrong words, r = number of reference words (as str).
        w, r = wer(splitter[1].split(), recognized_text.split())
        append_to_file('deepspeech.csv', recognized_text + ',' + splitter[1] + ',' + w + ',' + r)
        '''recognized_text = kaldi_recognize(audio_file)
        w, r = wer(splitter[1].split(), recognized_text.split())
        append_to_file('kaldi.csv', recognized_text + ',' + splitter[1] + ',' + w + ',' + r)
        recognized_text = openseq2seq.recognize(audio_file)
        w, r = wer(splitter[1].split(), recognized_text.split())
        append_to_file('openseq2seq.csv', recognized_text + ',' + splitter[1] + ',' + w + ',' + r)'''
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,864
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/wer.py
|
"""
@author Kiettiphong Manovisut
References:
https://en.wikipedia.org/wiki/Word_error_rate
https://www.github.com/mission-peace/interview/wiki
"""
import numpy
def wer(r, h):
    """Word-level edit distance between reference *r* and hypothesis *h*.

    Computes the Levenshtein distance over words (insertions, deletions,
    substitutions) with classic dynamic programming.

    Args:
        r: reference transcript as a list of words.
        h: hypothesis transcript as a list of words.

    Returns:
        (wrong_words, all_words): the edit distance and len(r), both as str
        (kept as strings because callers concatenate them into CSV lines).
    """
    # d[i][j] = edit distance between the first i reference words and the
    # first j hypothesis words. uint32 instead of the original uint16 so
    # very long transcripts cannot silently overflow the counter.
    d = numpy.zeros((len(r) + 1, len(h) + 1), dtype=numpy.uint32)
    # Base cases in O(n+m): distance from/to the empty sequence.
    # (The original filled these edges with a full O(n*m) double loop.)
    d[0, :] = numpy.arange(len(h) + 1)
    d[:, 0] = numpy.arange(len(r) + 1)
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            if r[i - 1] == h[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                substitution = d[i - 1][j - 1] + 1
                insertion = d[i][j - 1] + 1
                deletion = d[i - 1][j] + 1
                d[i][j] = min(substitution, insertion, deletion)
    wrong_words = str(d[len(r)][len(h)])
    all_words = str(len(r))
    return wrong_words, all_words
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,865
|
dimasKaskader/SpeechRecognitionTesting
|
refs/heads/master
|
/pro_codich.py
|
import speech_recognition as sr
import urllib.request as req
from xml.dom import minidom
from os import path
import os
import openpyxl
'''class Excel:
index = 2
wb = openpyxl.load_workbook(filename='excel.xlsx')
sheet = wb['table1']
@staticmethod
def init():
while Excel.sheet['A' + str(Excel.index)].value is not None:
Excel.index += 1
@staticmethod
def write_line(name, sphinx, yandex, google):
sheet = Excel.sheet
sheet['A' + str(Excel.index)] = name
sheet['B' + str(Excel.index)] = sphinx
sheet['E' + str(Excel.index)] = '-'
sheet['H' + str(Excel.index)] = yandex
sheet['K' + str(Excel.index)] = google
Excel.index += 1
@staticmethod
def close():
Excel.wb.save('excel.xlsx')'''
def recognize_yandex(audio):
    """Recognize Russian speech via the Yandex SpeechKit HTTP API.

    Returns the top recognition variant as a str, or None (implicitly)
    when the service reports success='0'.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # configuration/environment before publishing.
    key = '1e692527-ad23-4fdb-b463-b34e545f9a13'
    uuid = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab'
    data = audio.get_wav_data()
    request = req.Request(url='https://asr.yandex.net/asr_xml?uuid=' + uuid + '&key=' + key + '&topic=queries',
                          headers={'Content-Type': 'audio/x-wav', 'Content-Length': len(data)})
    response = req.urlopen(url=request, data=data)
    xmldoc = minidom.parseString(response.read())
    if xmldoc.getElementsByTagName('recognitionResults')[0].attributes['success'].value == '1':
        # The first <variant> element is the service's best hypothesis.
        return xmldoc.getElementsByTagName('variant')[0].childNodes[0].nodeValue
# Directory with the test WAV files, located next to this script.
AUDIO_DIR = path.join(path.dirname(path.realpath(__file__)), 'audio')
r = sr.Recognizer()
files = os.listdir(AUDIO_DIR)
for file in files:
    AUDIO_FILE = AUDIO_DIR + '/' + file
    print()
    print('File: ' + file)
    with sr.AudioFile(AUDIO_FILE) as source:
        audio = r.record(source)  # read the whole audio file
    sphinx = r.recognize_sphinx(audio)  # recognize with PocketSphinx
    print("PocketSphinx: " + sphinx)
    google = r.recognize_google(audio, language='ru')  # recognize with Google
    print("Google Speech Recognition: " + google)
    yandex = recognize_yandex(audio)  # recognize with Yandex SpeechKit
    print("Yandex SpeechKit: " + yandex)
    #Excel.write_line(file.split('.')[0], sphinx, yandex, google)
#Excel.close()
|
{"/main_recognition.py": ["/deepspeech_recognizer.py", "/openseq2seq_recognizer.py", "/wer.py"]}
|
30,870
|
OneRaynyDay/LinearRegression
|
refs/heads/master
|
/TestData.py
|
import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Five samples of (x1, x2, y); the last column is the regression target.
arr = np.array([[1,0.7,0.5],[2,0.8,2.5],[3,0.9,3],[4,1.1,4.5],[5,1.4,4.5]])
LR = LinearRegression.LinearRegression(arr)
# Run 10 gradient-descent updates with learning rate 0.1.
for i in range(10):
    LR.gradDescent(0.1)
# Model predictions for the training inputs under the fitted parameters.
vals = np.dot(LR.X, LR.Theta)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Red: raw training data; blue: the model's predictions.
ax.plot(arr[:, 0], arr[:, 1], arr[:, 2], c='r')
print(vals)
ax.plot(arr[:, 0], arr[:, 1], vals, c='b')
plt.show()
print(LR.Theta)
|
{"/TestData.py": ["/LinearRegression.py"]}
|
30,871
|
OneRaynyDay/LinearRegression
|
refs/heads/master
|
/LinearRegression.py
|
import numpy as np
class LinearRegression:
    """Ordinary least-squares linear regression fitted by batch gradient descent.

    Attributes:
        X: (m, n) design matrix — a leading column of ones (bias term)
           followed by the feature columns of the input.
        y: (m,) vector of targets (the last column of the input matrix).
        Theta: (n,) parameter vector, initialised to zeros.
    """
    def __init__(self, matrix):
        """Split *matrix* into features/targets and initialise the parameters.

        Args:
            matrix: (m, k) array whose first k-1 columns are the features
                and whose last column is the target of each sample.
        """
        biasTerm = np.ones((matrix.shape[0], 1))
        Xcomp = np.array(matrix[:, 0:-1])
        self.X = np.append(biasTerm, Xcomp, axis=1)
        self.y = matrix[:, -1]
        self.Theta = np.zeros((self.X.shape[1]))
    def costFunction(self):
        """Return the cost J(Theta) = sum((X·Theta - y)^2) / (2m).

        BUG FIX: the previous version summed the *raw* residuals, which
        cancel each other out and can even make the "cost" negative; the
        residuals must be squared.
        """
        m = self.X.shape[0]
        residuals = np.dot(self.X, self.Theta) - self.y
        return np.sum(residuals ** 2) / (2 * m)
    def gradDescent(self, alpha):
        """Perform one batch gradient-descent update of Theta.

        Args:
            alpha: learning rate.

        BUG FIX: the gradient of J is X^T·(X·Theta - y) / m.  The previous
        version scaled single sample rows by the *total* residual sum, which
        is not the gradient and does not minimise the cost.
        """
        m = self.X.shape[0]
        residuals = np.dot(self.X, self.Theta) - self.y
        self.Theta = self.Theta - alpha * np.dot(self.X.T, residuals) / m
|
{"/TestData.py": ["/LinearRegression.py"]}
|
30,878
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/solver/solver.py
|
from abc import ABC, abstractmethod
from puzzle.puzzle import *
class Solver(ABC):
    """Abstract base class for puzzle solvers.

    Concrete subclasses receive the puzzle at construction time and
    implement ``solve``/``get_solution``.
    """
    def __init__(self, puzzles_to_solve : Puzzle):
        # The puzzle instance this solver operates on.
        self.puzzles_to_solve = puzzles_to_solve
    @abstractmethod
    def solve(self):
        """Run the solving algorithm."""
        pass
    @abstractmethod
    def get_solution(self) -> Puzzle:
        """Return the (possibly partially) solved puzzle."""
        pass
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,879
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/puzzle/puzzle.py
|
class Puzzle:
    """Marker base class for all puzzle types (e.g. ClassicSudoku)."""
    pass
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,880
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/utility/printUtility.py
|
from math import *
from numpy import *
from puzzle.cell import *
from puzzle.sudoku import ClassicSudoku
class PrintClassicSudokuBoard:
    """Render a ClassicSudoku as a framed Unicode box-drawing string.

    ``print_sudoku`` draws one character per cell; ``print_sudoku_with_candidate``
    widens every cell to ``values_for_side_of_a_block`` characters per side so
    the candidate values of the empty cells can be shown inside them.
    """
    def __init__(self, sudoku : ClassicSudoku):
        self.__sudoku = sudoku
        self.__candidates_is_present = False
        # Width/height (in characters) of the square area drawn for one cell.
        self.__dimension_of_a_cell = 1
        # Text accumulator for the board being rendered.
        self.__board = ""
    def __make_top_frame(self) -> str:
        return self.__make_frame_parts("╔", "═", "╦", "╗\n")
    def __make_orizontal_divider_frame(self) -> str:
        return self.__make_frame_parts("╠", "═", "╬", "╣\n")
    def __make_bottom_frame(self) -> str:
        return self.__make_frame_parts("╚", "═", "╩", "╝\n")
    def __make_frame_parts(self, start: str, edge: str, divider : str, end : str) -> str:
        return self.__make_frame_parts_with_additional_divider(start, edge, divider, edge, end)
    def __make_orizontal_divider_block(self) -> str:
        return self.__make_frame_parts_with_additional_divider("║", "─", "║", "┼", "║\n")
    def __make_frame_parts_with_additional_divider(self, start: str, edge: str, divider : str,
                                                   intermediate_divider : str, end : str) -> str:
        # + 2 for aesthetic reasons, to increase the width of the sudoku
        border_cell = edge * (self.__dimension_of_a_cell + 2)
        border_block = (((border_cell + intermediate_divider)
                        * (self.__sudoku.values_for_side_of_a_block - 1))
                        + border_cell)
        return (start
                + ((border_block + divider) * (self.__sudoku.blocks_for_side_of_a_sudoku - 1))
                + border_block
                + end)
    def print_sudoku(self):
        """Return the board as text, without candidates (1 char per cell)."""
        self.__dimension_of_a_cell = 1
        self.__candidates_is_present = False
        return self.__add_the_top_and_bottom_to_frame_of_board({})
    def print_sudoku_with_candidate(self, candidates : dict) -> str:
        """Return the board as text, showing *candidates* in empty cells."""
        self.__dimension_of_a_cell = self.__sudoku.values_for_side_of_a_block
        self.__candidates_is_present = True
        return self.__add_the_top_and_bottom_to_frame_of_board(candidates)
    def __add_the_top_and_bottom_to_frame_of_board(self, candidates : dict) -> str:
        return (self.__make_top_frame()
                + self.make_board(candidates)
                + self.__make_bottom_frame())
    def make_board(self, candidates : dict) -> str:
        # BUG FIX: reset the accumulator here — previously it was only set in
        # __init__, so printing the same board twice appended a second full
        # copy of the board after the first one.
        self.__board = ""
        for row in range(self.__sudoku.values_for_side_of_a_sudoku):
            self.__make_row_of_a_board(row, candidates)
            self.__make_orizontal_divider(row)
        return self.__board
    def __make_row_of_a_board(self, row : int, candidates : dict):
        # A board row spans __dimension_of_a_cell text lines.
        for row_of_value in range(self.__dimension_of_a_cell):
            self.__board += "║ "
            self.__make_contents_of_a_row_of_a_board(row, row_of_value, candidates)
            self.__board += "\n"
    def __make_contents_of_a_row_of_a_board(self, row : int, row_of_value : int, candidates : dict):
        for column in range(self.__sudoku.values_for_side_of_a_sudoku):
            for column_of_value in range(self.__dimension_of_a_cell):
                cell = IndicesOfCell(row, column)
                self.__make_the_part_of_a_single_cell(cell, IndicesOfCell(row_of_value, column_of_value),
                                                      self.__sudoku.get_the_value_from_cell(cell), candidates)
    def __make_the_part_of_a_single_cell(self, cell : IndicesOfCell, part_of_cell : IndicesOfCell, value : int, candidates : dict):
        self.__make_the_content_of_a_part_of_a_single_cell(cell, part_of_cell, value, candidates)
        self.__board += self.__make_vertical_divider_frame(cell.column, part_of_cell.column)
    def __make_the_content_of_a_part_of_a_single_cell(self, cell : IndicesOfCell, part_of_cell : IndicesOfCell,
                                                      value : int, candidates : dict):
        if not self.__sudoku.cell_is_empty(cell):
            self.__board += self.__make_cell_full(part_of_cell, value)
        elif self.__candidates_is_present:
            self.__make_the_part_of_a_single_cell_with_candidates(
                candidates[cell],
                self.__calculate_expected_candidate(part_of_cell))
        else:
            self.__board += " "
    def __make_vertical_divider_frame(self, column : int, column_of_cell : int) -> str:
        if self.__is_the_boundary_of_cell(column, column_of_cell):
            return " │ "
        elif self.__is_the_boundary_of_block(column, column_of_cell):
            return " ║ "
        return ""
    def __is_the_boundary_of_cell(self, column : int, column_of_cell : int) -> bool:
        return (column_of_cell == self.__dimension_of_a_cell - 1
                and (column + 1) % self.__sudoku.values_for_side_of_a_block != 0)
    def __is_the_boundary_of_block(self, column : int, column_of_cell : int) -> bool:
        return (column_of_cell == self.__dimension_of_a_cell - 1
                and (column + 1) % self.__sudoku.values_for_side_of_a_block == 0)
    def __make_cell_full(self, part_of_cell : IndicesOfCell, value : int) -> str:
        # A filled cell shows its value at the centre; in candidate mode the
        # rest of the cell area is padded with bullets.
        if self.__is_center_of_a_cell(part_of_cell):
            return str(value)
        return "•" if self.__candidates_is_present else ""
    def __is_center_of_a_cell(self, part_of_cell : IndicesOfCell):
        return (part_of_cell.row == part_of_cell.column
                and ceil(self.__dimension_of_a_cell / 2) == (part_of_cell.row + 1))
    def __make_the_part_of_a_single_cell_with_candidates(self, candidates : list, expected_candidate : int):
        if expected_candidate in candidates:
            self.__board += str(expected_candidate)
        else:
            self.__board += " "
    def __calculate_expected_candidate(self, part_of_cell : IndicesOfCell) -> int:
        # Candidates are laid out row-major inside the cell: position (r, c)
        # displays candidate r * block_side + c + 1.
        return (part_of_cell.row * self.__sudoku.values_for_side_of_a_block) + (part_of_cell.column + 1)
    # TODO: check index
    def __make_orizontal_divider(self, row : int):
        if row + 1 != (self.__sudoku.blocks_for_side_of_a_sudoku * self.__sudoku.values_for_side_of_a_block):
            if (row + 1) % self.__sudoku.values_for_side_of_a_block == 0:
                self.__board += self.__make_orizontal_divider_frame()
            else:
                self.__board += self.__make_orizontal_divider_block()
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,881
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/puzzle/cell.py
|
from operator import *
class IndicesOfCell(tuple):
    """Immutable (row, column) pair identifying one sudoku cell.

    Being a tuple, instances are hashable (usable in sets and as dict keys)
    and compare equal to the plain tuple ``(row, column)``.
    """
    # Read-only accessors for the two components.
    # BUG FIX: the original re-assigned these class-level properties inside
    # __new__ on every instantiation; declaring them once at class level has
    # the same effect without mutating the class repeatedly.
    row = property(itemgetter(0))
    column = property(itemgetter(1))
    def __new__(cls, row : int, column : int):
        return tuple.__new__(cls, (row, column))
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,882
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/puzzle/sudoku.py
|
from math import *
from typing import Any, Callable
from numpy import *
from puzzle.cell import *
from puzzle.puzzle import *
class ClassicSudoku(Puzzle):
    """A classic sudoku grid backed by a numpy array.

    A cell value of 0 means "empty".  Blocks are the square sub-grids; for a
    standard 9x9 sudoku both ``values_for_side_of_a_block`` and
    ``blocks_for_side_of_a_sudoku`` are 3.
    """
    #region Constructor
    def __init__(self, sudoku : list):
        self.__check_input(sudoku)
        self.__make_grid(sudoku)
    #region Check if the input is valid
    def __check_input(self, sudoku : list):
        self.__check_dimensions(sudoku)
        self.__check_content(sudoku)
    def __check_dimensions(self, sudoku : list):
        # NOTE(review): for side lengths whose int(sqrt) is 1 (e.g. 2 or 3)
        # the first condition is False, so no error is raised — confirm.
        square_root = int(sqrt(len(sudoku)))
        if square_root > 1 and (square_root * square_root) != len(sudoku):
            raise ValueError("Dimensione del sudoku non valida, assicurarsi di inserire una griglia valida!!")
    def __check_content(self, sudoku : list):
        # Every row must be as long as the grid, and every value must fit
        # the grid's value range.
        for row in sudoku:
            if len(row) != len(sudoku):
                raise ValueError("Una riga non è compatibile con il sudoku in questione!!")
            for value in row:
                if value > len(sudoku):
                    raise ValueError(str(value) + " non può essere presente in questo sudoku!!")
    #endregion
    #region Make a grid that contains the sudoku values and related information
    def __make_grid(self, sudoku: list):
        self.__init_information(sudoku)
        self.__sudoku = array(sudoku)
    def __init_information(self, sudoku: list):
        # Both equal sqrt(side): a classic sudoku has as many blocks per
        # side as values per block side.
        self.__values_for_side_of_a_block = int(sqrt(len(sudoku)))
        self.__blocks_for_side_of_a_sudoku = int(sqrt(len(sudoku)))
    #endregion
    #endregion
    #region To string
    def __str__(self):
        # Imported here to avoid the circular import with utility.printUtility.
        from utility.printUtility import PrintClassicSudokuBoard
        printer = PrintClassicSudokuBoard(self)
        return printer.print_sudoku()
    #endregion
    #region It's equal to [sudoku]
    def __eq__(self, sudoku):
        return (self.__sudoku is not None
                and type(self) == type(sudoku)
                and array_equal(self.__sudoku, sudoku.__sudoku))
    #endregion
    #region Property
    @property
    def blocks_for_side_of_a_sudoku(self) -> int:
        return self.__blocks_for_side_of_a_sudoku
    @property
    def values_for_side_of_a_block(self) -> int:
        return self.__values_for_side_of_a_block
    @property
    def values_for_side_of_a_sudoku(self) -> int:
        return self.__values_for_side_of_a_block * self.__blocks_for_side_of_a_sudoku
    #endregion
    #region Get information about the sudoku and its contents
    def get_the_value_from_cell(self, cell : IndicesOfCell) -> int:
        return self.__sudoku[cell.row, cell.column]
    def first_cell_of_the_block(self, cell : IndicesOfCell) -> IndicesOfCell:
        # Snap the indices down to the top-left corner of the cell's block.
        return IndicesOfCell(
            int(cell.row / self.__blocks_for_side_of_a_sudoku) * self.__values_for_side_of_a_block,
            int(cell.column / self.__blocks_for_side_of_a_sudoku) * self.__values_for_side_of_a_block)
    def cell_is_empty(self, cell : IndicesOfCell) -> bool:
        # 0 is the sentinel for an empty cell.
        return self.__sudoku[cell.row, cell.column] == 0
    #region Check if the following cells all belong to the same section
    def these_cells_belong_to_a_single_block(self, references_to_the_cells : set) -> bool:
        return self.__these_cells_belong_to_a_single_section(references_to_the_cells, self.first_cell_of_the_block)
    def these_cells_belong_to_a_single_row(self, references_to_the_cells : set) -> bool:
        return self.__these_cells_belong_to_a_single_section(references_to_the_cells, lambda cell : cell.row)
    def these_cells_belong_to_a_single_column(self, references_to_the_cells : set) -> bool:
        return self.__these_cells_belong_to_a_single_section(references_to_the_cells, lambda cell : cell.column)
    def __these_cells_belong_to_a_single_section(self, references_to_the_cells : set,
                                                 get_information_from_cell : Callable[[IndicesOfCell], Any]) -> bool:
        # The cells share one section iff projecting them through
        # get_information_from_cell yields exactly one distinct value.
        try:
            first_cell_of_the_blocks = self.__extract_the_first_cells_of_the_blocks_by_the_following_cells(
                references_to_the_cells, get_information_from_cell
            )
            return len(first_cell_of_the_blocks) == 1
        except IndexError:
            return False
    def __extract_the_first_cells_of_the_blocks_by_the_following_cells(self, cells : set,
                                                                       get_information_from_cell : Callable[[IndicesOfCell], Any]) -> set:
        first_cell_of_the_blocks = set()
        for cell in cells:
            first_cell_of_the_blocks.add(get_information_from_cell(cell))
        return first_cell_of_the_blocks
    #endregion
    #region Checks if a value is in a part of the sudoku
    def value_not_in_block(self, cell : IndicesOfCell, candidate : int) -> bool:
        cell_to_start_from = self.first_cell_of_the_block(cell)
        return candidate not in self.__sudoku[
            cell_to_start_from.row : cell_to_start_from.row + self.__values_for_side_of_a_block,
            cell_to_start_from.column : cell_to_start_from.column + self.__values_for_side_of_a_block]
    def value_not_in_row(self, row : int, candidate : int) ->bool:
        return candidate not in self.__sudoku[row, : ]
    def value_not_in_column(self, column : int, candidate : int) ->bool:
        return candidate not in self.__sudoku[ :, column]
    #endregion
    def get_the_set_of_cells_indices_of_a_block(self, a_cell_in_the_block) -> set:
        return set(self.get_the_iterator_of_the_indices_of_the_cells_in_the_block(
            self.first_cell_of_the_block(a_cell_in_the_block)))
    # TODO: Test
    def is_solved(self) -> bool:
        # Solved when no empty (0) cells remain; does not re-validate rules.
        return 0 not in self.__sudoku
    #endregion
    #region Insert a value in a cell of the sudoku
    def insert_value_in_cell(self, cell : IndicesOfCell, value : int) -> bool:
        """Write *value* into *cell* if both are valid; return True on success."""
        if self.__cell_and_value_is_valid(cell, value):
            self.__sudoku[cell.row, cell.column] = value
            return True
        return False
    #region Checks if the input of the insert_value_in_cell function is valid
    def __cell_and_value_is_valid(self, cell : IndicesOfCell, value : int) -> bool:
        return cell is not None and self.__cell_is_valid(cell) and self.__value_is_valid(value)
    def __cell_is_valid(self, cell : IndicesOfCell) -> bool:
        return self.__index_is_valid(cell.row) and self.__index_is_valid(cell.column)
    def __index_is_valid(self, index : int) -> bool:
        return (isinstance(index, int)
                and index >= 0
                and index < self.values_for_side_of_a_sudoku)
    def __value_is_valid(self, value : int) -> bool:
        return (isinstance(value, int)
                and value > 0
                and value <= self.values_for_side_of_a_sudoku)
    #endregion
    #endregion
    #region Iterators getter
    def get_the_iterator_of_the_indices_of_the_sudoku_cells(self):
        return self.__IteratorOfTheIndicesOfTheSudokuCells(self.values_for_side_of_a_sudoku)
    def get_the_iterator_of_the_indices_of_the_cells_in_the_block(self, cell_to_start_from : IndicesOfCell):
        return self.__IteratorOfTheIndicesOfTheCellsInTheBlock(cell_to_start_from, self.__values_for_side_of_a_block)
    def get_the_iterator_of_the_indices_of_the_cells_in_the_row(self, row : int):
        return self.__IteratorOfTheIndicesOfTheCellsInTheRow(row, self.values_for_side_of_a_sudoku)
    def get_the_iterator_of_the_indices_of_the_cells_in_the_column(self, column : int):
        return self.__IteratorOfTheIndicesOfTheCellsInTheColumn(column, self.values_for_side_of_a_sudoku)
    #endregion
    #region Iterators, to navigate the sudoku in different ways
    class __IteratorOfTheIndicesOfTheSudokuCells:
        """Iterable over every IndicesOfCell of the grid, row by row."""
        def __init__(self, upper_bound : int):
            self._column_to_start = 0
            self._upper_bound_for_row = self._upper_bound_for_column = upper_bound
        def __iter__(self):
            # (Re)start just before the first cell; __next__ advances first,
            # so the same object can be iterated multiple times.
            self._current_row = 0
            self._current_column = -1
            return self
        def __next__(self) -> IndicesOfCell:
            if self._is_last_column():
                self.__elements_are_finished()
                return self.__next_row()
            return self.__next_column()
        def __next_column(self) -> IndicesOfCell:
            self._current_column += 1
            return IndicesOfCell(self._current_row, self._current_column)
        def __next_row(self) -> IndicesOfCell:
            self._current_column = self._column_to_start
            self._current_row += 1
            return IndicesOfCell(self._current_row, self._current_column)
        def __elements_are_finished(self):
            if self.__is_last_row():
                raise StopIteration
        def __is_last_row(self):
            return self._current_row + 1 >= self._upper_bound_for_row
        def _is_last_column(self):
            return self._current_column + 1 >= self._upper_bound_for_column
    class __IteratorOfTheIndicesOfTheCellsInTheBlock(__IteratorOfTheIndicesOfTheSudokuCells):
        """Iterable over the cells of one block, given its top-left cell."""
        def __init__(self, cell_to_start_from : IndicesOfCell, side_of_block : int):
            self.__cell_to_start_from = cell_to_start_from
            self._column_to_start = cell_to_start_from.column
            self._upper_bound_for_row = cell_to_start_from.row + side_of_block
            self._upper_bound_for_column = cell_to_start_from.column + side_of_block
        def __iter__(self):
            self._current_row = self.__cell_to_start_from.row
            self._current_column = self.__cell_to_start_from.column - 1
            return self
        def __next__(self) -> IndicesOfCell:
            return super().__next__()
    class __IteratorOfTheIndicesOfTheCellsInTheRow(__IteratorOfTheIndicesOfTheSudokuCells):
        """Iterable over the cells of one row."""
        def __init__(self, row : int, upper_bound_for_column : int):
            super().__init__(upper_bound_for_column)
            self._upper_bound_for_row = row
        def __iter__(self):
            self._current_row = self._upper_bound_for_row
            self._current_column = self._column_to_start - 1
            return self
        def __next__(self) -> IndicesOfCell:
            return super().__next__()
    class __IteratorOfTheIndicesOfTheCellsInTheColumn(__IteratorOfTheIndicesOfTheSudokuCells):
        """Iterable over the cells of one column."""
        def __init__(self, column : int, upper_bound_for_row : int):
            super().__init__(upper_bound_for_row)
            self._column_to_start = self._upper_bound_for_column = column
        def __iter__(self):
            self._current_row = -1
            self._current_column = self._upper_bound_for_column
            return self
        def __next__(self) -> IndicesOfCell:
            return super().__next__()
    #endregion
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,883
|
francesco-borzacchiello/SudokuSover
|
refs/heads/main
|
/solver/sudokuSolver.py
|
from typing import Iterator
from solver.solver import *
from puzzle.sudoku import *
from puzzle.cell import *
class ClassicSudokuSolver(Solver):
#region Constructor
def __init__(self, sudoku : ClassicSudoku):
self.__initialize_the_fields(sudoku)
self.__calculate_candidates()
def __initialize_the_fields(self, sudoku : ClassicSudoku):
self.__sudoku = sudoku
self.__stall = False
self.__count_inserted = 0
self.__count_excess_candidates_removed = 0
#endregion
#region To stirng
def __str__(self):
from utility.printUtility import PrintClassicSudokuBoard
printer = PrintClassicSudokuBoard(self.__sudoku)
return printer.print_sudoku_with_candidate(self.__candidates)
#endregion
#region Calculate candidates
def __calculate_candidates(self):
self.__candidates = {}
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_sudoku_cells()
for cell in iterator:
self.__if_the_cell_is_empty_calculates_its_candidates(cell)
def __if_the_cell_is_empty_calculates_its_candidates(self, cell: IndicesOfCell):
if self.__sudoku.cell_is_empty(cell):
self.__candidates[cell] = self.__calculate_candidates_for_a_cell(cell)
def __calculate_candidates_for_a_cell(self, cell: IndicesOfCell) -> list:
candidates_for_a_cell = []
for candidate in range(1, self.__sudoku.values_for_side_of_a_sudoku + 1):
if self.__candidate_is_eligible(cell, candidate):
candidates_for_a_cell.append(candidate)
return candidates_for_a_cell
def __candidate_is_eligible(self, cell : IndicesOfCell, value : int) -> bool:
return (self.__sudoku.cell_is_empty(cell)
and self.__sudoku.value_not_in_block(cell, value)
and self.__sudoku.value_not_in_row(cell.row, value)
and self.__sudoku.value_not_in_column(cell.column, value))
#endregion
#region Solve
def solve(self):
while not self.__sudoku.is_solved() and not self.__stall:
print(self)
# input("press enter")
self.__start_to_solve()
self.__check_if_a_stall_has_occurred()
if self.__stall:
print("a stall has occurred")
self.__try_to_remove_excess_candidates()
else:
print(self.__sudoku)
def __start_to_solve(self):
self.__find_cell_with_one_candidate()
self.__find_row_with_candidate_with_only_one_occurrence_and_insert_it()
self.__find_column_with_candidate_with_only_one_occurrence_and_insert_it()
self.__find_block_with_candidate_with_only_one_occurrence_and_insert_it()
def __try_to_remove_excess_candidates(self):
self.__finds_the_row_in_which_a_candidate_belongs_to_only_one_block()
self.__finds_the_column_in_which_a_candidate_belongs_to_only_one_block()
self.__finds_the_block_in_which_a_candidate_belongs_to_a_single_row()
self.__finds_the_block_in_which_a_candidate_belongs_to_a_single_column()
self.__find_sets_of_candidates_discovered_in_row()
print(self)
self.__check_if_the_stall_has_been_resolved()
if not self.__stall:
self.solve()
else:
print("not possible remove a stall")
#region Find a cell with only one candidate
def __find_cell_with_one_candidate(self):
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_sudoku_cells()
for cell in iterator:
self.__try_to_solve_the_cell(cell)
def __try_to_solve_the_cell(self, cell : IndicesOfCell):
if self.__cell_has_only_one_candidate(cell):
self.__confirm_candidate(cell)
def __cell_has_only_one_candidate(self, cell : IndicesOfCell) -> bool:
return (self.__sudoku.cell_is_empty(cell)
and cell in self.__candidates
and len(self.__candidates[cell]) == 1)
def __confirm_candidate(self, cell : IndicesOfCell):
self.__insert_the_value_and_update_the_candidates(cell, self.__candidates[cell][0])
#endregion
#region Find a row with a candidate that has only one occurrence and insert it
def __find_row_with_candidate_with_only_one_occurrence_and_insert_it(self):
for row in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__find_and_insert_candidate_with_only_one_occurence_for_this_row(row)
def __find_and_insert_candidate_with_only_one_occurence_for_this_row(self, row : int):
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_row(row)
self.__find_and_insert_candidate_with_only_one_occurence_for_this_section(iterator)
#endregion
#region Find a column with a candidate that has only one occurrence and insert it
def __find_column_with_candidate_with_only_one_occurrence_and_insert_it(self):
for column in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__find_and_insert_candidate_with_only_one_occurence_for_this_column(column)
def __find_and_insert_candidate_with_only_one_occurence_for_this_column(self, column : int):
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_column(column)
self.__find_and_insert_candidate_with_only_one_occurence_for_this_section(iterator)
#endregion
#region Find a block with a candidate that has only one occurrence and insert it
def __find_block_with_candidate_with_only_one_occurrence_and_insert_it(self):
for row in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
for column in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
self.__find_and_insert_candidate_with_only_one_occurence_for_this_block(IndicesOfCell(row, column))
def __find_and_insert_candidate_with_only_one_occurence_for_this_block(self, cell_to_start_from : IndicesOfCell):
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_block(cell_to_start_from)
self.__find_and_insert_candidate_with_only_one_occurence_for_this_section(iterator)
#endregion
#region Find and insert candidate with only one occurrence for this section of the sudoku
def __find_and_insert_candidate_with_only_one_occurence_for_this_section(self, iterator : Iterator):
for candidate in range(1, self.__sudoku.values_for_side_of_a_sudoku + 1):
cell = self.__find_the_cell_in_which_to_insert_value(iterator, candidate)
self.__insert_the_value_and_update_the_candidates(cell, candidate)
def __find_the_cell_in_which_to_insert_value(self, iterator : Iterator, candidate : int) -> IndicesOfCell:
references_to_the_cells = list(self.__find_the_cells_that_contain_the_candidate(iterator, candidate))
return references_to_the_cells[0] if len(references_to_the_cells) == 1 else None
def __find_the_cells_that_contain_the_candidate(self, iterator : Iterator, candidate : int) -> set:
references_to_the_cells = set()
for cell in iterator:
self.__if_cell_has_this_candidate_add_it_to_the_list_of_references(cell, candidate, references_to_the_cells)
return references_to_the_cells
def __if_cell_has_this_candidate_add_it_to_the_list_of_references(self, cell : IndicesOfCell,
candidate : int, references : set):
if self.__cell_has_candidate(cell, candidate):
references.add(cell)
#endregion
#region Insert the value and update the candidates
def __insert_the_value_and_update_the_candidates(self, cell : IndicesOfCell, value : int):
if self.__sudoku.insert_value_in_cell(cell, value):
self.__count_inserted += 1
self.__update_candidates(cell, value)
#region Update candidates
def __update_candidates(self, cell : IndicesOfCell, value_confirmed : int):
self.__candidates.pop(cell)
self.__update_row_candidates(cell.row, value_confirmed)
self.__update_column_candidates(cell.column, value_confirmed)
self.__update_block_candidates(self.__sudoku.first_cell_of_the_block(cell), value_confirmed)
def __update_row_candidates(self, row : int, value_confirmed : int):
for column in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__remove_a_candidate(IndicesOfCell(row, column), value_confirmed)
def __update_column_candidates(self, column : int, value_confirmed : int):
for row in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__remove_a_candidate(IndicesOfCell(row, column), value_confirmed)
def __update_block_candidates(self, cell_to_start_from : IndicesOfCell, value_confirmed : int):
iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_block(cell_to_start_from)
for cell in iterator:
self.__remove_a_candidate(cell, value_confirmed)
#region Remove a candidate
def __remove_a_candidate(self, cell : IndicesOfCell, candidate_to_be_deleted : int) -> bool:
if self.__cell_has_candidate(cell, candidate_to_be_deleted):
self.__candidates[cell].remove(candidate_to_be_deleted)
return True
return False
def __cell_has_candidate(self, cell : IndicesOfCell, candidate: int) -> bool:
return cell in self.__candidates and candidate in self.__candidates[cell]
#endregion
#endregion
#endregion
#region Find the row where a candidate belongs to only one block, if this row exists remove excess candidates from the block
def __finds_the_row_in_which_a_candidate_belongs_to_only_one_block(self):
for row in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__find_the_candidate_belonging_to_only_one_block_in_this_section(
self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_row(row)
)
#endregion
#region Find the column where a candidate belongs to only one block, if this column exists remove excess candidates from the block
def __finds_the_column_in_which_a_candidate_belongs_to_only_one_block(self):
for column in range(self.__sudoku.values_for_side_of_a_sudoku):
self.__find_the_candidate_belonging_to_only_one_block_in_this_section(
self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_column(column)
)
#endregion
def __find_the_candidate_belonging_to_only_one_block_in_this_section(self, iterator : Iterator):
for candidate in range(1, self.__sudoku.values_for_side_of_a_sudoku + 1):
self.__if_the_candidate_belongs_to_only_one_block_in_this_section_update_candidates_of_block(iterator, candidate)
def __if_the_candidate_belongs_to_only_one_block_in_this_section_update_candidates_of_block(self, iterator : Iterator,
candidate : int):
self.__if_the_candidate_belongs_to_a_part_of_the_section_updates_the_candidates_of_this_part(
iterator, candidate, self.__sudoku.these_cells_belong_to_a_single_block,
lambda a_set : self.__sudoku.get_the_set_of_cells_indices_of_a_block(list(a_set)[0])
)
#region Find the block where a candidate belongs to only one row, if this block exists remove excess candidates from the row
def __finds_the_block_in_which_a_candidate_belongs_to_a_single_row(self):
for row in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
for column in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
self.__find_the_candidate_belonging_to_only_one_row_in_this_block(
self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_block(IndicesOfCell(row, column))
)
def __find_the_candidate_belonging_to_only_one_row_in_this_block(self, iterator : Iterator):
for candidate in range(1, self.__sudoku.values_for_side_of_a_sudoku + 1):
self.__if_the_candidate_belongs_to_only_one_row_in_this_block_update_candidates_of_row(iterator, candidate)
def __if_the_candidate_belongs_to_only_one_row_in_this_block_update_candidates_of_row(self, iterator : Iterator,
candidate : int):
self.__if_the_candidate_belongs_to_a_part_of_the_section_updates_the_candidates_of_this_part(
iterator, candidate, self.__sudoku.these_cells_belong_to_a_single_row,
lambda a_set : set(self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_row(list(a_set)[0].row))
)
#endregion
#region Find the block where a candidate belongs to only one column, if this block exists remove excess candidates from the column
def __finds_the_block_in_which_a_candidate_belongs_to_a_single_column(self):
for row in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
for column in range(0, self.__sudoku.values_for_side_of_a_sudoku, 3):
self.__find_the_candidate_belonging_to_only_one_column_in_this_block(
self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_block(IndicesOfCell(row, column))
)
def __find_the_candidate_belonging_to_only_one_column_in_this_block(self, iterator : Iterator):
for candidate in range(1, self.__sudoku.values_for_side_of_a_sudoku + 1):
self.__if_the_candidate_belongs_to_only_one_column_in_this_block_update_candidates_of_column(iterator, candidate)
def __if_the_candidate_belongs_to_only_one_column_in_this_block_update_candidates_of_column(self, iterator : Iterator,
candidate : int):
self.__if_the_candidate_belongs_to_a_part_of_the_section_updates_the_candidates_of_this_part(
iterator, candidate, self.__sudoku.these_cells_belong_to_a_single_column,
lambda a_set : set(self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_column(list(a_set)[0].column))
)
#endregion
def __if_the_candidate_belongs_to_a_part_of_the_section_updates_the_candidates_of_this_part(self, iterator : Iterator,
candidate : int,
these_cells_belong_to_a_single_section : Callable[[set], bool],
get_the_set_of_cells_indices_of_a_section : Callable[[IndicesOfCell], set]):
section_of_interest = self.__find_the_cells_that_contain_the_candidate(iterator, candidate)
if these_cells_belong_to_a_single_section(section_of_interest):
self.__delete_the_candidate_from_the_other_parts_of_that_section(
get_the_set_of_cells_indices_of_a_section(section_of_interest) - set(iterator),
candidate
)
def __delete_the_candidate_from_the_other_parts_of_that_section(self, cells_to_modify : set, candidate : int):
for cell in cells_to_modify:
self.__count_excess_candidates_removed += bool(self.__remove_a_candidate(cell, candidate))
#TODO: refactoring
def __find_sets_of_candidates_discovered_in_row(self):
    """Naked-subset elimination, row by row.

    For each row, look for a group of empty cells that all share exactly the
    same candidate set; when the group size equals the size of that candidate
    set, those candidates can only live inside the group, so they are removed
    from every other cell of the row.

    NOTE(review): the indentation below was reconstructed from an
    indentation-stripped dump — verify the nesting against the original.
    """
    for row in range(self.__sudoku.values_for_side_of_a_sudoku):
        # NOTE(review): `iterator` is traversed more than once below (the
        # for-loop and set(iterator)); this only works if it is a reusable
        # iterable rather than a one-shot iterator — confirm.
        iterator = self.__sudoku.get_the_iterator_of_the_indices_of_the_cells_in_the_row(row)
        iterator2 = list(iterator)
        for i in iterator:
            references_of_cell = set()
            if self.__sudoku.cell_is_empty(i):
                references_of_cell.add(i)
                # Shrink the comparison pool so each pair is examined once.
                iterator2.remove(i)
                for j in iterator2:
                    # Identical candidate sets -> same (potential) naked subset.
                    if self.__sudoku.cell_is_empty(j) and self.__candidates[i] == self.__candidates[j]:
                        references_of_cell.add(j)
                # Naked subset found: exactly as many cells as candidates.
                if len(self.__candidates[i]) == len(references_of_cell):
                    for candidate in self.__candidates[i]:
                        self.__delete_the_candidate_from_the_other_parts_of_that_section(
                            set(iterator) - references_of_cell, candidate
                        )
#region Check if you have stalled, or if you have come out of a stall
def __check_if_a_stall_has_occurred(self):
    """A pass that inserted nothing means the insertion techniques stalled.

    Also resets the per-pass insertion counter for the next pass.
    """
    nothing_was_inserted = (self.__count_inserted == 0)
    self.__stall = nothing_was_inserted
    self.__count_inserted = 0
def __check_if_the_stall_has_been_resolved(self):
    """The stall is over once at least one excess candidate was removed.

    Also resets the per-pass removal counter for the next pass.
    """
    nothing_was_removed = (self.__count_excess_candidates_removed == 0)
    self.__stall = nothing_was_removed
    self.__count_excess_candidates_removed = 0
#endregion
#endregion
#region Get Solution
def get_solution(self) -> ClassicSudoku:
    """Return the solver's sudoku, solved as far as the solver has progressed."""
    return self.__sudoku
#endregion
|
{"/solver/solver.py": ["/puzzle/puzzle.py"], "/utility/printUtility.py": ["/puzzle/cell.py", "/puzzle/sudoku.py"], "/puzzle/sudoku.py": ["/puzzle/cell.py", "/puzzle/puzzle.py", "/utility/printUtility.py"], "/solver/sudokuSolver.py": ["/solver/solver.py", "/puzzle/sudoku.py", "/puzzle/cell.py", "/utility/printUtility.py"]}
|
30,898
|
ssh6189/2019.12.10
|
refs/heads/master
|
/test.py
|
# Smoke test for the local `calc` module: calc.add(5, 10) should print 15.
import calc
print(calc.add(5, 10))
|
{"/test.py": ["/calc.py"]}
|
30,899
|
ssh6189/2019.12.10
|
refs/heads/master
|
/dictionery.py
|
# Tutorial script exploring dict behaviour.
# NOTE(review): this script raises deliberately at several points (see notes
# below), and the last three lines use Python 2 `print` statements and the
# removed `cmp()` builtin — the file does not even parse under Python 3.
# Keys must be unique and immutable; values are mutable (changeable).
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}  # NOTE(review): shadows the builtin `dict`
print ("dict['Name']: ", dict['Name'])
print ("dict['Age']: ", dict['Age'])
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
print ("dict['Alice']: ", dict['Alice']) # What happens when accessing a missing key? -> KeyError
dict['Age'] = 8; # change an element's value
dict['School'] = "DPS School" # add a new element
print ("dict['Age']: ", dict['Age'])
print ("dict['School']: ", dict['School'])
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict1['Name'] # delete a single element — NOTE(review): `dict1` is undefined at this point (NameError); presumably `dict` was meant
dict.clear() # remove all elements; the dict object remains as an empty dict instance
del dict # delete the dict object?
print(dict) # error? — NOTE(review): after `del`, the name resolves to the builtin `dict` again
print ("dict['Age']: ", dict['Age'])
print ("dict['School']: ", dict['School'])
dict = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'} # duplicate key gets overwritten (last one wins)
print ("dict['Name']: ", dict['Name'])
dict = {['Name']: 'Zara', 'Age': 7} # mutable object used as a key -> TypeError; keys must be immutable
print ("dict['Name']: ", dict['Name'])
dict = {'Name': 'Zara', 'Age': 7}
print ("Value : %s" % dict.items())
print ("Value : %s" % dict.keys())
print ("Value : %s" % dict.get('Age')) # requesting a value (get returns None for missing keys)
print ("Value : %s" % dict.get('Sex', "NA"))
dict = {'Sex': 'female', 'Age': 7, 'Name': 'Zara'}
print ("Values : ", list(dict.values()))
dict = {'Name': 'Manni', 'Age': 7, 'Class': 'First'} # number of dict elements
print ("Length : %d" % len (dict))
#######################################################
dict1 = {'Name': 'Zara', 'Age': 7};
dict2 = {'Name': 'Mahnaz', 'Age': 27};
dict3 = {'Name': 'Abid', 'Age': 27};
dict4 = {'Name': 'Zara', 'Age': 7};
# NOTE(review): Python 2 syntax below — SyntaxError on Python 3, and cmp() no longer exists.
print "Return Value : %d" % cmp (dict1, dict2)
print "Return Value : %d" % cmp (dict2, dict3)
print "Return Value : %d" % cmp (dict1, dict4)
|
{"/test.py": ["/calc.py"]}
|
30,900
|
ssh6189/2019.12.10
|
refs/heads/master
|
/팩토리얼.py
|
# Read a non-negative integer and print its factorial.
result = 1
n = int(input("수를 입력하시오."))
# Multiply result by 1..n (loop body never runs for n <= 0, leaving result == 1).
for factor in range(1, n + 1):
    result *= factor
print(result)
# Wait for Enter so the console window stays open.
str(input())
|
{"/test.py": ["/calc.py"]}
|
30,901
|
ssh6189/2019.12.10
|
refs/heads/master
|
/함수호출방식.py
|
def f():
    """Demo of local scope: the local string shadows the global `s`;
    print it and hand it back to the caller."""
    message = "I love London!"
    print(message)
    return message

s = "I love Paris!"
# The returned local value replaces the global binding.
s = f()
print(s)
|
{"/test.py": ["/calc.py"]}
|
30,902
|
ssh6189/2019.12.10
|
refs/heads/master
|
/이진수 변환 제작.py
|
num = int(input("수를 입력하시오."))
str
while num > 0:
a = num//2
b = num%2
|
{"/test.py": ["/calc.py"]}
|
30,903
|
ssh6189/2019.12.10
|
refs/heads/master
|
/calc.py
|
def add(x, y):
    """Return the sum of x and y."""
    return x + y


if __name__ == '__main__':
    # Demo run, executed only when this file is launched directly.
    print(add(3, 5))
# __name__ is a special variable:
# running calc.py directly stores '__main__' in __name__;
# importing it stores the module name ('calc') instead.
|
{"/test.py": ["/calc.py"]}
|
30,904
|
ssh6189/2019.12.10
|
refs/heads/master
|
/dictionery test.py
|
# Small dict demo: key/value views, their types, and element access.
seo = dict(name="ssh", age="16")
for view in (seo.keys(), seo.values()):
    print(view)
print(type(seo.keys()))
print(type(seo.values()))
for key in ("name", "age"):
    print(seo[key])
print(seo.get("name"))
|
{"/test.py": ["/calc.py"]}
|
30,905
|
ssh6189/2019.12.10
|
refs/heads/master
|
/사칙연산계산기.py
|
def calc(a, b, op):
if op == "+":
return a+b
elif op == "-":
return a-b
elif op == "*":
return a*b
elif op == "/":
return a/b
else:
return print("올바르지 않은 입력값입니다....")
if __name__ == "__main__":
x = int(input("수를 입력하시오."))
z = str(input("연산자를 입력하시오."))
y = int(input("수를 입력하시오."))
print("결과 : ", calc(x, y, z))
|
{"/test.py": ["/calc.py"]}
|
30,906
|
ssh6189/2019.12.10
|
refs/heads/master
|
/구구단 가로.py
|
# Multiplication tables (2..9) printed sideways: each printed line fixes the
# multiplier and walks the tables left to right.
def _print_tables(render):
    # render(dan, num) -> formatted cell text for dan x num
    for num in range(1, 10):      # multiplier 1..9, one output line each
        for dan in range(2, 10):  # table 2..9, one column each
            print(render(dan, num), end=" ")
        print()


# str.format version (available since Python 3).
_print_tables(lambda dan, num: "{0} X {1}={2:2d} ".format(dan, num, (num * dan)))
# f-string version (Python 3.6+).  The % operator also works, but the official
# docs no longer recommend it.
_print_tables(lambda dan, num: f'{dan}X{num}={num*dan} ')
|
{"/test.py": ["/calc.py"]}
|
30,907
|
ssh6189/2019.12.10
|
refs/heads/master
|
/Word Count.py
|
f = open("c:/Users/yesterday.txt",'r')
result = 0
for i in range(40):
yl = f.readline()
yl = yl.title()
print(yl)
if(yl.count("Yesterday")):
result = result + 1
print(result)
|
{"/test.py": ["/calc.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.