index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
5,691
|
raysmith619/sudoku
|
refs/heads/master
|
/src/test_kwargs.py
|
# test_kwargs.py
"""Small demo of how **kwargs is received, forwarded, and (mis)used."""


def fun1a(**kwargs):
    """Print each keyword argument received, one per line."""
    for key, val in kwargs.items():
        print(f"fun1a: {key}={val}")


def fun1(**kwargs):
    """Print kwargs as a dict and per-key, then forward unchanged to fun1a."""
    # was: f"fun1: **kwargs({kwargs}" -- missing the closing ')'
    print(f"fun1: **kwargs({kwargs})")
    for key, val in kwargs.items():
        print(f"fun1: {key}={val}")
    print("fun1 calling fun1a(**kwargs)")
    fun1a(**kwargs)


kargs = {"key_1": "key_1_val", "key_2": "key_2_val", "key_3": "key_3_val"}

print("\ncall:", 'fun1(a="a_arg", b="b_arg", c="c_arg")')
fun1(a="a_arg", b="b_arg", c="c_arg")

print("\ncall:fun1(**kargs)")
fun1(**kargs)

# Passing kwargs= by name makes a single key named 'kwargs' holding the dict
print("\ncall:fun1(kwargs=kargs)")
fun1(kwargs=kargs)
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,692
|
raysmith619/sudoku
|
refs/heads/master
|
/src/test_isinstance.py
|
# test_isinstance.py
"""
Not seeing SudokuData
"""
from SudokuBoard import SudokuBoard
from SudokuData import SudokuData
from SudokuPly import SudokuPly

# Build one of each class, then check whether isinstance() recognizes
# sd as a SudokuData.
sb = SudokuBoard()
sd = SudokuData()
sp = SudokuPly()

found = isinstance(sd, SudokuData)
if found:
    print(f"Found sd{sd} to be SudokuData instance")
else:
    print(f"Did not find sd{sd} to be SudokuData instance")
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,693
|
raysmith619/sudoku
|
refs/heads/master
|
/src/SudokuPuzzle.py
|
#SudokuPuzzle.py
# Adapted from SudokuData.py
#################
## sudokuPuzzle ##
#################
import time
from select_trace import SlTrace
from select_error import SelectError
from SudokuData import SudokuData
class SudokuPuzzle(SudokuData):
    """A Sudoku puzzle (SudokuData) plus solving statistics:
    move/backup counts, ply depth, empty-cell extremes, and timing.
    """

    def __init__(self, desc=None, file_name=None,
                 nmove=0,
                 nbackup=0,
                 **kwargs):
        """
        :desc: description of puzzle (default: "Basic Sudoku Puzzle")
        :file_name: file name, if known
        :nmove: cumulative number of moves (new square values)
        :nbackup: cumulative number of backups (square value retries)
        :kwargs: parameters passed to SudokuData base class
        """
        if desc is None:
            desc = "Basic Sudoku Puzzle"
        self.nmove = nmove
        self.nbackup = nbackup
        self.file_name = file_name
        self.set_start_time()
        self.solve_start = time.time()
        self.dur = 0                # Elapsed solve time (sec)
        self.ply = None             # Current ply, if any
        self.prev_ply = None        # Previous ply seen by track_move
        self.depth = 0
        self.max_depth = 0          # Maximum ply depth reached so far
        self.nempty = 0             # Empty squares at last tracked move
        self.min_nempty = None      # Fewest empties seen so far
        self.max_nempty = None      # Most empties seen so far
        super().__init__(**kwargs)

    def set_start_time(self):
        """ set/reset solve start time
        """
        self.solve_start = time.time()

    def add_cell(self, row=None, col=None, val=None):
        """ Add data square to puzzle
        :row: row number
        :col: column number
        :val: square number
        :raises SelectError: if any of row/col/val is missing
        """
        if row is None or col is None or val is None:
            raise SelectError(f" row, col and val must be specified row={row}, col={col}, val={val}")
        self.setCell(row=row, col=col, val=val)

    def file2puzzle(self, file=None):
        """ convert file name/object to puzzle
        :file: name if string, else open file stream
        :returns: puzzle, None if failure
        """
        if isinstance(file, str):
            self.file_name = file
            file = open(file)
        # was: file.splitlines() -- file objects have no splitlines();
        # the contents must be read first
        puzzle_str = file.read().splitlines()
        puzzle = self.str2puzzle(puzzle_str)
        return puzzle

    def track_move(self, ply=None):
        """ Track solution moves
        Check for backup here, comparing with ply.depth
        against prev_ply.depth
        :ply: SudokuPly, if known
        """
        self.dur = time.time() - self.solve_start
        self.prev_ply = self.ply
        if self.prev_ply is None:
            self.prev_ply = ply
        if ply is not None:
            self.nempty = ply.getNumEmpty()
            if self.min_nempty is None or self.nempty < self.min_nempty:
                self.min_nempty = self.nempty
            if self.max_nempty is None or self.nempty > self.max_nempty:
                self.max_nempty = self.nempty
            if ply.depth < self.depth:
                self.nbackup += 1
                SlTrace.lg(f"backup: move: {self.nmove} ply.depth:{ply.depth} < self.depth{self.depth}", "trace_move")
            self.depth = ply.depth
            # NOTE(review): this can count the same retreat twice when
            # prev_ply was also deeper -- confirm intended
            if self.depth < self.prev_ply.depth:
                self.nbackup += 1
            if self.max_depth is None or self.depth > self.max_depth:
                self.max_depth = self.depth
        self.prev_ply = ply
        self.dur = time.time() - self.solve_start
        if SlTrace.trace("trace_move"):
            self.trace_check(ply=ply)

    def trace_check(self, ply=None, prefix=None):
        """ Do timely reporting of trace
        :ply: SudokuPly, if known (stats actually come from self)
        :prefix: optional message prefix
        """
        if prefix is None:
            prefix = ""
        sq_info = f"empty: {self.nempty}[{self.min_nempty}-{self.max_nempty}]"
        depth_info = f" depth: {self.depth} max depth: {self.max_depth}"
        sol_time = f"in {self.dur:.2f} sec"
        SlTrace.lg(f"{prefix} move: {self.nmove} {sq_info} backup: {self.nbackup} {depth_info} {sol_time}")

    def new_backup(self, ply=None):
        """ Track moves - only called if explicit backup, else calculation is done implicitly
        :ply: SudokuPly, if known
        """
        self.nbackup += 1
        self.track_move(ply=ply)

    def new_move(self, ply=None):
        """ Track moves
        :ply: SudokuPly, if known
        """
        self.nmove += 1
        self.track_move(ply=ply)

    def copy(self):
        """ Copy puzzle to insulate changes in data
        :returns: copy of data with new objects for cells
        """
        cp = SudokuPuzzle(rows=self.nRow, grows=self.nSubRow,
                          cols=self.nCol, gcols=self.nSubCol,
                          file_name=self.file_name,
                          nmove=self.nmove,
                          nbackup=self.nbackup)
        for ri in range(cp.nRow):
            row = ri + 1
            for ci in range(cp.nCol):
                col = ci + 1
                val = self.getCellVal(row=row, col=col)
                if val is not None:
                    cp.add_cell(row=row, col=col, val=val)
        return cp
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,694
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/SudokuPly.py
|
#SudokuPly.pm
"""
One play possibilities
"""
from select_trace import SlTrace
from select_error import SelectError
from sudoku_search_stop import SudokuSearchStop
import sudoku_globals as g
from SudokuData import SudokuData, CellDesc
from docutils.nodes import row
class SudokuPly(SudokuData):
    """One play (ply) of the search: a board state plus choice bookkeeping."""

    ch_one_depth = None      # Depth in choice 0
    stop_searching = False   # Set True to stop searching (soon)

    # Setup move display
    @classmethod
    def setDisplay(cls, display_rtn=None, display_time=None):
        """Register the move-display callback and its delay (default .3 sec)."""
        cls.DisplayRtn = display_rtn
        cls.Display_time = .3 if display_time is None else display_time

    @classmethod
    def clear_search_stop(cls):
        """Re-arm searching after a stop request."""
        cls.stop_searching = False

    @classmethod
    def stop_search(cls, msg=None):
        """Ask the current search to stop as soon as possible."""
        cls.stop_searching = True

    @classmethod
    def ck_search(cls):
        """Raise SudokuSearchStop if a stop has been requested."""
        if cls.stop_searching:
            raise SudokuSearchStop()
def __init__(self,displayRtn=None, displayTime=None, **kwargs):
if SlTrace.trace("sudokuply"):
SlTrace.lg(f"\nSudokuPly(kwargs={kwargs}")
self.ck_search()
self.choices = None
self.setCellList = [] # setCell trace
if displayRtn is not None:
self.DisplayRtn = displayRtn
if displayTime is not None:
self.DisplayTime = displayTime
self.level = 0
self.depth = 0
super().__init__(**kwargs)
self.startList = None
base = kwargs['base'] if 'base' in kwargs else None
if SlTrace.trace("sudokuply"):
if base is not None:
base.display("SudokuPly(base)")
self.setPlyData(base=base)
self.choices = self.getChoices()
#
# Populate puzzle
# - populate given cells
#
def populatePuzzle(self, **kwargs):
""" Populate puzzle
populate given cells
:startLis: t=><start array of cells>
:choice:=><index in startList for prospective cell>
:returns: array of fully populated bds
"""
self.ck_search()
startList=None
if 'starList' in kwargs:
startList = kwargs['starList']
choice=None
if 'choice' in kwargs:
choice = kwargs['choice']
self.level += 1
if startList is None:
SlTrace.lg(f"popuatePuzzle(level:{self.level}): no startList")
startList = []
startCells = startList[:]
if (SlTrace.trace('depth')):
SlTrace.lg(f"populatePuzzle: level={self.level}"
+ f" depth = {self.depth}")
self.display("end populatePuzzle")
SlTrace.lg(f"================")
if len(startCells) == 0:
# Check if solution possible
# and unique
sols = self.solvePuzzle(nFirst=10)
self.level -= 1
return sols
if choice is not None:
r_choice = startCells.pop(choice)
r_chvals = r_choice.vals
chvals = r_chvals
for chval in chvals:
ply = self.copy()
row = r_choice.row
col = r_choice.col
ply.setCell(row, col, chval)
del kwargs['choice'] # Look for choices
sols = ply.populatePuzzle(**kwargs)
if sols == 0:
continue
self.level -= 1
return sols
self.level -= 1
return [] # No acceptable patterns
choices = self.orderChoices(startCells)
choices = choices[:]
if (len(choices) == 0):
self.level-= 1
return []
ncmin = choices[0].nval
if ncmin == 0:
self.level -= 1
return [] # No choices for most constrained
maxind = len(choices) - 1
r_max = choices[maxind] # Look at least constrained
r_chvals = r_max.vals
chvals = r_chvals[:]
for chval in chvals:
sols = self.populatePuzzle(
startList=choices,
choice=maxind,
)
if sols == 0:
continue
self.level -= 1
return sols
self.level -= 1
return []
#
# Set current ply data
def setPlyData(self, base=None):
if SlTrace.trace("setPlyData"):
self.display(f"setPlyData before depth={self.depth} choices={self.choices}")
if base is None:
self.choices = [] # Cell choices ordered by number of vals
self.nchoice = 0 # Number of choices
self.depth = 0
self.data = None
if isinstance(base, SudokuPly):
self.back = base # link back
###r_base_data = base.data
###self.data = SudokuPly(base=r_base_data)
a_start_list = base.startList
a_new_list = self.startList = []
depth = self.depth
if depth is None:
depth = 0
self.depth = depth + 1
if base.level is not None:
self.level = base.level
if a_start_list is not None:
a_new_list.extend(a_start_list)
elif isinstance(base,SudokuData):
pass
###self.data = SudokuData(base=base)
else:
raise SelectError(f"Can't set SudokuPly from {base}")
self.choices = self.getChoices()
if SlTrace.trace("setPlyData"):
self.display(f"setPlyData after depth={self.depth} choices={self.choices}")
# Get cell info
def getCell(self, row, col): # Returns: data cell
cell = self.vals.getCell(row, col)
return cell
# Get base data (SudokuData)
# or new data with partial subset
def getData(self, data=None, subset=None): # Returns: SudokuData
subset = subset
if data is None:
data = self.data
if subset is None:
return data
new_data = data()
for cell in subset:
row = cell.row
col = cell.col
val = data.getCell(row, col)
new_data.setCellVal(row=row, col=col, val=val)
return new_data
# get links to all ply ancestors
def getPlyList(self): # Retrns: List of plys, including this
plys = []
back = self
while back is not None:
plys.append(back)
back = back.back
return plys
def getSetList(self): # Returns: list of setCell hash
plys = self.getPlyList()
sets = []
for ply in plys:
setCellList = self.setCellList
sets.append(setCellList)
return sets
#
# Make puzzle setup
#
def makePuzzle(self, *args, **kwargs): # Returns: ply with sol else None
"""
:startList: <start array of cells>
:choice: <index in startList for prospective cell>
"""
startList = None
if 'startList' in kwargs:
pops = self.populatePuzzle(*args, **kwargs)
startList = kwargs['startList']
if startList is None:
raise SelectError("makePuzzle: undefined startList")
if len(pops) == 1:
return pops[0]
return None
# Assemble list of next move choices
# sorted in ascending number of values per cell
@staticmethod
def _choices_cmp_val(elm):
return elm.nval
''' inherited from SudokuData
# Get next empty cell in board data, if any
def getNextEmpty(self, ): # Returns: cell ref, else None if none empty
r_data = self.data
return r_data.getNextEmpty()
'''
def getNextChoice(self):
""" Get next suggested cell
going through start list, choices till best choice in start list
then best choice, till done then empty
for now we check for empty
"""
if SlTrace.trace("getnextchoice"):
self.display("getNextChoice")
self.choices = self.vals.getChoices()
if SlTrace.trace("getnextchoice"):
SlTrace.lg(f"getNextChoice of {self.choices}")
start_list = self.startList
choices = self.choices
if choices is None or len(choices) == 0:
return None
ch = self.choices[0]
if ch is None or len(ch.vals) == 0:
return None # No choice for most constrained
if (start_list is None
or len(start_list) == 0): # No start -> take choice
return self.choices.pop(0)
# Choose choice specified in start_list
for ich in range(choices):
choice = choices[ich]
ch_row = choice.row
ch_col = choice.col
for ist in range(len(start_list)):
start = start_list[ist]
if (start.row == ch_row
and start.col == ch_col):
start_list.pop(ist)
self.choices.pop(ich)
return choice
return None
#
# Set new ply as deep copy of old ply
# Set from arg's data
# This may be opdimized later
def setPly(self, r_ply):
r_data = r_ply.data
self.setData(r_data)
# Set puzzle start up values
#
def setPuzzle(self, **kwargs): # Returns: TRUE iff successful setup
#startList= list of cells to populate
if "startList" not in kwargs:
raise SelectError("missing startList")
starts = kwargs["startList"]
# Process until list completed
while starts > 0:
pass
    def solveChoice(self, *args, **kwargs): # Returns: solution list, empty if none
        """ Solve puzzle for given choice
        If no choice - use first in 'choices'
        Plan
        Check for possible solutions by keeping a list of cells ordered
        by increasing number of legal values.
        Loop over candidates in choice
        Choose candidate value from cell
        Solve resulting board
        returning list of solutions, possibly empty
        """
        if SlTrace.trace("solvechoice"):
            SlTrace.lg(f"solveChoice: args={args} kwargs={kwargs}")
            self.display()
        if self.getNextEmpty() is None:
            # Filled
            if self.DisplayRtn is not None:
                self.DisplayRtn()
            return [self]
        choice = kwargs['choice'] if 'choice' in kwargs else None # choice CellDesc
        nfirst = kwargs['first'] if 'first' in kwargs else None # Limit to first n if defined
        start_list = kwargs['startList'] if 'startList' in kwargs else None
        # Add selection restrictions, if any
        if start_list is not None:
            if not hasattr(self, 'startList'):
                self.startList = []
            # NOTE(review): append() nests the whole list as one element;
            # presumably extend() was intended -- confirm against callers
            self.startList.append(start_list)
        if choice is None:
            choice = self.getNextChoice()
            if choice is None:
                return []
        if SlTrace.trace("solvechoice"):
            SlTrace.lg(f"solveChoice: choice={choice} nfirst={nfirst} start_list={start_list}")
            self.display()
        sols = [] # list of solutions, possibly empty
        row = choice.row
        col = choice.col
        vals = choice.vals
        for val in vals:
            legals = self.getLegalVals(row=row, col=col)
            if len(legals) < 1:
                SlTrace.lg(f"solveChoice {kwargs} - len(legals)<1")
                # re-query for debugging -- result intentionally unused
                legals = self.getLegalVals(row=row, col=col)
                continue
            if val not in legals:
                SlTrace.lg(f"val{val} not in row={row} col={col} legals:{legals}")
                continue
            # NOTE(review): nfirst may be None here (TypeError below);
            # all visible callers pass first= -- confirm
            sol1s = self.solveChoiceOne(row=row, col=col, val=val,
                                        first=nfirst-len(sols))
            sols.extend(sol1s)
            if len(sols) >= nfirst:
                break # Quit if we got enough
        return sols
#
def solveChoiceOne(self, **kwargs):
""" Solve puzzle for one specific row, col, value
Returns: list of solutions, possibly empty
"""
row = kwargs['row']
col = kwargs['col']
val = kwargs['val']
nfirst = kwargs['first']
self.enterChoiceOne(**kwargs)
ch1 = SudokuPly(base=self) # Create deep copy of ourself
legals = self.getLegalVals(row=row, col=col)
if len(legals) < 1:
SlTrace.lg(f"solveChoiceOne {kwargs} - len(legals)<1")
legals = self.getLegalVals(row=row, col=col)
else:
ch1.setCell(row, col, val)
ret = ch1.solveChoice(first=nfirst)
self.exitChoiceOne(ret)
return ret
#
# Deep copy of ply
#
def copy(self, ): # Returns: deep copy
ply = SudokuPly(base=self)
return ply
# Simple display of data area
def display(self, msg=None):
if msg is None:
msg = "SudokuPly data"
super().display(msg=msg)
def displaySetCellList(self, ):
setCells = self.getSetList()
SlTrace.lg(f"setCells:", "display")
for r_set in setCells:
col = r_set.col
row = r_set.row
val = r_set.val
SlTrace.lg(f"C{col}R{row}:{val}", "display")
SlTrace.lg(f"")
def enterChoiceOne(self, row=None, col=None, val=None, first=None):
if self.ch_one_depth is None:
self.ch_one_depth = 0
self.ch_one_depth += 1
if SlTrace.trace("solve"):
SlTrace.lg(f"enterChoiceOne depth: {self.ch_one_depth}"
+ f" row={row} col={col} val={val}")
self.display()
if self.Display_time is not None and self.DisplayRtn is not None:
self.DisplayRtn()
def exitChoiceOne(self, res=None):
if self.ch_one_depth is None:
self.ch_one_depth = 0
self.ch_one_depth -= 1
if SlTrace.trace("solve"):
SlTrace.lg(f"exitChoiceOne depth: {self.ch_one_depth}")
self.display()
def solvePuzzle(self, startList=None, nFirst=1): # Returns: ref to solution, else None
return self.solveChoice(first=nFirst)
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,695
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/sudoku.py
|
# sudoku.py
"""
This is a program to solve, and someday create, Sudoku puzzles
It was adapted from the Perl program sudoku.pl
To ease the adaption process the original variable, function and file names, where possible, have been preserved.
The Trace.pm module use has been replaced by the select_trace.py module.
"""
##############
## External ##
##############
import argparse
import datetime
import os
import re
import sys
import time
import traceback
from math import *
from tkinter import *
###############
## Libraries ##
###############
from select_trace import SlTrace
from select_error import SelectError
base_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
SlTrace.setLogName(base_name)
SlTrace.lg(f"{base_name} {' '.join(sys.argv[1:])}")
from select_window import SelectWindow
from select_control import SelectControl
from variable_control import VariableControl
from trace_control import TraceControl
from tkMath import tkMath
from sudoku_subs import *
import sudoku_globals as g
g.initialize_globals()
##################
## User-defined ##
##################
# Set up main board
def prime_exit():
    """Window-close/exit handler: log, stop the main loop, exit the program."""
    SlTrace.lg("Prime Exit")
    g.running = False
    pgm_exit()
mw = g.Display_mw = Tk()  # To support grid layout - MUST be done before wm
mw.title("Sudoku Playing")
mw.protocol("WM_DELETE_WINDOW", prime_exit)
tkMath.setup(mw)

# Persistent run-control values (editable via the controls window)
cF = SelectControl(control_prefix="run_control", update_report=update_report)
cF.make_label("Puzzle Dimensions")
g.nCol = cF.make_val("nCol", 9)
g.nSubCol = cF.make_val("nSubCol", 3)
g.nRow = cF.make_val("nRow", 9)
g.nSubRow = cF.make_val("nSubRow", 3)
cF.make_label("Puzzle Size")
g.bSize = cF.make_val("bSize", 3)        # Main Bd size inches
g.sSize = cF.make_val("sSize", 2)        # Solution Bd size
g.nFirst = cF.make_val("nFirst", 5)      # first n solutions
g.makePuzzle = cF.make_val("makePuzzle", False)  # If defined, generate puzzle with this many cells filled
g.makePuzzleSym = cF.make_val("PuzzleSym", "c")  # Puzzle symetry pref for initial settings
                                                 # x, y, center, n - none
g.traceList = cF.make_val("traceList", "any")
g.UsePuzzle = cF.make_val("UsePuzzle", False)
g.xPer = cF.make_val("xPer", False)      # experimental

# Progress display variables
cF.make_label("Display Time")
g.Display_time = cF.make_val("Display_time", .5)  # Display time, None - no display
                                                  # 0 - wait for continue
                                                  # > 0 delay (sec)
g.update_time = cF.make_val("update_time", 10.)
g.Display_board = None
g.Display_prev_time = 0                  # Previous display time

##################
## Main program ##
##################
parser = argparse.ArgumentParser()
parser.add_argument('--cols', type=int, dest='nCol', default=g.nCol)    # Number of cell col
parser.add_argument('--bSize', type=float, dest='bSize', default=g.bSize)  # Board size in inches
parser.add_argument('--displayTime', type=float, dest='Display_time', default=g.Display_time)
                                        # Solution step display time (sec)
                                        # 0 - till user ACK, None - none
parser.add_argument('--first', type=int, dest='nFirst', default=g.nFirst)  # first(atleast) solutions
parser.add_argument('--gcols', type=int, dest='nSubCol', default=g.nSubCol)  # Number of cell col in group
# was '--grows=' -- trailing '=' left over from the Perl Getopt spec
parser.add_argument('--grows', type=int, dest='nSubRow', default=g.nSubRow)  # Number of cell row in group
parser.add_argument('--makePuzzle', type=int, dest='makePuzzle', default=g.makePuzzle)  # Make random puzzle with n start
parser.add_argument('--msymetric', type=str, dest='makePuzzleSym', default=g.makePuzzleSym)  # Make puzzle symetry
parser.add_argument('--rows', type=int, dest='nRow', default=g.nRow)    # Number of cell row
# was '--sSize=f', '--traceList=s', '--xper=n' -- Perl-style type suffixes
parser.add_argument('--sSize', type=float, dest='sSize', default=g.sSize)  # Solution board size
parser.add_argument('--traceList', type=str, dest='traceList', default=g.traceList)  # Comma separated trace list
parser.add_argument('--uPuzzle', type=str2bool, dest='UsePuzzle', default=g.UsePuzzle)  # Use preset puzzle
# was type=str2bool -- update_time is a float number of seconds
parser.add_argument('--update_time', type=float, dest='update_time', default=g.update_time)
parser.add_argument('--xper', type=int, dest='xPer', default=g.xPer)    # Experimental = 1
args = parser.parse_args()               # or raise SelectError("Illegal options")
SlTrace.lg(f"args: {args}")
g.nCol = args.nCol
g.bSize = args.bSize
g.Display_time = args.Display_time
g.nFirst = args.nFirst
g.nSubCol = args.nSubCol
g.makePuzzle = args.makePuzzle
g.makePuzzleSym = args.makePuzzleSym
g.nRow = args.nRow
g.sSize = args.sSize
g.traceList = args.traceList
g.UsePuzzle = args.UsePuzzle
g.xPer = args.xPer
# was: --update_time was parsed but never copied back into g
g.update_time = args.update_time

# Update persistent values
cF.set_val("bSize", g.bSize)             # Main Bd size inches
cF.set_val("sSize", g.sSize)             # Solution Bd size
cF.set_val("nFirst", g.nFirst)           # first n solutions
cF.set_val("makePuzzle", g.makePuzzle)   # If defined, generate puzzle with this many cells filled
cF.set_val("PuzzleSym", g.makePuzzleSym) # Puzzle symetry pref for initial settings
                                         # x, y, center, n - none
cF.set_val("nCol", g.nCol)
cF.set_val("nSubCol", g.nSubCol)
cF.set_val("nRow", g.nRow)
cF.set_val("nSubRow", g.nSubRow)
cF.set_val("traceList", g.traceList)
cF.set_val("update_time", g.update_time)
cF.set_val("UsePuzzle", g.UsePuzzle)
cF.set_val("xPer", g.xPer)               # experimental
cF.set_val("Display_time", g.Display_time)

trace = g.traceList is not None
SlTrace.setFlags(g.traceList)

# Derive defaults for any dimension still unset
if g.nSubCol is None:
    # was: assigned to a local 'nSubCol', discarding the computed value
    g.nSubCol = int(sqrt(g.nCol))
if g.nRow is None:
    g.nRow = g.nCol                      # Set square by default
if g.nSubRow is None:
    # was: assigned to a local 'nSubRow', discarding the computed value
    g.nSubRow = int(sqrt(g.nRow))

# Default puzzle size: a third of the board, rounded down to even
makePuzzle = int(g.nCol*g.nRow/3)
# was: tested g.makePuzzle but decremented the local -- test the value
# actually being adjusted
if makePuzzle % 2 == 1:
    makePuzzle -= 1                      # Make even

mw.update()
bs_in = int(tkMath.inchesToPixels(g.bSize))
w = bs_in + 200
h = bs_in + 100

g.nSol = None                            # Solution window number
g.top_fr = Frame(mw)
g.top_fr.pack(side='top')
control_fr = Frame(g.top_fr)
control_fr.pack(side='top')

app = SelectWindow(g.Display_mw,
                   title="Playing Sudoku",
                   arrange_selection=False,
                   pgmExit=prime_exit,
                   file_open=file_open,
                   )
app.add_menu_command("Puzzle", file_open)       # Dedicated puzzle menu item
app.add_menu_command("Contols", set_controls)   # Display variable controls (label typo preserved)
mw.geometry(f"{w}x{h}")
mw.update()

solve_puzzle = Button(control_fr,
                      text="Solve Puzzle",      # Guess all remaining
                      command=solve_main_puzzle,
                      )
solve_puzzle.pack(side='left')
make_puzzle_b = Button(control_fr,
                       text="Make Puzzle",
                       command=make_puzzle,
                       )
make_puzzle_b.pack(side='left')
reset_b = Button(control_fr,
                 text="Reset",                  # Reset to initial setting
                 command=reset_board
                 )
reset_b.pack(side='left')
clear_b = Button(control_fr,
                 text="Clear Board",
                 command=clear_board,
                 )
clear_b.pack(side='left')

sbox_fr = None          # Set value frame
sbox = None             # selection box
sbox_row = None         # selected cell row
sbox_col = None
sbox_legal_vals = []    # legal vals in selection box
g.o_data = None         # Primary data
g.o_board = None        # Primary board
Initial_data = None     # Initial data values

# setup initial position
if g.UsePuzzle:
    use_puzzle()                         # Use premade puzzle
else:
    sols = make_puzzle(g.makePuzzle)

# Display progress during puzzle solution
while True:
    update()
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,696
|
raysmith619/sudoku
|
refs/heads/master
|
/src/sudoku_search_stop.py
|
# sudoku_search_stop.py
class SudokuSearchStop(Exception):
    """Base exception for this module; raised to stop a puzzle search."""
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,697
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/SudokuBoard.py
|
#SudokuBoard.pm
# Thanks to JCN for Tk reintroduction and insights.
# Sudoku Board Goals / Requirements
# 1. Runtime variable row, col, major rows, major cols
# 2. Runtime resize of puzzle board including text size
# text resizes based on number of rows...
#
from tkinter import *
from tkinter.font import Font
from select_trace import SlTrace
from select_error import SelectError
import sudoku_globals as g
from SudokuData import SudokuData, CellDesc, CellMark
markerTagDef = 1 # Default marker tag
"""
Argument setting for create_line
"""
class LineArgs:
    """Bag of arguments for a create_line call (see module note above)."""
    # NOTE(review): these are *class* attributes -- the list and dict are
    # shared by every LineArgs instance until an instance rebinds its own
    pos = None   # orthogonal position
    args = []    # positional args (shared default)
    kwargs = {}  # keyword args (shared default)
class SudokuBoard:
    """Tk GUI board for a Sudoku puzzle: rendering, selection, key input."""
    pixelPerIn = 117 # Screen pixels per in (a guess)
# Setup new board
# Creates main window unless provided in opth{mw}
def __init__(self,
mw = None,
frame = None, # If present, use this as basis of operation
data = None,
rows = None, # Used only if data is None
grows = None,
cols = None,
gcols = None,
# TBD - determine via system call
bdWidth = 3, # Board with in inches
bdHeight = 3, # Board height in inches
bdBg = "white", # Board background
bdBorderWidth = 6, # Board border width
bdBorderHeight = 6, # Board border height
bdBorderFg = "blue", # Board border color
# row, col, subrow, sub col from Data
bdFont = None,
bdFontName = "times", # Board font name
# Board fond size determined from row
cellSep = 2, # Cell separator in pixels
cellSepFg = "skyblue", # Cell separator color
groupSep = 6, # Cell group separator in pixels
groupSepFg = "skyblue", # Group separator color
hilightFg = "red",
selectBg = "pink",
markWidth = 4, # Marker width
markBg = "hotpink", # Marker backround color
initialData = None,
):
# TBD - determine via system call
self.bdWidth = bdWidth
self.bdHeight = bdHeight
self.bdBg = bdBg
self.bdBorderWidth = bdBorderWidth
self.bdBorderHeight = bdBorderHeight
self.bdBorderFg = bdBorderFg
# row, col, subrow, sub col from Data
self.bdFont = bdFont
self.bdFontName = bdFontName
# Board fond size determined from row
self.cellSep = cellSep
self.cellSepFg = cellSepFg
self.groupSep = groupSep
self.groupSepFg = groupSepFg
self.hilightFg = hilightFg
self.selectBg = selectBg
self.markWidth = markWidth
self.markBg = markBg
if data is not None:
if rows is not None or grows is not None or cols is not None or gcols is not None:
raise SelectError(f"Don't include data AND rows({rows}),"
f"cols({cols}),grows({grows}),gcols({gcols})")
rows = data.nRow
grows = data.nSubRow
cols = data.nCol
gcols = data.nSubCol
# Setup as empty cells, awaiting formatting marking info
self.cells = [[CellDesc(row=ri+1, col=ci+1) for ci in range(cols)] for ri in range(rows)]
if data is not None:
rows = data.nRow
grows = data.nSubRow
cols = data.nCol
gcols = data.nSubCol
for ri in range(rows):
row = ri + 1
for ci in range(cols):
col = ci + 1
data_val = data.getCellVal(row=row, col=col)
if data_val is not None:
self.cells[ci][ri].val = data_val # Set data value
else:
data = SudokuData(rows=rows, grows=grows, cols=cols, gcols=gcols)
self.data = data
self.initialData = initialData
if initialData is not None:
self.initialize(initialData)
# Allow trace or traceList
self.markH = {} # Marked cells by tag (0 -> default)
self.data = data # Possibly empty
# Uses mw if provided, else creates Minimal Tk setup
if mw is None:
mw = Tk()
mw.title("Sudoku GUI")
self.mw = mw
if frame is not None:
self.use_frame = True
self.frame = frame
self.cv = None
self.newBoard = 1
# Finish Tk setup
self.drawBoard()
self.bindKeys() # Setup key event processing
def addMark(self, tag=None, id=None, rc=None, col=None, row=None,
backColor=None, boundColor=None,
boundWidth=None, boundWidget=None):
markH = self.markH
if markH is None:
raise SelectError("undefined markH")
if tag not in markH:
r_marks = markH[tag] = [] # new tag
r_marks.append(CellMark(row=row, col=col, id=id, rc=rc,
backColor=backColor, boundColor=boundColor,
boundWidth=boundWidth, boundWidget=boundWidget))
    def initialize(self, initialData):
        """ initialize - TBD
        Placeholder: initialData is currently accepted and ignored.
        """
        pass
# Setup key event processing
def bindKeys(self):
mw = self.mw
mw.bind(0, self.key_digit)
mw.bind(1, self.key_digit)
mw.bind(2, self.key_digit)
mw.bind(3, self.key_digit)
mw.bind(4, self.key_digit)
mw.bind(5, self.key_digit)
mw.bind(6, self.key_digit)
mw.bind(7, self.key_digit)
mw.bind(8, self.key_digit)
mw.bind(0, self.key_digit)
def key_event(self, event):
""" Process key events not bound elsewhere
"""
SlTrace.lg(f"key_event {event}", "key_event")
# Get selected cells
def getSelected(self):
r_mk = self.getMarked()
if r_mk is None or len(r_mk) == 0:
return None
return r_mk
def keyDown(self):
r_c = self.getSelected()
if r_c is None:
return
row = r_c.row
col = r_c.col
if row < self.data.nRow:
row += 1 # Stop at last
self.selectCell(row, col)
def keyLeft(self):
r_c = self.getSelected()
if r_c is None:
return
row = r_c.row
col = r_c.col
if col > 1:
col -= 1 # Stop at first
self.selectCell(row, col)
def keyRight(self):
r_c = self.getSelected()
if r_c is None:
return
row = r_c.row
col = r_c.col
if col < self.data.nCol:
col += 1 # Stop at first
self.selectCell(row, col)
def keyUp(self):
r_c = self.getSelected()
if r_c is None:
return
row = r_c.row
col = r_c.col
if row > 1:
row -= 1 # Stop at first
self.selectCell(row, col)
def keySpace(self):
r_c = self.getSelected()
if r_c is None:
return
row = r_c.row
col = r_c.col
col += 1
if (col > self.data.nCol):
col = 1
row += 1
if row > self.data.nRow:
row += 1
if row > self.data.nRow:
row = 1
self.selectCell(row, col)
# Key processing functions
def key_digit(self, event):
key = ord(event.char) - ord('0')
r_mk = self.getSelected() # Only get first
if r_mk is None or len(r_mk) == 0:
SlTrace.lg("Nothing selected")
return
r_c = r_mk[0]
row = r_c.row
col = r_c.col
self.setCell(row, col, key)
# Clear markers
def clearMarks(self, tag=None):
if tag is None:
tag = markerTagDef # Do default marker
markH = self.markH
r_marks = markH[tag] if tag in markH else None
if r_marks is None:
return
cv = self.cv
for r_m in r_marks:
cv.delete(r_m.id)
del markH[tag]
# Clear all marks
def clearMarksAll(self):
markH = self.markH
if markH is None:
return
for tag in markH:
self.clearMarks(tag)
# Board clicked
def click_board(self, event):
SlTrace.lg(f"clickBoard {event}")
r_c = self.getClickedCell(event)
if r_c is None:
return
row = r_c.row
col = r_c.col
SlTrace.lg(f"click row={row} col={col}", "any")
if (self.isSelected(row, col)):
self.clearMarks()
else:
self.selectCell(row, col)
    # Set options when found in kwargs
    # Checks kwargs for all settings found in r_def hash
    # If option key is found in %opth that option key is set in r_set
    # r_def and r_set may be the same, e.g. self
    def setOpts(self, r_def, **kwargs):
        """Set attributes on self from non-None keyword arguments.

        :r_def: defaults hash; NOTE(review): currently unused - the
            comments above describe older (Perl-era) behavior where
            options were filtered against r_def. Confirm intent.
        :kwargs: option name -> value; None values are skipped
        """
        for key in kwargs:
            opt = kwargs[key]
            if opt is not None:
                setattr(self, key, opt)     # set directly on self (not r_def)
def destroy(self):
""" Destroy board
"""
cv = self.cv
if cv is not None:
cv.destroy()
self.cv = None
if self.frame is not None: # If part of a frame just destroy frame, leaving window
self.frame.destroy()
self.frame = None
elif self.mw is not None: # Else destroy window
self.mw.destroy()
self.mw = None
def display(self, msg=None):
if msg is None:
msg = "board display"
if self.data is not None:
self.data.display(msg=msg)
else:
SlTrace.lg(f"{msg} - no data")
# Get data cell
def getCell(self, row=None, col=None):
ri = row - 1
ci = col - 1
return self.cells[ci][ri]
# Get data cell
def getCellVal(self, row=None, col=None):
cell = self.getCell(row=row, col=col)
return cell.val
#
def setCell(self, row=None, col=None, val=None,
x1=None, y1=None, x2=None, y2=None):
""" Set cell to values, display value if present
Data is updated if present
"""
if self.ck_done():
return
if col is None:
raise SelectError(f"setCell: Missing column")
if col is None:
raise SelectError(f"setCell: Missing row")
if col < 1 or col > self.data.nCol:
raise SelectError(f"setCell column({col} out of bounds(1-{self.data.nCol})")
if row < 1 or col > self.data.nRow:
raise SelectError(f"setCell column({row} out of bounds(1-{self.data.nRow})")
r_c = self.getCell(row=row, col=col)
if r_c is None:
raise SelectError(f"No cell at row={row} col={col}")
if x1 is not None:
r_c.x1 = x1
if x2 is not None:
r_c.x2 = x2
if y1 is not None:
r_c.y1 = y1
if y2 is not None:
r_c.y2 = y2
cell_val_id = r_c.valId
cv = self.cv
data = self.data
if (data is not None):
if val is not None:
data.setCellVal(row, col, val) # Update data
if cell_val_id is not None:
if cv is not None:
cv.delete(cell_val_id)
# add new character
rowSize = self.rowSize
font_size = -(rowSize-1) # A bit shorter than cell
if self.isEmpty(val):
cell_disp = " " # Empty
else:
if isinstance(val, int):
cell_disp = f"{val}"
else:
cell_disp = val
opts = {'text' : cell_disp}
opts['font'] = Font(name=self.bdFontName, size=font_size, exists=False)
x = (r_c.x1 + r_c.x2)/2
y = (r_c.y1 + r_c.y2)/2
if cv is not None:
r_c.valId = cv.create_text([x, y], opts)
    def drawBoard(self):
        """Build the board display: canvas, clickable background,
        cells/divider lines (via drawCells) and outer border.

        Sizes come from bdWidth/bdHeight (inches) times pixelPerIn;
        x/yMin..Max record the drawable area inside the border.
        """
        if self.ck_done():
            return
        if self.data is None:
            SlTrace.lg("no data to draw")
            return
        top_frame = self.mw             # Default
        if self.frame is not None:
            top_frame = self.frame      # Place inside frame
        ppi = self.pixelPerIn
        bdWidth = self.bdWidth
        width = bdWidth*ppi             # board width in pixels
        bdHeight = self.bdHeight
        height = bdHeight*ppi           # board height in pixels
        bdBg = self.bdBg
        bdBorderWidth = self.bdBorderWidth
        bdBorderHeight = self.bdBorderHeight
        xmin = self.xMin = bdBorderWidth
        xmax = self.xMax = width - xmin
        ymin = self.yMin = bdBorderHeight
        ymax = self.yMax = height - ymin
        f1 = Frame(top_frame)
        f1.pack(expand = 0, fill = "x")
        f2 = Frame(top_frame)
        f2.pack(expand = 1, fill = "both")
        cv = Canvas(f2, width = width,
                    height = height,
                    bg = bdBg)
        cv.pack()
        self.cv = cv
        # Create a box for clicking
        cv.create_rectangle([0, 0,
                             width, height],
                            fill = bdBg,
                            tags = "click")
        cv.bind("<Button-1>", self.click_board)
        # create cells and group lines
        self.drawCells()
        # Create Board surrounding border
        box_args = []
        box_args.append([xmin, ymin, xmax, ymax])
        box_kwargs = {}
        box_kwargs['width'] = self.bdBorderWidth
        box_kwargs['outline'] = self.bdBorderFg
        cv.create_rectangle(box_args, box_kwargs)
        top_frame.update()
def ck_done(self):
""" Check if done or not yet processing
"""
if self.mw is None or (hasattr(self,"cv") and self.cv is None):
return
    # Draw cells and grouping lines
    def drawCells(self):
        """Create cell/group divider lines and record each cell's
        canvas bounding box via setCell() so clicks map back to cells.
        """
        global Initial_data             # NOTE(review): module-level mirror of
                                        # self.initialData - confirm it is used
        if self.ck_done():
            return
        cv = self.cv
        xmin = self.xMin
        xmax = self.xMax
        ymin = self.yMin
        ymax = self.yMax
        # Separator line attributes
        # (Canvas createLine)
        cell_opt_h = {'width':self.cellSep,
                      'fill':self.cellSepFg}
        group_opt_h = {'width':self.groupSep,
                       'fill':self.groupSepFg}
        row_lines = self.getDividers(type="row",
                                     cell=self.data.nRow, group=self.data.nSubRow,
                                     min=ymin, max=ymax,
                                     cellOpt=cell_opt_h,
                                     groupOpt=group_opt_h)
        col_lines = self.getDividers(type="col",
                                     cell=self.data.nCol, group=self.data.nSubCol,
                                     min=xmin, max=xmax,
                                     cellOpt=cell_opt_h,
                                     groupOpt=group_opt_h)
        rowSize = self.rowSize          # cell height (set by getDividers)
        colSize = self.colSize          # cell width (set by getDividers)
        # Setup board font
        bdFont = Font(family=self.bdFontName,
                      size=(colSize-1))
        self.bdFont = bdFont
        # Setup cells for data display, selection
        Initial_data = self.initialData
        # TBD: Set up clickCell to obtain entry from event position
        for ri in range(len(row_lines)):
            row = ri+1
            r_l1 = row_lines[ri].args       # [x1,y1,x2,y2]
            y1 = r_l1[1]
            y2 = y1 + rowSize
            for ci in range(len(col_lines)):
                col = ci+1
                c_l1 = col_lines[ci].args   # [x1,y1,x2,y2]
                x1 = c_l1[0]
                x2 = x1 + colSize
                if row <= self.data.nRow and col <= self.data.nCol:
                    self.setCell(row=row, col=col, x1=x1, y1=y1, x2=x2, y2=y2)
                '''
                if self.initialData is not None:
                    orig_val = self.initialData.getCell(row, col)
                    if orig_val is not None:
                        self.markCell(row=row, col=col, tag='origData',
                                      add=1,
                                      boundWidth='NONE', boundColor='NONE',
                                      backColor="gray90")
                '''
        # Mark squares
        for r_line in row_lines:
            cv.create_line(r_line.args, r_line.kwargs)
        for r_line in col_lines:
            cv.create_line(r_line.args, r_line.kwargs)
    def getData(self, ):                # Returns: data
        """Return the board's data reference."""
        return self.data
    def getDividers(self, type=None,
                    cell = None,
                    group = None,
                    min = None,
                    max = None,
                    cellOpt = None,
                    groupOpt = None,
                    ):
        """Get divider lines.

        Returns array of create_line options;
        sets rowSize/colSize (pixels) appropriately;
        sets pos in returned lines.
        :type: "row" for horizontal dividers, "col" for vertical
        :cell: number of cells along this axis
        :group: cells per sub-group; every group-th divider is a group line
        :min:, :max: pixel extent along this axis
        :cellOpt:, :groupOpt: canvas line option dicts
        :returns: array of canvas line args (LineArgs)
        :raises SelectError: on missing/invalid arguments
        """
        if type is None or (type != "row" and type != "col"):
            raise SelectError(f"bad type:{type}")
        if cell is None:
            raise SelectError("cell missing")
        if group is None:
            raise SelectError("group missing")
        if min is None:
            raise SelectError("min missing")
        if max is None:
            raise SelectError("max missing")
        if cellOpt is None:
            raise SelectError("cellOpt missing")
        if groupOpt is None:
            raise SelectError("groupOpt missing")
        lines = []                      # Array of refs to line args
        cell_size = int((max-min)/cell)
        if (type == "row"):
            self.rowSize = cell_size    # cell height in pixels
        else:
            self.colSize = cell_size    # cell width in pixels
        xmin = self.xMin
        ymin = self.yMin
        xmax = self.xMax
        ymax = self.yMax
        for i in range(cell):
            is_group = False            # True if group divider
            is_group = True if i % group == 0 else False
            val = min + i*cell_size     # combersome, but reduces
                                        # trunk error
            # TBD adjust for border overlap
            lineargs = LineArgs()       # Staging area for create_line args
            if type == "row":
                x1 = xmin
                x2 = xmax
                y1 = y2 = val           # horizontal line
            else:
                y1 = ymin
                y2 = ymax
                x1 = x2 = val           # vertical line
            lineargs.pos = val          # Line position(orthogonal)
            lineargs.args = [x1, y1, x2, y2]
            if is_group:
                lineargs.kwargs = groupOpt
            else:
                lineargs.kwargs = cellOpt
            lines.append(lineargs)
        return lines
def rowColRef(self, row, col): #
""" Translate row, col to cell ref
:row:
:col:
:returns: ref to cell
"""
return self.cells[col-1][row-1]
def getClickedCell(self, ev): # Returns: cell ref, else None
""" Get clickec cell
:ev: button clikc event
:returns: ref to cell
"""
cv = self.cv
xMin = self.xMin
yMin = self.yMin
x = ev.x
y = ev.y
if SlTrace.trace("any"):
ev_x = ev.x
ev_y = ev.y
SlTrace.lg(f"getClickedCell: ev_x:{ev_x} ev_y:{ev_y}"
+ f" x={x} y={y} xMax={self.xMax} yMax={self.yMax}")
if (x < 0 or y < 0
or x > self.xMax or y > self.yMax):
return None
rowSize = self.rowSize
colSize = self.colSize
if rowSize <= 0 or colSize <= 0:
return None
ri = int(y/colSize)
ci = int(x/rowSize)
cells = self.cells
if cells is None:
return None
return cells[ri][ci]
#
def getMarked(self, tag=None):
""" Get marked cell(s)
:returns: array of refs to marked cells
"""
if tag is None:
tag = markerTagDef
markH = self.markH
if markH is None or tag not in markH:
return []
r_marks = markH[tag]
if r_marks is None:
return []
return r_marks
# Check if cell is selected
def isSelected(self, row, col): # Returns: true iff selected
r_m = self.getMarked()
if r_m is None or len(r_m) == 0:
return False
r_c = r_m[0]
if r_c is None:
return False
if r_c.row == row and r_c.col == col:
return True
return False
    #
    def markCell(self, col=None, row=None, tag=None,
                 add=None, backColor=None, boundColor=None, boundWidth=None, boundWidget=None):
        """Mark cell.

        Does not select cell.
        :col:, :row: 1-based cell position (required)
        :tag: marker group tag (default: markerTagDef)
        :add: True - add to the tag group; False (default) - replace
            any previous marks under the tag
        :backColor: rectangle fill color
        :boundColor: rectangle outline color
        :boundWidth: rectangle outline width
        :boundWidget: associated widget, recorded with the mark
        :raises SelectError: if row or col is missing
        """
        if col is None:
            raise SelectError("markCell no col")
        if row is None:
            raise SelectError("markCell no row")
        if add is None:
            add = False                 # Add to tagged group, else delete prev
        if tag is None:
            tag = markerTagDef          # tag grouping, default
        r_c = self.rowColRef(row, col)
        args = [r_c.x1, r_c.y1, r_c.x2, r_c.y2]     # cell bounding box
        kwargs = {}
        if backColor is not None:
            kwargs['fill'] = backColor
        if boundWidth is not None:
            kwargs['width'] = boundWidth
        if boundColor is not None:
            kwargs['outline'] = boundColor
        cv = self.cv
        mark_id = cv.create_rectangle(*args, **kwargs)
        if not add:
            self.clearMarks(tag)        # Replace: drop previous group first
        self.addMark(tag=tag, id=mark_id, rc=r_c, col=col, row=row,
                     backColor=backColor, boundColor=boundColor,
                     boundWidth=boundWidth, boundWidget=boundWidget)
# Select cell
# Marks cell
# Unselects previously selected
def selectCell(self, row, col): # Returns: ref to cell
r_c = self.rowColRef(row, col)
if r_c is None:
return None
self.markCell(row=row, col=col)
return r_c
def showData(self, data=None, force=False):
""" display data
:data: data to display default: self.data
:force: force updating display values
"""
if self.ck_done():
return
if data is None:
data = self.data
if not isinstance(data, SudokuData):
raise SelectError(f"Warning -- method add_data({type(data)}) expects a 'SudokuData' object")
return
force = True # Force display
for nr in range(1, self.data.nRow+1):
for nc in range(1, self.data.nCol+1):
new = data.getCellVal(row=nr, col=nc)
old = self.getCellVal(row=nr, col=nc)
if force or new != old or data.isEmpty(new):
self.setCell(nr, nc, new)
if self.ck_done(): # Check if changed circumstance
return
if self.mw is not None:
self.mw.update()
if SlTrace.trace("show_data"):
SlTrace.lg("show_data")
self.display()
    # Check if value considered empty
    def isEmpty(self, val):
        """Return True iff *val* counts as an empty cell value (delegates to data)."""
        return self.data.isEmpty(val)
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,698
|
raysmith619/sudoku
|
refs/heads/master
|
/src/sudoku_puzzle_load.py
|
# sudoku_puzzle_load.py
"""
Support for the loading of sudoku puzzle specification
File Format (Customized to be a small subset of python
to ease processing flexibility)
"""
import sys, traceback
from select_trace import SlTrace
from select_error import SelectError
from SudokuPuzzle import SudokuPuzzle
# Added to between any prefix and the compile string if trace: "puzzle_load" flag is set
test_prefix = """
print("dir: ", dir())
print("globals: ", globals())
print("locals: ", locals())
"""
class SudokuPuzzleLoad:
    """ Support simple puzzle building commands
        puzzle(rows=..., cols=..., desc=description)
        row(cols number of values, 0/None for empty)

    A puzzle file is a small subset of python: its text is compiled and
    executed with the classmethods below exposed as the global functions
    version(), puzzle(), row() and end_puzzle().
    """
    @classmethod
    def set_input(cls, pfile=None, pstr=None):
        """ Setup file access
        :pfile: file name/stream containing puzzle specification
                OR
        :pstr: string containing puzzle specification
        :returns: class instance, else None on open/read failure
        :raises SelectError: if both pfile and pstr are given
        """
        cls.file_name = None            # Set to name if known
        cls.puzzle_string = pstr
        if pfile is not None and pstr is not None:
            raise SelectError(f"Only one of pfile({pfile}) or pstr({pstr}) may be specified")
        if pfile is not None:
            if isinstance(pfile, str):
                cls.file_name = pfile
                try:
                    fin = open(pfile)
                except Exception as ex:
                    SlTrace.lg(f"open puzzle file {pfile} failed {str(ex)}")
                    return None
            else:
                fin = pfile             # Input is an opened stream
            try:
                pstr = fin.read()
                fin.close()
            except Exception as ex:
                SlTrace.lg(f"input read {pfile} failed {str(ex)}")
                return None             # Bug fix: was "return cls",
                                        # inconsistent with the open-failure path
            cls.puzzle_string = pstr
        cls.cur_row = 0
        return cls

    @classmethod
    def file2puzzle(cls, file, prefix=None):
        """ Take file name/object and return SudokuPuzzle
        :file: file name/path / open stream
        :prefix: optional string to prefix file text
        """
        spl = cls.set_input(pfile=file)
        puzzle = spl.procCmdString(prefix=prefix)
        return puzzle

    @classmethod
    def get_puzzle(cls):
        """ Retrieve puzzle complete or in progress
        """
        return cls.sudoku_puzzle

    @classmethod
    def procCmdString(cls, string=None, prefix=None):
        """ Process python code string, with prefix text
        :string: input string (default: the loaded puzzle text)
        :prefix: optional string to prefix code string for compile
        :Returns: the built SudokuPuzzle
        :raises SelectError: on missing input or compile/exec failure
        """
        cls.result = False              # Set True if OK
        if string is None:
            string = cls.puzzle_string
        if string is None:
            raise SelectError("string is missing")
        # Names visible to the executed puzzle text
        gbls = {'version' : cls.version,
                'puzzle' : cls.puzzle,
                'row' : cls.row,
                'end_puzzle' : cls.end_puzzle
                }
        compile_str = ""
        if prefix is not None:
            compile_str = prefix
            if not prefix.endswith("\n"):
                compile_str += "\n"     # Insure ending newline
        if SlTrace.trace("puzzle_load"):
            compile_str += test_prefix
        compile_str += string
        compile_str += "\nend_puzzle()\n"       # End puzzle
        try:
            exec(compile_str, gbls)
            cls.result = True
            return cls.sudoku_puzzle
        except Exception as e:
            _, _, tb = sys.exc_info()
            tbs = traceback.extract_tb(tb)
            SlTrace.lg(f"Error while executing text from {cls.file_name}\n    {str(e)}")
            inner_cmds = False
            for tbfr in tbs:            # skip bottom (in dots_commands.py)
                tbfmt = 'File "%s", line %d, in %s' % (tbfr.filename, tbfr.lineno, tbfr.name)
                if False:               # TFD
                    inner_cmds = True
                    SlTrace.lg("    --------------------")
                SlTrace.lg("    %s\n       %s" % (tbfmt, tbfr.line))
            cls.result = False
            raise SelectError("compile error")

    """
    Basic game file loading functions
    Generally one per file command
    """
    @classmethod
    def version(cls, version_str):
        """ Record the puzzle file format version string. """
        cls.version_str = version_str

    @classmethod
    def puzzle(cls, desc=None,
               rows=None, grows=None, cols=None, gcols=None):
        """ Start processing of puzzle
        :desc: puzzle description
        :rows:, :cols: board dimensions
        :grows:, :gcols: sub-group dimensions
        """
        SlTrace.lg(f"puzzle(desc={desc}, rows={rows}, grows={grows} cols={cols} gcols={gcols})")
        cls.desc = desc
        cls.rows = rows
        cls.grows = grows               # Bug fix: was "cls.grows = rows"
        cls.cols = cols
        cls.gcols = gcols
        cls.cur_row = 0
        cls.sudoku_puzzle = SudokuPuzzle(desc=desc, rows=rows, grows=grows, cols=cols, gcols=gcols)
        if SlTrace.trace("puzzle_load"):
            cls.sudoku_puzzle.display("puzzle() start")

    @classmethod
    def row(cls, *col_vals):
        """ Add next row of values
        :col_vals: column values for this row
            Each argument is either:
                a 0/None for empty square
            OR
                a value between 1 and cls.cols
        :raises SelectError: on row overflow or wrong column count
        """
        # (removed stray "global sudoku_puzzle" - state lives on cls)
        cls.cur_row += 1
        if cls.cur_row > cls.rows:
            raise SelectError(f"row(cls.cur_row) is greater than the puzzle number of rows({cls.rows})")
        if len(col_vals) != cls.cols:
            raise SelectError((f"Number of columns({len(col_vals)} != puzzle number of columns({cls.cols})"))
        for ic, col_val in enumerate(col_vals):
            cls.sudoku_puzzle.add_cell(row=cls.cur_row, col=ic+1, val=col_val)
        if SlTrace.trace("puzzle_load"):
            cls.sudoku_puzzle.display(f"row={cls.cur_row}")

    @classmethod
    def end_puzzle(cls):
        """ Marker appended after the puzzle text; nothing to finalize. """
        pass
if __name__ == "__main__":
    # Ad-hoc manual test: pick a puzzle file and load it three ways.
    from tkinter import filedialog
    start_dir = r"./puzzle"
    # NOTE(review): askopenfile returns an open *file object* (or None on
    # cancel); it is passed below as a stream and then reused by
    # file2puzzle after having been read once - confirm intended usage.
    filename = filedialog.askopenfile(
        initialdir = start_dir,
        title = "Select puzzle file",
        filetypes = (("supz files","*.supz"),("all files","*.*")))
    spl = SudokuPuzzleLoad.set_input(pfile=filename)
    puzzle1 = spl.procCmdString()
    puzzle1.display('puzzle1 display')
    puzzle2 = spl.get_puzzle()
    puzzle2.display("get_puzzle display")
    puzzle3 = SudokuPuzzleLoad.file2puzzle(file=filename)
    puzzle3.display("get_puzzle display")
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,699
|
raysmith619/sudoku
|
refs/heads/master
|
/sudoku_subs.py
|
# sudoku_subs.py
# Top level subs for sudoku.py
# imported to workaround the lack of forward referencing subroutines
import sys
import os
import re
from tkinter import filedialog
from tkinter import *
import time
import argparse
from math import *
from select_trace import SlTrace
from select_error import SelectError
from resource_group import ResourceEntry
from select_control import SelectControl
from variable_control_window import VariableControlWindow
import sudoku_globals as gb
from SudokuData import SudokuData, CellDesc
from sudoku_vals import SudokuVals
from SudokuPuzzle import SudokuPuzzle
from SudokuBoard import SudokuBoard
from SudokuPly import SudokuPly
from sudoku_puzzle_load import SudokuPuzzleLoad
from sudoku_search_stop import SudokuSearchStop
def helpstr():
    """ Build the command-line options help text.
    :returns: multi-line usage string

    Bug fix: the original built retstr and then fell off the end,
    returning None.
    """
    retstr = f"""
--cols number_of_cols Default: {gb.nCol}
--bSize bd size in inches Default: {gb.bSize}
--dispalyTime sec between display, 0 - wait till user ACK
--first number_of_solutions, stop when we have this many Default: {gb.nFirst}
--makePuzzle starting_cells, make puzzle with this many filled cells
--gcols number_of_cells_in_group Default: {gb.nSubCol}
--grows number_of_cells_in_group Default: {gb.nrowGroup}
--rows number_of_rows Default: {gb.nRow}
--sSize solution bd size inches Default: {gb.sSize}
--traceList comma_separated_trace_options Default: {gb.traceList}
--uPuzzle - use preformed puzzle
--xperimental experimental version Default: {gb.xPer}
"""
    return retstr
def str2bool(v):
    """Parse a yes/no style string into a bool (argparse ``type=`` helper).

    :v: text such as "yes", "No", "t", "0", "TRUE"
    :returns: True or False
    :raises argparse.ArgumentTypeError: for unrecognized text
    """
    text = v.lower()
    if text in ('yes', 'true', 't', 'y', '1'):
        return True
    if text in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def update():
    """ do any window updating required
    Quietly does nothing while the program/trace job is shutting down
    or before the main window exists.
    """
    if not gb.running:
        return
    if not SlTrace.runningJob:
        return
    if gb.Display_mw is not None:
        gb.Display_mw.update()
def pgm_exit():
    """ Shut down: log, tear down result windows and the main window,
    flush trace state, and exit the process.
    """
    SlTrace.lg("Quitting Sudoku Playing")
    # Trace and Log files save by SlTrace onexit
    ###SlTrace.lg("Properties File: %s"% SlTrace.getPropPath())
    ###SlTrace.lg("Log File: %s"% SlTrace.getLogPath())
    gb.res_group.destroy_all()          # Remove any result/search windows
    gb.Display_mw.destroy()
    gb.Display_mw = None
    gb.running = False                  # Stop update loops
    SlTrace.onexit()                    # Force saving
    sys.exit(0)
def set_controls():
    """ (Re)create the variable-control window bound to the
    SelectControl singleton, destroying any previous one.
    """
    cF = SelectControl()                # Ref to singleton
    if gb.vC is not None:
        gb.vC.destroy()                 # Drop previous control window
        gb.vC = None
    gb.vC = VariableControlWindow(var_ctl=cF)
def set_puzzle(puzzle, file_name=None):
    """ Set/Reset main puzzle
    :puzzle: Puzzle to setup
    :file_name: optional file name recorded on the puzzle
    Replaces the previous main puzzle/board and pushes the new
    dimensions into the control-variable singleton.
    """
    if file_name is not None:
        puzzle.file_name = file_name    # override if desired
    if gb.main_puzzle is not None:
        gb.main_puzzle.destroy()
        gb.main_puzzle = None
    if gb.o_board is not None:
        gb.o_board.destroy()
    gb.main_puzzle = puzzle.copy()      # Insulate from changes
    gb.puzzle = puzzle
    gb.o_board = SudokuBoard(mw=gb.Display_mw,
                             frame=new_main_bd_frame(),
                             data=puzzle,
                             bdWidth=gb.bSize,
                             bdHeight=gb.bSize,
                             puzzle = puzzle)
    gb.o_board.showData(force=True)
    # Reflect the puzzle's dimensions in the control variables
    cF = SelectControl()
    cF.set_val("nRow", gb.main_puzzle.nRow)
    cF.set_val("nSubRow", gb.main_puzzle.nSubRow)
    cF.set_val("nCol", gb.main_puzzle.nCol)
    cF.set_val("nSubCol", gb.main_puzzle.nSubCol)
    cF.update_settings()
def use_puzzle(puzzle=None):
    """ Use Precreated puzzle
    Set reset_data to this
    :puzzle: text pattern of n x n lines; "-" marks an empty cell,
        digits mark filled cells; '#' starts a comment
        Default: internal test pattern PZ1
    """
    ###global gb.o_data, gb.nCol, gb.nSubCol, gb.nRow, gb.nrowGroup, gb.o_board, gb.Initial_data, gb.nRow, gb.nCol
    # Test puzzles
    PZ1 = """
- 2 - -
1 4 - -
- - 3 2
- - 1 -
"""
    PZ2 = """
3 2 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    PZ3 = """
- 2 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    PZ4 = """
- - 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    if puzzle is None:
        # NOTE(review): the successive assignments leave PZ1 in effect -
        # presumably leftover manual test switching; confirm intent
        puzzle = PZ1
        puzzle = PZ2
        puzzle = PZ3
        puzzle = PZ4
        puzzle = PZ1
    nrow = 0
    ncol = 0
    rowcols = []                        # array of rows of cols
    lines = puzzle.split("\n")
    lineno = 0
    for line in lines:
        lineno += 1
        m = re.match(r'^([^#]*)#', line)        # Remove comments
        if m:
            line = m.group(1)
        m = re.match(r'^\s+(.*)$', line)        # Strip leading whitespace
        if m:
            line = m.group(1)
        m = re.match(r'^(.*)\s+$', line)        # Strip trailing whitespace
        if m:
            line = m.group(1)
        if re.match(r'^\s*$', line):            # Ignore blank lines
            continue
        nrow += 1
        cs = line.split()
        if ncol > 0 and len(cs) < ncol:
            raise SelectError("cols not identical in line: lineno")
        if len(cs) > ncol:
            ncol = len(cs)
        rowcols.append(cs)
    if ncol != nrow:
        raise SelectError(f"number of cols{ncol} != number of rows{nrow}")
    gb.nRow = nrow
    gb.nCol = ncol
    gb.nSubCol = int(sqrt(gb.nCol))     # assumes square sub-groups
    gb.nSubRow = int(sqrt(gb.nRow))
    puzzle = SudokuPuzzle(rows=nrow, cols=ncol, grows=gb.nSubRow, gcols=gb.nSubCol,
                          desc="Internal Puzzle")
    for ri in range(nrow):
        row = ri+1
        for ci in range(ncol):
            col = ci+1
            val = rowcols[ri][ci]
            if val == '-':
                continue                # Empty cell
            puzzle.add_cell(row=row, col=col, val=int(val))
    set_puzzle(puzzle)
# Clear to an empty board
def clear_board():
    """Replace the current puzzle with an empty one of the current dimensions."""
    empty = SudokuPuzzle(rows=gb.nRow, cols=gb.nCol,
                         grows=gb.nSubRow, gcols=gb.nSubCol,
                         desc="Internal Puzzle")
    set_puzzle(empty)
# Close move display window
def search_stop():
    """ Stop the solution search and tear down search-related displays. """
    ###global Display_mw
    '''
    if Display_mw is not None:
        Display_mw.destroy()
    '''
    SlTrace.lg("search_stop")
    SudokuPly.stop_search()             # Signal the search loop to abort
    gb.res_group.destroy_all()
display_prev_time = None                # time of last periodic progress report
display_no = 0                          # count of display_rtn invocations
def display_rtn(data):
    """ Progress display routine, called from the search (SudokuPly).
    :data: current search data to show on the "searching" board
    """
    global display_prev_time
    global display_no
    if not gb.running:
        return
    display_no += 1
    ###gb.main_puzzle.display("display_rtn: main_puzzle")
    display_time = gb.Display_time
    if display_time is None:
        return                          # Progress display disabled
    now = time.time()
    new_board = False
    searching_board = gb.res_group.get_obj("searching_board")
    if searching_board is None:
        solution_search_display_setup()     # (Re)create the search window
        new_board = True
        searching_board = gb.res_group.get_obj("searching_board")
    if display_prev_time is None:
        display_prev_time = now
    gb.Display_mw.after(int(1000*display_time))
    if now - display_prev_time > gb.update_time:
        # Periodic progress trace
        puzzle_name = "INTERNAL"
        if gb.puzzle.file_name is not None:
            puzzle_name = os.path.basename(gb.puzzle.file_name)
        gb.main_puzzle.trace_check(prefix=puzzle_name)
        ###SlTrace.lg(f"{puzzle_name} move:{nmove} empty: {nempty} backup: {nbackup} time:{dur:.3f}")
        display_prev_time = now
    if searching_board is not None:
        searching_board.showData(data, force=new_board)
        if SlTrace.trace("display_board"):
            searching_board.display(f"display:{display_no}")
# Setup move display
def solution_search_display_setup():
    """ Create the "Solution Searching" window/board and register the
    progress callback with SudokuPly.
    """
    title = "Solution Searching"
    SudokuPly.setDisplay(display_rtn, gb.Display_time)
    searching_mw = Toplevel()
    searching_mw.protocol("WM_DELETE_WINDOW", search_stop)
    searching_mw.title(title)
    x = 400
    y = 600
    searching_mw.geometry(f"+{x}+{y}")
    top_fr = Frame(searching_mw)
    top_fr.pack(side = 'top')
    c1 = Button(top_fr,
                text = "Close",         # Guess one
                command = search_stop,
                )
    c1.pack(side = 'left')
    # Replace any previous searching board
    if gb.res_group.get("searching_board") is not None:
        gb.res_group.destroy("searching_board")
    data = SudokuData.vals2data(gb.main_puzzle)
    searching_board = SudokuBoard(mw=searching_mw,
                                  data = data,
                                  bdWidth=gb.sSize*.8,
                                  bdHeight=gb.sSize*.8,
                                  initialData=gb.Initial_data,
                                  puzzle=gb.main_puzzle
                                  )
    searching_board.showData(force=True)
    gb.res_group.add(ResourceEntry(searching_board), name="searching_board")
    gb.main_puzzle.set_start_time()     # Start timing the search
def file_proc(filename, run_after_load=None):
    """ Process (solve) one puzzle file.
    :filename: full file name; may be a puzzle (.supz) or a list of
        puzzle files (.supzl)
    :run_after_load: True -> solve after loading, False just display
            Default: gb.run_after_load

    Bug fix: the puzzle-name log line had lost its interpolation and
    printed a literal placeholder instead of the file name.
    """
    if not gb.running:
        return
    if filename.endswith(".supzl"):
        filelist_proc(filename)         # A list of puzzle files
        return
    if run_after_load is None:
        run_after_load = gb.run_after_load
    spl = SudokuPuzzleLoad.set_input(pfile=filename)
    if spl is None:
        return                          # Could not open/read the file
    SlTrace.lg(f"Puzzle file name:{filename}")
    puzzle = spl.procCmdString()
    set_puzzle(puzzle, file_name=filename)
    puzzle.display("Puzzle Start")
    if run_after_load:
        solve_main_puzzle()
file_list_files = {}    # Puzzle-list files already processed (recursion guard)
def filelist_proc(filename):
    """ Process file containing list of puzzle files.
    :filename: filename of file containing list of puzzle files
            Default directory for files in list is dir(filename)
    Lines may carry '#' comments; blank lines are skipped.

    Bug fixes: the "already used" message referenced the undefined name
    ``file`` (NameError in py3); the comment-strip regex was
    r'^(\.*)#.*$', which only matched literal dots before '#' - now
    strips everything from the first '#', matching use_puzzle().
    """
    with open(filename) as f:
        file_list_files[filename] = 1   # Record as being used
        lines = f.readlines()
    filedir = os.path.dirname(filename)
    for i in range(len(lines)):
        line = lines[i]
        ml = re.match(r'^([^#]*)#.*$', line)
        if ml:
            line = ml[1]                # Before comment
        line = line.strip()
        if re.match(r'^\s*$', line):
            continue                    # Skip blank lines
        name = line
        if not os.path.isabs(name):
            name = os.path.join(filedir, name)  # Relative to list's dir
        if name in file_list_files:
            SlTrace.lg(f"file: {name} already used - avoiding recursive use ")
            continue
        file_proc(filename=name, run_after_load=True)
def file_open():
    """ Prompt for a puzzle (or puzzle-list) file and process it. """
    start_dir = r"./puzzle"
    filename = filedialog.askopenfilename(
        initialdir = start_dir,
        title = "Select puzzle file",
        filetypes = (("puzzle files","*.supz"),("puzzle lists","*.supzl"),("all files","*.*")))
    file_proc(filename)
# Create puzzle with number of cells filled in
# Set initial_data to this
def make_puzzle(nfilled=None):
    """ Create puzzle with number of cells filled in
    Set reset_data to this
    :nfilled: Number of cells filled in, None = ~1/3 of the board
    """
    ###global o_data, o_board
    # NOTE(review): display_close is not defined in this module - confirm
    display_close()
    if (gb.o_data is None):
        gb.o_data = SudokuData(cols=gb.nCol,
                               rows=gb.nRow,
                               gcols=gb.nSubCol,
                               grows=gb.nSubRow,
                               )
    gb.o_data.clear()           # Clear data
    if gb.o_board is None:
        # NOTE(review): gb.mw - other code uses gb.Display_mw; confirm
        gb.o_board = SudokuBoard(mw=gb.mw,
                                 frame=new_main_bd_frame(),
                                 data=gb.o_data,
                                 bdWidth=gb.bSize,
                                 bdHeight=gb.bSize)
    ncell = gb.nRow*gb.nCol
    if (nfilled is None):
        nfilled = int(ncell/3)
    if nfilled & 1 != 0 and ncell & 1 != 0:
        nfilled -= 1            # No possible symitry
                                # with odd # and even row/col
    o_list = SudokuData(base=gb.o_data)
    a_start_list = o_list.startCells(
        nFilled=nfilled,
        symRules=gb.makePuzzleSym)
    if (a_start_list is None):
        SlTrace.lg(f"no such pattern for nRow=:{gb.nRow}, nCol=:{gb.nCol}"
                   + f" nFilled={nfilled} symRules={gb.makePuzzleSym}")
        sys.exit("quitting")
    # Display start list
    sl = SudokuData(rows=gb.nRow, grows=gb.nSubRow, cols=gb.nCol, gcols=gb.nSubCol)  # for diagnostic display
    SlTrace.lg(f"start list: ")
    n = 0
    for sc in a_start_list:
        # Label the starting cells A..Z then a.. for display
        val = n
        if (n <= ord('Z')-ord('A')):
            val = chr(ord('A')+n)
        elif (n < 2*(ord('Z')-ord('A'))):
            val_ord = ord('a')+n-((ord('Z')-ord('A')-1))
            val = chr(val_ord)
        sl.setCellVal(sc.row, sc.col,
                      val)
        SlTrace.lg(f"    (r:{sc.row}, c:{sc.col})")
        n += 1
    sl.display()
    if (len(a_start_list) != nfilled):
        SlTrace.lg(f"Actual list = {len(a_start_list)}"
                   + f" Requested list = {nfilled}")
    # Set starting arrangement
    o_sol = SudokuPly(base=gb.o_data)
    sols = o_sol.makePuzzle(
        startList=a_start_list)
    if sols is not None and len(sols) == 1 and sols[0] is not None:
        sol = sols[0]
        gb.o_data = sol.getData(subset=a_start_list)
        gb.o_board.showData(gb.o_data)
        gb.Initial_data = SudokuData(base=gb.o_data)
    return sols
main_bd_fr = None       # Current main board frame, if present
def new_main_bd_frame():
    """ Create a new main board frame.
    Deletes the old frame if present.
    :returns: the new Frame, packed at the bottom of gb.top_fr
    """
    global main_bd_fr
    if main_bd_fr is not None:
        main_bd_fr.destroy()            # Replace previous board frame
    main_bd_fr = Frame(gb.top_fr)
    main_bd_fr.pack(side = 'bottom')
    return main_bd_fr
# Adjust puzzle to a unique puzzle
# Generally by adding starting filled cells
def adj_puzzle_uniq(sols, nfilled):     # Returns: puzzle solution Ply
    """ Constrain a puzzle toward a unique solution by filling cells
    from a known solution.
    :sols: list of solution plies; the first is used as the source
    :nfilled: number of cells to try to fill
    NOTE(review): several names here are undefined in this module
    (rand, nRow, nCol without gb., bare o_data) and r_c is accessed
    with dict subscripts - this function looks unfinished; flagged
    rather than changed.
    """
    sol = sols[0]
    val_max = gb.nRow
    if gb.nCol > val_max:
        val_max = nCol                  # NOTE(review): gb.nCol intended?
    SlTrace.lg(f"adj_puzzle_uniq\n")
    sol_data = sol.getData()
    for i in range(nfilled):
        nr = int(rand(nRow)+1)          # NOTE(review): rand/nRow undefined
        nc = int(rand(nCol)+1)
        gb.o_data.curCell(row=nr, col=nc)
        r_c = None
        ntry = 0
        # Space values randomly
        min_choice = 2                  # Attempting to leave multiple choices
        tmc = val_max                   # Only look so much
        legalvals = []                  # choices for candidate cell
        while True:
            for i1 in range(rand(val_max)+1):
                r_c = o_data.getNextEmpty()     # NOTE(review): gb.o_data?
                if r_c is not None:
                    break               # No more empty
            legalvals = gb.o_data.getLegalVals(r_c['row'],
                                               r_c['col'])
            tmc -= 1
            if tmc <= 0 or len(legalvals) >= min_choice:
                break
        if (r_c is None):
            SlTrace.lg(f"Can't find room for puzzle")
            break
        nr = r_c['row']
        nc = r_c['col']
        gb.o_data.setCellVal(nr, nc, sol_data.getCellVal(nr, nc))
    sol = uniq_sol(gb.o_data)           # Make unique
    return sol
# Reset to initial board
def reset_board():
    """ Restore the board data/display to the recorded initial data. """
    gb.o_data = SudokuData(base=gb.Initial_data)
    gb.o_board.showData(gb.o_data)
# Setup move display
def set_move_display(display_time):
    """ Set seconds between progress displays (None disables display). """
    ###global Display_time
    gb.Display_time = display_time
# OK to selection
def set_selected_ok():
    """ Accept the value chosen in the selection listbox, store it in
    the current cell, refresh the board, and close the popup.
    NOTE(review): curselection() returns a tuple - selecteds[0] raises
    IndexError when nothing is selected, so the "si is None" check may
    never fire; confirm intent.
    """
    selecteds = gb.sbox.curselection()
    si = selecteds[0]
    if si is None:
        set_selected_delete()
        return
    selected_val = gb.sbox_legal_vals[si]
    gb.o_data.setCellVal(gb.sbox_row, gb.sbox_col, selected_val)
    gb.o_board.showData()
    set_selected_delete()
# CANCEL to selection
def set_selected_cancel():
    """ Dismiss the value-selection popup without changing the cell. """
    set_selected_delete()
# Remove set_selected
def set_selected_delete():
    """ Tear down the value-selection popup.
    NOTE(review): sbox_fr, exists and sbox are not defined in this
    module, the frame is destroyed *before* the exists() check, and the
    local "sbox = None" has no effect - this function looks unfinished.
    """
    sbox_fr.destroy()
    if exists(sbox_fr):
        sbox = None
def clear_solve_main_puzzle():
    """ Remove search/result displays and detach the progress callback. """
    gb.res_group.destroy_all()
    SudokuPly.setDisplay(None)
def update_report(ctl=None):
    """ Report control variable (cF) update
    ctl: control reference for convenience
    """
    # NOTE(review): confirm sudoku_globals exposes update_control_variables
    gb.update_control_variables()
# Solve Puzzle
def solve_main_puzzle():
    """ Solve gb.main_puzzle: set up the search display, run the
    solver, log the result and open a window per solution.
    Handles a user-initiated stop via SudokuSearchStop.
    """
    gb.solve_start = time.time()        # Puzzle start time
    gb.main_puzzle.display("solve_main_puzzle before destroy_all: main_puzzle")
    gb.res_group.destroy_all()          # Clearout result displays
    solutions = []                      # Puzzle solution(s)
    gb.main_puzzle.display("solve_main_puzzle: main_puzzle")
    solution_search_display_setup()
    Initial_data = gb.main_puzzle       # Record initial data
    SudokuPly.clear_search_stop()
    try:
        data = SudokuData.vals2data(gb.main_puzzle)
        solutions = solve_puzzle(data=data, puzzle=gb.main_puzzle)
        puzzle_file_name = gb.puzzle.file_name
        dur = time.time() - gb.solve_start
        nmoves = gb.main_puzzle.nmove
        sol_time = f"in {dur:.2f} sec"
        if puzzle_file_name is None:
            puzzle_name = ""
        else:
            puzzle_name = os.path.basename(puzzle_file_name)
        if len(solutions) == 0:
            SlTrace.lg(f"EndPuzzle {puzzle_name} No solution to puzzle {nmoves} moves {sol_time}")
        else:
            nsol = len(solutions)
            SlTrace.lg(f"EndPuzzle {puzzle_name} solved - {nsol} solution{'' if nsol == 1 else 's'}"
                       + f" {nmoves} moves {sol_time}"
                       )
            searching_board = gb.res_group.get_obj("searching_board")
            if searching_board is not None:
                searching_board.trace_check(prefix=puzzle_name)
            nth = 0
            for r_solution in solutions:
                nth += 1
                r_solution.display(f"Solution {nth} of {nsol} {puzzle_file_name}")
                solve_main_puzzle_display(r_solution,
                                          f"Solution {nth} of {nsol}",
                                          nth,
                                          nsol)
    except SudokuSearchStop:
        SlTrace.lg("SudokuSearchStop")
        clear_solve_main_puzzle()
    # Final cleanup (NOTE(review): duplicates clear_solve_main_puzzle on
    # the stop path - confirm intended nesting)
    gb.res_group.destroy_all()
    SudokuPly.setDisplay(None)
#
def solve_main_puzzle_display(r_solution, title=None, nth=None, nsol=None):
    """ Open a window displaying one solution.
    :r_solution: solution data to display
    :title: window title (default: "Solution")
    :nth: ordinal position of this solution (offsets the window placement)
    :nsol: total number of solutions (default: 1)

    Bug fixes: the defaulting tests were inverted
    (``if title is not None: title = "Solution"`` clobbered any
    caller-supplied title, likewise for nsol), and the Close button's
    command was a list (not callable) - now a lambda binding nth.
    """
    if title is None:
        title = "Solution"
    if nsol is None:
        nsol = 1
    mw = Toplevel()
    mw.protocol("WM_DELETE_WINDOW", search_stop)
    mw.title(title)
    x = 400
    y = 200
    x += 100*nth                        # Stagger successive solution windows
    y += 100*nth
    mw.geometry(f"+{x}+{y}")
    # Find first empty slot, extending if necessary
    top_fr = Frame(mw)
    top_fr.pack(side = 'top')
    c1 = Button(top_fr,
                text = "Close",         # Guess one
                command = lambda si=nth: solve_puzzle_close(si),
                )
    c1.pack(side = 'left')
    if nsol > 1:
        c2 = Button(top_fr,
                    text = "Close All", # Close all
                    command = solve_puzzle_close_all,
                    )
        c2.pack(side = 'left')
    board = SudokuBoard(mw=mw,
                        data=r_solution,
                        bdWidth=gb.sSize,
                        bdHeight=gb.sSize,
                        initialData=gb.Initial_data,
                        )
    gb.res_group.add(ResourceEntry(mw), number=nth)
    board.showData(force=True)
# Close solution window
def solve_puzzle_close(si):
    """ Close the si-th solution window.
    :si: solution window index
    NOTE(review): gb.mws and exists() are not defined in this module;
    other code tracks these windows via gb.res_group - confirm intent.
    """
    mw = gb.mws[si]
    if mw is not None:
        if exists(mw):
            mw.destroy()
        gb.mws[si] = None
# Close all solution windows
def solve_puzzle_close_all():
    """ Close every solution window tracked in the resource group. """
    gb.res_group.destroy_all()
def solve_puzzle(data=None, puzzle=None):   # Returns: ref to solution, else None
    """ Solve the given puzzle data.
    :data: SudokuData to solve (required)
    :puzzle: associated SudokuPuzzle, for bookkeeping
    :returns: list of solutions (up to gb.nFirst), possibly empty
    :raises SelectError: if data is missing

    Bug fix: error-message typo "solve_uzzle" corrected; dropped the
    redundant r_data alias.
    """
    if data is None:
        raise SelectError("solve_puzzle: data missing")
    solve_puzzle_close_all()            # Drop any previous solution windows
    s_ply = SudokuPly(base=data, puzzle=puzzle)
    return s_ply.solveChoice(first=gb.nFirst)
#
def uniq_sol(r_data):
    """ Return a puzzle with a unique solution.

    Repeatedly fixes the value of an ambiguous empty cell until only one
    solution remains.
    :r_data: working board data (mutated via setCellVal)
    :returns: SudokuPly with one solution, else None
    """
    s_ply = SudokuPly(base=gb.o_data)
    sols = s_ply.solveChoice(first=gb.nRow)
    while len(sols) > 1:
        squares = []        # CellDesc for each cell ambiguous across solutions
        for ri in range(gb.nRow):
            row = ri + 1
            for ci in range(gb.nCol):
                col = ci + 1
                if not r_data.isEmptyCell(row, col):
                    continue
                valh = {}   # distinct values this cell takes across solutions
                for r_sol in sols:
                    val = r_sol.getCellVal(row, col)
                    if r_data.isEmpty(row, col):
                        SlTrace.lg(f"Empty sol row={row}, col={col}")
                        continue
                    valh[val] = 1
                # Fix: dict.keys() view is not indexable - materialize a list
                vals = list(valh.keys())
                nval = len(vals)            # Number of different values
                if nval > 1:
                    squares.append(CellDesc(nval=nval, row=row, col=col, vals=vals))
        squares.sort(key=lambda cell: cell.nval)    # fewest alternatives first
        r_nc = squares[0]
        # Fix: original had a syntax error ("r_nc-['col']") and mixed dict-style
        # access with the attribute-style CellDesc built above.
        r_data.setCellVal(r_nc.row, r_nc.col, r_nc.vals[0])
        s_ply = SudokuPly(base=gb.o_data)
        sols = s_ply.solveChoice(first=gb.nRow)     # fix: nRow -> gb.nRow
    return sols[0]  # stub - just return first if any
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,722
|
paulojenks/learning_log_flask
|
refs/heads/master
|
/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField, IntegerField, DateField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
Length, EqualTo)
class PostForm(FlaskForm):
    """WTForms form for creating/editing a learning-log entry."""
    # All fields are required; title and the two text areas have length caps
    # matching the CharField max_lengths in models.Entry.
    title = StringField('Title', validators=[DataRequired(), Length(max=100)])
    date = DateField('Date', validators=[DataRequired()])
    time_spent = StringField('Time Spent', validators=[DataRequired()])
    what_i_learned = TextAreaField('What I learned', validators=[DataRequired(), Length(max=1000)])
    resources_to_remember = TextAreaField('Resources to remember', validators=[DataRequired(), Length(max=1000)])
|
{"/app.py": ["/models.py", "/forms.py"]}
|
5,723
|
paulojenks/learning_log_flask
|
refs/heads/master
|
/models.py
|
from peewee import *
DATABASE = SqliteDatabase('journal.db')
class Entry(Model):
    """A single learning-journal entry stored in journal.db (peewee ORM)."""
    title = CharField(max_length=100)
    date = DateField()
    time_spent = CharField(max_length=255)
    what_i_learned = CharField(max_length=1000)
    resources_to_remember = CharField(max_length=1000)

    class Meta:
        database = DATABASE
        # NOTE(review): 'order_by' in Meta is a peewee 2.x convention; confirm it
        # still has effect on the installed peewee version.
        order_by = ('-date',)

    @classmethod
    def create_entry(cls, title, date, time_spent, what_i_learned, resources_to_remember):
        """Create and persist a new Entry row from the given field values."""
        cls.create(
            title=title,
            date=date,
            time_spent=time_spent,
            what_i_learned=what_i_learned,
            resources_to_remember=resources_to_remember
        )
def initialize():
    """Connect, (idempotently) create the Entry table, then release the connection."""
    DATABASE.connect()
    DATABASE.create_tables([Entry], safe=True)  # safe=True: no error if table exists
    DATABASE.close()
|
{"/app.py": ["/models.py", "/forms.py"]}
|
5,724
|
paulojenks/learning_log_flask
|
refs/heads/master
|
/app.py
|
from flask import (Flask, g, render_template, flash, redirect, url_for)
import models
import forms
# Flask app configuration constants.
DEBUG = True
PORT = 5000
HOST = '127.0.0.1'
app = Flask(__name__, static_url_path='/static')
# NOTE(review): secret key is hard-coded in source; for production this should
# come from an environment variable or config file outside version control.
app.secret_key = 'l;joi3;llak;ij;3oij'
@app.before_request
def before_request():
    """Connect to the database before each request."""
    # Expose the shared peewee database handle on flask.g for the request scope.
    g.db = models.DATABASE
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
    g.db.close()
    # Flask requires the response to be returned unchanged.
    return response
@app.route('/')
def index():
    """Render the homepage with up to 100 journal entries."""
    entries = models.Entry.select().limit(100)
    return render_template('index.html', stream=entries)
@app.route('/entries')
def stream():
    """Render the entry stream (same view as the homepage)."""
    entries = models.Entry.select().limit(100)
    return render_template('index.html', stream=entries)
@app.route('/entry', methods=["GET", "POST"])
def new():
    """Show the new-entry form (GET) or create an entry from it (POST)."""
    form = forms.PostForm()
    if form.validate_on_submit():
        models.Entry.create_entry(
            title=form.title.data.strip(),
            date=form.date.data,
            time_spent=form.time_spent.data,
            what_i_learned = form.what_i_learned.data.strip(),
            resources_to_remember = form.resources_to_remember.data.strip()
        )
        flash("Entry Posted!", "success")
        return redirect(url_for('index'))
    # GET, or POST with validation errors: re-render the form.
    return render_template('entry.html', form=form)
@app.route('/entries/<int:entry_id>')
def details(entry_id):
    """Render the detail page for the entry with the given id."""
    matching = models.Entry.select().where(models.Entry.id == entry_id)
    return render_template('detail.html', stream=matching)
@app.route('/entries/delete/<int:entry_id>')
def delete_entry(entry_id):
    """Delete the entry with the given id (best-effort) and return to the index."""
    try:
        models.Entry.get(models.Entry.id == entry_id).delete_instance()
    # Fix: Entry.get raises DoesNotExist (re-exported by `from peewee import *`)
    # when no row matches; IntegrityError alone never caught that case.
    except (models.DoesNotExist, models.IntegrityError):
        pass
    flash("Entry Deleted", "success")
    return redirect(url_for('index'))
@app.route('/entries/edit/<int:entry_id>', methods=('GET', 'POST'))
def edit_entry(entry_id):
    """Show the edit form pre-filled from the entry (GET) or apply the edit (POST)."""
    edit = models.Entry.select().where(models.Entry.id == entry_id).get()
    # obj=edit pre-populates the form on GET; on POST the submitted form data wins.
    form = forms.PostForm(obj=edit)
    # Fix: removed premature form.populate_obj(edit) - it copied unvalidated POST
    # data into the model object before validate_on_submit(), and the object was
    # never saved that way (the UPDATE query below does the real work).
    if form.validate_on_submit():
        q = models.Entry.update(
            title=form.title.data.strip(),
            date=form.date.data,
            time_spent=form.time_spent.data,
            what_i_learned = form.what_i_learned.data.strip(),
            resources_to_remember = form.resources_to_remember.data.strip()).where(models.Entry.id == entry_id)
        q.execute()
        flash("Entry updated", "success")
        return redirect(url_for('stream'))
    return render_template('entry.html', form=form)
if __name__ == '__main__':
    # Ensure tables exist, then start the development server.
    models.initialize()
    app.run(debug=DEBUG, host=HOST, port=PORT)
|
{"/app.py": ["/models.py", "/forms.py"]}
|
5,734
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/users.py
|
from .sql_utils import run_sql_file_raw, refresh_materialized_view
def create():
    """(Re)create the users derived table from its SQL definition file."""
    run_sql_file_raw('./data/sql/derived-tables/users_table.sql')
def refresh():
    """Refresh the materialized views backing the users table."""
    refresh_materialized_view('public.cio_latest_status')
    refresh_materialized_view('public.users')
# NOTE(review): __name__ is never '__create__'/'__refresh__', so these guards
# never fire - the real entry points are wired via setup.py console_scripts
# (quasar.users:create / quasar.users:refresh).  Confirm and consider removing.
if __name__ == '__create__':
    create()
if __name__ == '__refresh__':
    refresh()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,735
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/cio_bounced_backfill.py
|
from concurrent.futures import ThreadPoolExecutor as PoolExecutor
import os
import requests
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import text
from .utils import log
# Postgres connection parameters, all sourced from the environment.
pg_vars = {
    'drivername': os.getenv('PG_DRIVER'),
    'username': os.getenv('PG_USER'),
    'password': os.getenv('PG_PASSWORD'),
    'host': os.getenv('PG_HOST'),
    'port': os.getenv('PG_PORT'),
    'database': os.getenv('PG_DATABASE')
}
pg_ssl = os.getenv('PG_SSL')
# Setup SQL Alchemy postgres connection.
# NOTE: module-level connection - opened at import time and shared by all
# functions in this module.
engine = create_engine(URL(**pg_vars),
                       connect_args={'sslmode': pg_ssl})
conn = engine.connect()
# Grab a page from C.io messages API with optional next param for pagination.
def get_page(next_page=None):
    """Fetch one page from the C.io messages API.

    :next_page: pagination cursor ('start' query param); None for the first page
    :returns: decoded JSON dict (contains 'messages' and 'next' per the API)
    """
    params = {'metric': os.getenv('CIO_API_METRIC'),
              'type': os.getenv('CIO_API_TYPE'),
              'limit': os.getenv('CIO_API_LIMIT'),
              'start': next_page}
    user = os.getenv('CIO_API_USER')
    password = os.getenv('CIO_API_PASSWORD')
    uri = os.getenv('CIO_API_URI')
    r = requests.get(uri, params=params, auth=(user, password))
    return r.json()
# Insert C.io email_bounced record atomically.
def insert_record(message):
    """Insert one C.io email_bounced record atomically.

    Duplicate (email_id, email_address, timestamp) rows are ignored via
    ON CONFLICT ... DO NOTHING, so re-running the backfill is safe.
    """
    query = text(''.join(("INSERT INTO cio.email_bounced_backfill(email_id, "
                          "customer_id, email_address, template_id, subject, "
                          "timestamp) VALUES (:email_id, :customer_id, "
                          ":email_address, :template_id, :subject, "
                          "to_timestamp(:timestamp)) ON CONFLICT (email_id, "
                          " email_address, timestamp) DO NOTHING")))
    record = {
        'email_id': message['id'],
        'customer_id': message['customer_id'],
        'email_address': message['recipient'],
        'template_id': message['msg_template_id'],
        'subject': message['subject'],
        'timestamp': message['metrics']['sent']
    }
    conn.execute(query, **record)
    log('Message ID {} processed.'.format(message['id']))
# Get next page location.
def get_bookmark():
    """Return the stored next-page bookmark rows.

    NOTE(review): fetchall() returns a (possibly empty) list, never None -
    callers must test truthiness, not `is None`.
    """
    s = "SELECT * FROM cio.bounced_backfill_track"
    result = conn.execute(s)
    return result.fetchall()
# Keep track of next page location.
def insert_bookmark(next_page):
    """Persist the next-page cursor, replacing any previous bookmark.

    The tracking table holds at most one row (TRUNCATE then INSERT).
    """
    query = text(''.join(("INSERT INTO cio.bounced_backfill_track(next_page) "
                          "VALUES (:next_page)")))
    conn.execute("TRUNCATE cio.bounced_backfill_track")
    conn.execute(query, next_page=next_page)
def main():
    """Backfill C.io bounced-email events, resuming from any stored bookmark."""
    bookmark = get_bookmark()
    # Fix: get_bookmark() returns a (possibly empty) list of rows, never None,
    # so the original `is None` test always took the resume branch.
    if not bookmark:
        page = get_page()
    else:
        # TODO(review): bookmark is a list of row tuples; the API 'start' param
        # likely expects the cursor string itself (bookmark[0][0]) - confirm.
        page = get_page(next_page=bookmark)
    insert_bookmark(page['next'])
    # While there is a page of results, continue processing.
    while page:
        with PoolExecutor(max_workers=int(os.getenv('POOL_SIZE'))) as executor:
            # Drain the iterator so every insert actually runs.
            for _ in executor.map(insert_record, page['messages']):
                pass
        page = get_page(next_page=get_bookmark())
        insert_bookmark(page['next'])
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,736
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/northstar_to_user_table.py
|
from datetime import datetime as dt
import os
import sys
import time
import json
from .northstar_scraper import NorthstarScraper
from .sa_database import Database
from .utils import Duration, validate_date
"""DS Northstar to Quasar User ETL script.
This ETL scripts scrapes the DoSomething Thor Northstar User API and ETL's the
output to our MySQL Quasar data warehouse.
The script takes an optional argument for what Northstar page result to start
on. This is mostly used to backfill from a certain page, or from the dawn
of time. Otherwise, pagination is stored in an small status tracking table
that gets updated on ingestion loop.
"""
db = Database()
scraper = NorthstarScraper(os.environ.get('NS_URI'))
def _undict_value(value):
if isinstance(value, dict):
return value['value']
else:
return value
# Returns a proper NULL value when the API returns 'null' string
def _null_value(value):
if json.dumps(value) == 'null':
return None
else:
return json.dumps(value)
def _save_user(user):
    """Upsert one Northstar user into northstar.users.

    Flattens wrapped address fields (_undict_value), normalizes the birthdate
    and the JSON feature_flags field, then runs an INSERT ... ON CONFLICT.
    Note: on conflict of (id, created_at, updated_at) only
    email_subscription_status is updated - all other columns keep their
    existing values.
    """
    record = {
        'id': user['id'],
        'first_name': user['first_name'],
        'last_name': user['last_name'],
        'last_initial': user['last_initial'],
        'photo': user['photo'],
        'email': user['email'],
        'mobile': user['mobile'],
        'facebook_id': user['facebook_id'],
        'interests': user['interests'],
        'birthdate': validate_date(user['birthdate']),
        'addr_street1': _undict_value(user['addr_street1']),
        'addr_street2': _undict_value(user['addr_street2']),
        'addr_city': _undict_value(user['addr_city']),
        'addr_state': _undict_value(user['addr_state']),
        'addr_zip': _undict_value(user['addr_zip']),
        'source': user['source'],
        'source_detail': user['source_detail'],
        'slack_id': user['slack_id'],
        'sms_status': user['sms_status'],
        'sms_paused': user['sms_paused'],
        'voter_registration_status': user['voter_registration_status'],
        'language': user['language'],
        'country': user['country'],
        'role': user['role'],
        'last_accessed_at': user['last_accessed_at'],
        'last_authenticated_at': user['last_authenticated_at'],
        'last_messaged_at': user['last_messaged_at'],
        'updated_at': user['updated_at'],
        'created_at': user['created_at'],
        'email_subscription_status': user['email_subscription_status'],
        'feature_flags': _null_value(user['feature_flags']),
        'school_id': user['school_id']
    }
    query = ''.join(("INSERT INTO northstar.users (id, "
                     "first_name, last_name, last_initial, "
                     "photo, email, mobile, facebook_id, "
                     "interests, birthdate, addr_street1, "
                     "addr_street2, addr_city, addr_state, "
                     "addr_zip, source, "
                     "source_detail, slack_id, sms_status, "
                     "sms_paused, voter_registration_status, "
                     "language, country, "
                     "role, last_accessed_at, "
                     "last_authenticated_at, "
                     "last_messaged_at, updated_at,"
                     "created_at, email_subscription_status, feature_flags,"
                     "school_id) VALUES (:id,:first_name,:last_name,"
                     ":last_initial,:photo,:email,:mobile,:facebook_id,"
                     ":interests,:birthdate,:addr_street1,:addr_street2,"
                     ":addr_city,:addr_state,:addr_zip,"
                     ":source,:source_detail,"
                     ":slack_id,:sms_status,:sms_paused,"
                     ":voter_registration_status,:language,:country,"
                     ":role,:last_accessed_at,"
                     ":last_authenticated_at,:last_messaged_at,:updated_at,"
                     ":created_at,:email_subscription_status, "
                     ":feature_flags, :school_id) "
                     "ON CONFLICT (id, created_at, updated_at) "
                     "DO UPDATE SET "
                     "email_subscription_status = :email_subscription_status"
                     ""))
    db.query_str(query, record)
def _interval(hours_ago):
# Return list of ISO8601 formatted timestamps
# from hours_ago in format (hours_ago, hours_ago-1).
def _format(hr):
# Get ISO8601 formatted time from 'hr' hours ago.
_time = int(time.time()) - (int(hr) * 3600)
formatted = dt.fromtimestamp(_time).isoformat()
return formatted
start = _format(hours_ago)
end = _format(hours_ago - 1)
return (start, end)
def _process_page(results):
    """Persist every user record on one API results page."""
    for user_record in results['data']:
        _save_user(user_record)
def _backfill(hours_ago):
    """Backfill Northstar users updated in the last `hours_ago` hours.

    Splits the span into one-hour windows, newest first, and pages through
    the /v2/users cursor API for each window.
    """
    duration = Duration()
    # Get list of 1 hour chunks for total backfill hours_ago.
    intervals = [_interval(hour) for hour in
                 range(int(hours_ago) + 1) if hour > 0]
    # Backfill from most recent going backwards.
    intervals.reverse()
    for start, end in intervals:
        params = {'after[updated_at]': str(start),
                  'before[updated_at]': str(end),
                  'pagination': 'cursor'}
        # Set page param and next page to true assuming at least
        # one page of results exist.
        i = 1
        params['page'] = i
        path = '/v2/users'
        # NOTE(review): local name `next` shadows the builtin; harmless here
        # but worth renaming.
        next = True
        while next is True:
            response = scraper.get(path, params).json()
            _process_page(response)
            if response['meta']['cursor']['next'] is None:
                next = False
            else:
                i += 1
                params['page'] = i
    db.disconnect()
    duration.duration()
def backfill():
    """Console-script entry point: backfill using argv[1] as hours-ago."""
    _backfill(sys.argv[1])
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,737
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/campaign_info.py
|
import os
from .sa_database import Database
from .sql_utils import run_sql_file
from .utils import Duration
# Table/view names for the campaign_info SQL templates, all taken from the
# environment so the same SQL can target different schemas per deployment.
data = {
    'campaign_info_all': os.getenv('CAMPAIGN_INFO_ALL'),
    'field_data_field_campaigns': os.getenv('FIELD_DATA_FIELD_CAMPAIGNS'),
    'node': os.getenv('NODE'),
    'field_data_field_campaign_type': os.getenv(''.join(('FIELD_DATA_FIELD_'
                                                         'CAMPAIGN_TYPE'))),
    'field_data_field_run_date': os.getenv('FIELD_DATA_FIELD_RUN_DATE'),
    'field_data_field_call_to_action': os.getenv(''.join(('FIELD_DATA_FIELD_'
                                                          'CALL_TO_ACTION'))),
    'field_data_field_reportback_noun': os.getenv(''.join(('FIELD_DATA_FIELD_'
                                                           'REPORTBACK_'
                                                           'NOUN'))),
    'field_data_field_reportback_verb': os.getenv(''.join(('FIELD_DATA_FIELD_'
                                                           'REPORTBACK_'
                                                           'VERB'))),
    'field_data_field_action_type': os.getenv('FIELD_DATA_FIELD_ACTION_TYPE'),
    'taxonomy_term_data': os.getenv('TAXONOMY_TERM_DATA'),
    'field_data_field_cause': os.getenv('FIELD_DATA_FIELD_CAUSE'),
    'campaign_info': os.getenv('CAMPAIGN_INFO'),
    'campaigns': os.getenv('CAMPAIGNS'),
    'campaign_info_international': os.getenv('CAMPAIGN_INFO_INTERNATIONAL')
}
def create():
    """(Re)create materialized views: campaign_info_all, campaign_info,
    campaign_info_international.
    """
    duration = Duration()
    run_sql_file('./data/sql/derived-tables/campaign_info.sql', data)
    duration.duration()
def refresh():
    """Refresh the campaign_info materialized views, timing the run."""
    db = Database()
    duration = Duration()
    # Setting statement for schema diffs of campaign_info_all
    campaign_all = "REFRESH MATERIALIZED VIEW " + data['campaign_info_all']
    db.query(campaign_all)
    db.query('REFRESH MATERIALIZED VIEW public.campaign_info')
    db.query('REFRESH MATERIALIZED VIEW public.campaign_info_international')
    db.disconnect()
    duration.duration()
if __name__ == "__create__":
create()
if __name__ == "__refresh__":
refresh()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,738
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/cio_queue.py
|
import json
import os
import pydash
import sys
from .sa_database import Database
from .queue import QuasarQueue
from .utils import log, logerr
class CioQueue(QuasarQueue):
    """Consume Blink/C.io webhook events and persist them to cio.* scratch tables."""

    def __init__(self):
        self.amqp_uri = os.environ.get('AMQP_URI')
        self.blink_queue = os.environ.get('BLINK_QUEUE')
        self.blink_exchange = os.environ.get('BLINK_EXCHANGE')
        super().__init__(self.amqp_uri, self.blink_queue,
                         self.blink_exchange)
        self.db = Database()

    # Save customer sub data and dates.
    def _add_sub_event(self, data):
        """Persist a customer_subscribed event; returns the event id."""
        record = {
            'email_id': data['data']['email_id'],
            'customer_id': data['data']['customer_id'],
            'email_address': data['data']['email_address'],
            'event_id': data['event_id'],
            'timestamp': data['timestamp'],
            'event_type': data['event_type']
        }
        query = ''.join(("INSERT INTO cio.customer_event_scratch "
                         "(email_id, customer_id, email_address, "
                         "event_id, timestamp, "
                         "event_type) VALUES (:email_id,"
                         ":customer_id,:email_address,:event_id,"
                         "to_timestamp(:timestamp),:event_type)"))
        self.db.query_str(query, record)
        return data['event_id']

    # Save customer unsub data and dates.
    def _add_unsub_event(self, data):
        """Persist a customer_unsubscribed event, with template_id when present."""
        # Fix: template_id lives under data['data'] (see the record built below);
        # the original pydash path 'template_id' never matched, so the
        # template-aware branch was dead code.
        if pydash.get(data, 'data.template_id'):
            record = {
                'email_id': data['data']['email_id'],
                'customer_id': data['data']['customer_id'],
                'email_address': data['data']['email_address'],
                'template_id': data['data']['template_id'],
                'event_id': data['event_id'],
                'timestamp': data['timestamp'],
                'event_type': data['event_type']
            }
            query = ''.join(("INSERT INTO cio.customer_event_scratch "
                             "(email_id, customer_id,"
                             "email_address, template_id, event_id,"
                             "timestamp, event_type) "
                             "VALUES (:email_id,:customer_id,"
                             ":email_address,:template_id,:event_id,"
                             "to_timestamp(:timestamp),:event_type)"))
            self.db.query_str(query, record)
        else:
            record = {
                'email_id': data['data']['email_id'],
                'customer_id': data['data']['customer_id'],
                'email_address': data['data']['email_address'],
                'event_id': data['event_id'],
                'timestamp': data['timestamp'],
                'event_type': data['event_type']
            }
            query = ''.join(("INSERT INTO cio.customer_event_scratch "
                             "(email_id, customer_id,"
                             "email_address, event_id, "
                             "timestamp, event_type) "
                             "VALUES (:email_id,:customer_id,"
                             ":email_address,:event_id,"
                             "to_timestamp(:timestamp),:event_type)"))
            self.db.query_str(query, record)
        log(''.join(("Added customer event from "
                     "C.IO event id {}.")).format(data['event_id']))

    # Save email event data and dates, e.g. email_click.
    def _add_email_event(self, data):
        """Persist a generic email event (opened/converted/unsubscribed/...)."""
        record = {
            'email_id': data['data']['email_id'],
            'customer_id': data['data']['customer_id'],
            'email_address': data['data']['email_address'],
            'template_id': data['data']['template_id'],
            'event_id': data['event_id'],
            'timestamp': data['timestamp'],
            'event_type': data['event_type']
        }
        query = ''.join(("INSERT INTO cio.email_event_scratch "
                         "(email_id, customer_id, email_address, "
                         "template_id, event_id, timestamp, "
                         "event_type) VALUES "
                         "(:email_id,:customer_id,:email_address,"
                         ":template_id,:event_id,"
                         "to_timestamp(:timestamp),:event_type)"))
        self.db.query_str(query, record)
        log(''.join(("Added email event from "
                     "C.IO event id {}.")).format(data['event_id']))

    # Save email sent event.
    def _add_email_sent_event(self, data):
        """Persist an email_sent event (includes the subject line)."""
        record = {
            'email_id': data['data']['email_id'],
            'customer_id': data['data']['customer_id'],
            'email_address': data['data']['email_address'],
            'template_id': data['data']['template_id'],
            'subject': data['data']['subject'],
            'event_id': data['event_id'],
            'timestamp': data['timestamp']
        }
        query = ''.join(("INSERT INTO cio.email_sent_scratch "
                         "(email_id, customer_id, email_address, "
                         "template_id, subject, event_id, "
                         "timestamp) VALUES "
                         "(:email_id,:customer_id,:email_address,"
                         ":template_id,:subject,:event_id,"
                         "to_timestamp(:timestamp))"))
        self.db.query_str(query, record)
        log(''.join(("Added email event from "
                     "C.IO event id {}.")).format(data['event_id']))

    # Save email event data and dates, e.g. email_click.
    def _add_email_click_event(self, data):
        """Persist an email_clicked event (includes link href and link_id)."""
        record = {
            'email_id': data['data']['email_id'],
            'customer_id': data['data']['customer_id'],
            'email_address': data['data']['email_address'],
            'template_id': data['data']['template_id'],
            'subject': data['data']['subject'],
            'href': data['data']['href'],
            'link_id': data['data']['link_id'],
            'event_id': data['event_id'],
            'timestamp': data['timestamp'],
            'event_type': data['event_type']
        }
        query = ''.join(("INSERT INTO cio.email_event_scratch "
                         "(email_id, customer_id, email_address, "
                         "template_id, subject, href, link_id, "
                         "event_id, timestamp, "
                         "event_type) VALUES "
                         "(:email_id,:customer_id,:email_address,"
                         ":template_id,:subject,:href,:link_id,"
                         ":event_id,to_timestamp(:timestamp),"
                         ":event_type)"))
        self.db.query_str(query, record)
        log(''.join(("Added email event from "
                     "C.IO event id {}.")).format(data['event_id']))

    # Save email bounced event.
    def _add_email_bounced_event(self, data):
        """Persist an email_bounced event."""
        record = {
            'email_id': data['data']['email_id'],
            'customer_id': data['data']['customer_id'],
            'email_address': data['data']['email_address'],
            'template_id': data['data']['template_id'],
            'subject': data['data']['subject'],
            'event_id': data['event_id'],
            'timestamp': data['timestamp']
        }
        query = ''.join(("INSERT INTO cio.email_bounced_scratch "
                         "(email_id, customer_id, email_address, "
                         "template_id, subject, event_id, "
                         "timestamp) VALUES "
                         "(:email_id,:customer_id,:email_address,"
                         ":template_id,:subject,:event_id,"
                         "to_timestamp(:timestamp))"))
        self.db.query_str(query, record)
        log(''.join(("Added email bounced event from "
                     "C.IO event id {}.")).format(data['event_id']))

    def process_message(self, message_data):
        """Dispatch one queue message to the appropriate persistence handler."""
        if pydash.get(message_data, 'data.meta.message_source') == 'rogue':
            message_id = pydash.get(message_data, 'data.data.id')
            log("Ack'ing Rogue message id {}".format(message_id))
        else:
            data = message_data['data']
            event_type = pydash.get(data, 'event_type')
            # Set for checking email event types.
            # NOTE(review): 'email_bounced' here is unreachable - the explicit
            # elif above always handles it first.
            email_event = {
                'email_bounced',
                'email_converted',
                'email_opened',
                'email_unsubscribed'
            }
            try:
                if event_type == 'customer_subscribed':
                    self._add_sub_event(data)
                elif event_type == 'customer_unsubscribed':
                    self._add_unsub_event(data)
                elif event_type == 'email_clicked':
                    self._add_email_click_event(data)
                elif event_type == 'email_sent':
                    self._add_email_sent_event(data)
                elif event_type == 'email_bounced':
                    self._add_email_bounced_event(data)
                elif event_type in email_event:
                    self._add_email_event(data)
                else:
                    pass
            except KeyError as e:
                logerr("C.IO message missing {}".format(e))
            except Exception:
                # Fix: bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; narrow to Exception.
                logerr("Something went wrong with C.IO consumer!")
                sys.exit(1)
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,739
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/mel.py
|
from .sql_utils import run_sql_file_raw, refresh_materialized_view
def create():
    """(Re)create the member_event_log derived table from its SQL file."""
    run_sql_file_raw('./data/sql/derived-tables/mel.sql')
def create_for_dbt_validation():
    """Create the dbt-validation variant of the member_event_log table."""
    run_sql_file_raw('./data/sql/derived-tables/mel_dbt_validation.sql')
def refresh():
    """Refresh the member_event_log materialized view."""
    refresh_materialized_view('public.member_event_log')
if __name__ == "__create__":
create()
if __name__ == "__refresh__":
refresh()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,740
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/ghost_killer.py
|
from .database import Database
import time
db = Database()
def remove_ghost_posts():
    """Move posts tied to ghost-test signups out of rogue.posts.

    Posts are first copied into rogue.ghost_posts (so they drop out of
    official counts) and then deleted from rogue.posts.
    """
    # Get list of all posts to remove.
    posts = db.query(''.join(("SELECT DISTINCT p.id "
                              "FROM rogue.signups s "
                              "INNER JOIN (SELECT g.id "
                              "FROM rogue.signups g WHERE "
                              "g.why_participated = "
                              "'why_participated_ghost_test') ghost "
                              "ON s.id = ghost.id "
                              "INNER JOIN rogue.posts p "
                              "ON p.signup_id = s.id")))
    # Copy all posts into ghost posts table to remove from official counts.
    for post in posts:
        db.query_str(''.join(("INSERT INTO rogue.ghost_posts SELECT * FROM "
                              "rogue.posts p WHERE p.id = %s")),
                     (post,))
    # Remove posts from posts table.
    for post in posts:
        db.query_str(''.join(("DELETE FROM rogue.posts p WHERE "
                              "p.id = %s")),
                     (post,))
def remove_ghost_signups():
    """Move ghost-test signups out of rogue.signups.

    Signups are copied into rogue.ghost_signups, then deleted from
    rogue.signups (run after remove_ghost_posts so posts go first).
    """
    # Get list of all signups to remove.
    signups = db.query(''.join(("SELECT DISTINCT su.id FROM "
                                "rogue.signups su INNER JOIN "
                                "(SELECT DISTINCT s.id FROM rogue.signups s "
                                "WHERE s.why_participated = "
                                "'why_participated_ghost_test') ghost_ids "
                                "ON ghost_ids.id = su.id")))
    # Copy all signups into ghost signups table to remove from official counts.
    for signup in signups:
        db.query_str(''.join(("INSERT INTO rogue.ghost_signups SELECT * FROM "
                              "rogue.signups s WHERE s.id = %s")),
                     (signup,))
    # Remove signups from signups table.
    for signup in signups:
        db.query_str(''.join(("DELETE FROM rogue.signups s WHERE "
                              "s.id = %s")),
                     (signup,))
def main():
    """Remove ghost posts and signups, reporting the total run time."""
    # Fix: the original had a docstring-style string *after* this statement,
    # where it was a no-op expression; fold it into real comments/docs.
    start_time = time.time()  # Keep track of start time of script.
    remove_ghost_posts()
    remove_ghost_signups()
    db.disconnect()
    end_time = time.time()  # Record when script stopped running.
    duration = end_time - start_time  # Total duration in seconds.
    print('duration: ', duration)
if __name__ == "__main__":
main()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,741
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/create_post_actions.py
|
import os
from .sql_utils import run_sql_file
# Template substitutions for the post_actions SQL file (table name from env).
data = {'ft_rogue_actions': os.getenv('FT_ROGUE_ACTIONS')}
def main():
    """Create the post_actions derived table from its SQL template."""
    run_sql_file('./data/sql/derived-tables/post_actions.sql', data)
# Also exposed as post_actions_create via setup.py console_scripts.
if __name__ == '__main__':
    main()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,742
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/northstar_scraper.py
|
from oauthlib.oauth2 import BackendApplicationClient
import os
from requests_oauthlib import OAuth2Session
from .scraper import Scraper
class NorthstarScraper(Scraper):
    """Scraper for the Northstar users API with OAuth2 client-credentials auth.

    Fetches a bearer token on construction and transparently re-authenticates
    once when a request comes back 401.
    """

    def __init__(self, url):
        # Default query params: cursor pagination, 100 per page, plus the
        # extra user fields Northstar only returns when explicitly included.
        Scraper.__init__(self, url, params={
            'limit': 100, 'pagination': 'cursor',
            'include': ''.join(("last_name,email,mobile,"
                                "birthdate,addr_street1,"
                                "addr_street2,age,school_id"))})
        self.auth_headers = self.fetch_auth_headers()

    def fetch_auth_headers(self):
        """Fetch a fresh OAuth2 token; return the Authorization header dict."""
        oauth = OAuth2Session(client=BackendApplicationClient(
            client_id=os.environ.get('NS_CLIENT_ID')))
        scopes = ['admin', 'user']
        ns_client_id = os.environ.get('NS_CLIENT_ID')
        ns_client_secret = os.environ.get('NS_CLIENT_SECRET')
        new_token = oauth.fetch_token(self.url + '/v2/auth/token',
                                      client_id=ns_client_id,
                                      client_secret=ns_client_secret,
                                      scope=scopes)
        return {'Authorization': 'Bearer ' + str(new_token['access_token'])}

    # Class-body decorator (not a method): wraps a request method so a 401
    # response triggers one token refresh and a single retry.
    def authenticated(func):
        def _authenticated(self, *args, **kwargs):
            response = func(self, *args, **kwargs)
            if response.status_code == 401:
                self.auth_headers = self.fetch_auth_headers()
                response = func(self, *args, **kwargs)
            return response
        return _authenticated

    @authenticated
    def get(self, path, query_params=''):
        """GET `path` with auth headers; re-auths once on 401 via @authenticated."""
        return super().get(path, headers=self.auth_headers,
                           params=query_params)
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,743
|
blisteringherb/quasar
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
from pipenv.project import Project
from pipenv.utils import convert_deps_to_pip

# Derive install_requires from the Pipfile so there is a single source of truth.
# NOTE(review): convert_deps_to_pip is a pipenv-internal API; confirm it still
# exists in the pinned pipenv version.
pfile = Project(chdir=False).parsed_pipfile
requirements = convert_deps_to_pip(pfile['packages'], r=False)

setup(
    name="quasar",
    version="2019.11.14.0",
    packages=find_packages(),
    install_requires=requirements,
    # Console scripts are the real entry points for the quasar ETL jobs
    # (the modules' __name__ guards never fire on their own).
    entry_points={
        'console_scripts': [
            'bertly_refresh = quasar.bertly:refresh',
            'campaign_info_recreate = quasar.campaign_info:create',
            'campaign_info_refresh = quasar.campaign_info:refresh',
            'cio_consume = quasar.cio_consumer:main',
            'cio_import = quasar.cio_import_scratch_records:cio_import',
            'cio_bounced_backfill = quasar.cio_bounced_backfill:main',
            'cio_sent_backfill = quasar.cio_sent_backfill:main',
            'etl_monitoring = quasar.etl_monitoring:run_monitoring',
            'gdpr = quasar.gdpr_comply:gdpr_from_file',
            'mel_create = quasar.mel:create',
            'mel_create_for_dbt_validation = quasar.mel:create_for_dbt_validation',
            'mel_refresh = quasar.mel:refresh',
            'northstar_backfill = quasar.northstar_to_user_table:backfill',
            'northstar_full_backfill = quasar.northstar_to_user_table_full_backfill:backfill',
            'post_actions_create = quasar.create_post_actions:main',
            'rogue_ghost_killer = quasar.ghost_killer:main',
            'users_create = quasar.users:create',
            'users_refresh = quasar.users:refresh',
            'user_activity_create = quasar.user_activity:create',
            'user_activity_create_for_dbt_validation = quasar.user_activity:create_for_dbt_validation',
            'user_activity_refresh = quasar.user_activity:refresh'
        ],
    },
    author="",
    author_email="",
    description="",
    license="MIT",
    keywords=[],
    url="",
    classifiers=[
    ],
)
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,744
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/cio_import_scratch_records.py
|
from .sa_database import Database
from .utils import Duration, log
# Module-level DB handle and run timer shared by all functions below.
db = Database()
duration = Duration()
def import_records_event(table):
    """Copy rows from `table`_scratch into `table` for event-keyed tables.

    Targets tables whose primary key includes event_type; duplicates are
    skipped via ON CONFLICT ... DO NOTHING.
    """
    scratch = table + '_scratch'
    query = ''.join(("INSERT INTO {} SELECT * FROM {} "
                     "ON CONFLICT (email_id, customer_id, timestamp, "
                     "event_type) DO NOTHING"
                     "")).format(table, scratch)
    db.query(query)
def import_records(table):
    """Copy rows from `table`_scratch into `table`.

    For tables keyed on (email_id, customer_id, timestamp); duplicates are
    skipped via ON CONFLICT ... DO NOTHING.
    """
    scratch = table + '_scratch'
    query = ''.join(("INSERT INTO {} SELECT * FROM {} "
                     "ON CONFLICT (email_id, customer_id, timestamp) "
                     "DO NOTHING"
                     "")).format(table, scratch)
    db.query(query)
def truncate_scratch(table):
    """Empty the staging table for `table` so the consumer can resume writing."""
    db.query("TRUNCATE TABLE {}".format(table + '_scratch'))
def cio_import():
    """Drain all cio.* scratch tables into their primary tables, then truncate.

    Console-script entry point (cio_import in setup.py).
    """
    # List of cio tables to process.
    tables = ['cio.email_sent', 'cio.email_bounced']
    event_tables = ['cio.customer_event', 'cio.email_event']
    for table in tables:
        log("Importing records for table {}.".format(table))
        import_records(table)
        scratch = table + '_scratch'
        log("Truncating table {}.".format(scratch))
        truncate_scratch(table)
    for table in event_tables:
        log("Importing records for table {}.".format(table))
        import_records_event(table)
        scratch = table + '_scratch'
        log("Truncating table {}.".format(scratch))
        truncate_scratch(table)
    db.disconnect()
    duration.duration()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,745
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/database.py
|
import json
import os
import psycopg2
from .utils import QuasarException, logerr
# Psycopg2 vars.
# Default psycopg2 connection options from the environment.
# NOTE: module-level and mutated by Database.__init__ via opts.update();
# every Database/NorthstarDatabase instance shares this dict.
opts = {
    'user': os.environ.get('PG_USER'),
    'host': os.environ.get('PG_HOST'),
    'port': os.environ.get('PG_PORT'),
    'password': os.environ.get('PG_PASSWORD'),
    'database': os.environ.get('PG_DATABASE'),
    'sslmode': os.environ.get('PG_SSL')
}
def _connect(opts):
    """Open a psycopg2 connection; return None when the connect fails.

    Callers (Database.connect) treat a None return as a handled failure.
    """
    try:
        return psycopg2.connect(**opts)
    except psycopg2.InterfaceError as e:
        # Fix: the original raised QuasarException here, but a `return conn`
        # in the finally clause silently swallowed the in-flight exception,
        # so the de facto contract was "return None on failure".  Make that
        # explicit and at least log the cause instead of losing it.
        logerr("psycopg2 connect failed: {}".format(e))
        return None
class Database:
    """Thin psycopg2 wrapper: connection management plus query helpers."""

    def __init__(self, options={}):
        # NOTE(review): mutable default is benign here (options is only read),
        # but opts.update() mutates the shared module-level opts dict for every
        # instance - confirm that cross-instance bleed is intended.
        opts.update(options)
        self.connect()

    def connect(self):
        """(Re)open the connection and cursor using the module-level opts."""
        self.connection = _connect(opts)
        if self.connection is None:
            print("Error, couldn't connect to database with options:", opts)
        else:
            self.cursor = self.connection.cursor()

    def disconnect(self):
        """Close cursor and connection; returns the closed connection object."""
        self.cursor.close()
        self.connection.close()
        return self.connection

    def roll_reconnect(self):
        """Roll back the open transaction and establish a fresh connection."""
        self.connection.rollback()
        self.disconnect()
        self.connect()

    def query(self, query):
        """Parse and run DB query.
        Returns fetched rows, or {} for statements with no result set.
        On error, raise exception and log why.
        """
        try:
            self.cursor.execute(query)
            self.connection.commit()
            try:
                results = self.cursor.fetchall()
                return results
            except psycopg2.ProgrammingError:
                # No result set to fetch (e.g. INSERT/UPDATE/DDL).
                results = {}
                return results
        except psycopg2.DatabaseError as e:
            print(self.cursor.query)
            raise QuasarException(e)

    def query_str(self, query, string):
        """Parse and run a parameterized DB query (`string` holds the params).
        Returns fetched rows, or {} for statements with no result set.
        On error, raise exception and log why.
        """
        try:
            self.cursor.execute(query, string)
            self.connection.commit()
            try:
                results = self.cursor.fetchall()
                return results
            except psycopg2.ProgrammingError:
                results = {}
                return results
        except psycopg2.DatabaseError as e:
            print(self.cursor.query)
            raise QuasarException(e)

    def query_str_rogue(self, query, string, record,
                        event_id=None):
        """Parse and run DB query, on failure backup data.
        On query failure, assuming a single column table with data type jsonb,
        with column name "record", backup entire JSON record.
        Optional event_id for logging provided.
        """
        try:
            self.cursor.execute(query, string)
            self.connection.commit()
            try:
                results = self.cursor.fetchall()
                return results
            except psycopg2.ProgrammingError:
                results = {}
                return results
        except psycopg2.DatabaseError:
            logerr("The query: {} FAILED!".format(self.cursor.query))
            self.disconnect()
            self.connect()
            logerr("Backing up message {}.".format(event_id))
            self.cursor.execute(''.join(("INSERT INTO "
                                         "rogue.error_message VALUES (%s)")),
                                (json.dumps(record),))
class NorthstarDatabase(Database):
def __init__(self, options={}):
super().__init__(options)
def query(self, query, record):
"""Parse and run DB query.
Return On error, raise exception and log why.
"""
try:
self.cursor.execute(query)
self.connection.commit()
try:
results = self.cursor.fetchall()
return results
except psycopg2.ProgrammingError:
results = {}
return results
except psycopg2.DatabaseError:
print(self.cursor.query)
self.connection = _connect(opts)
if self.connection is None:
print("Error, couldn't connect to database with opts:", opts)
else:
self.cursor = self.connection.cursor()
self.cursor.execute(''.join(("INSERT INTO "
"northstar.unprocessed_users "
"(northstar_record) VALUES "
"(%s)")), (json.dumps(record),))
self.connection.commit()
print("ID {} not processed. Backing up.".format(record['id']))
def query_str(self, query, string, record):
"""Parse and run DB query.
Return On error, raise exception and log why.
"""
try:
self.cursor.execute(query, string)
self.connection.commit()
try:
results = self.cursor.fetchall()
return results
except psycopg2.ProgrammingError:
results = {}
return results
except psycopg2.DatabaseError:
print(self.cursor.query)
self.connection = _connect(opts)
if self.connection is None:
print("Error, couldn't connect to database with opts:", opts)
else:
self.cursor = self.connection.cursor()
self.cursor.execute(''.join(("INSERT INTO "
"northstar.unprocessed_users "
"(northstar_record) VALUES "
"(%s)")), (json.dumps(record),))
self.connection.commit()
print("ID {} not processed. Backing up.".format(record['id']))
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,746
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/user_activity.py
|
from .sql_utils import run_sql_file_raw, refresh_materialized_view
def create():
run_sql_file_raw('./data/sql/derived-tables/user_activity.sql')
def create_for_dbt_validation():
run_sql_file_raw(''.join(("./data/sql/derived-tables/"
"user_activity_dbt_validation.sql")))
def refresh():
refresh_materialized_view('public.user_activity')
if __name__ == "__create__":
create()
if __name__ == "__refresh__":
refresh()
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,747
|
blisteringherb/quasar
|
refs/heads/master
|
/quasar/gdpr_comply.py
|
import csv
import os
import sys
from .sql_utils import run_sql_file
from .utils import log
def remove_northstar(nsid):
# Removes Northstar user data for GDPR compliance.
data = {'users': os.getenv('NORTHSTAR_USERS'), 'nsid': nsid}
run_sql_file('./data/sql/misc/gdpr_northstar_removal.sql', data)
def remove_cio(nsid):
# Removes CIO user data for GDPR compliance.
data = {'customer_event': os.getenv('CIO_CUSTOMER_EVENT'),
'email_bounced': os.getenv('CIO_EMAIL_BOUNCED'),
'email_event': os.getenv('CIO_EMAIL_EVENT'),
'email_sent': os.getenv('CIO_EMAIL_SENT'),
'event_log': os.getenv('CIO_EVENT_LOG'),
'nsid': nsid}
run_sql_file('./data/sql/misc/gdpr_cio_removal.sql', data)
def gdpr_from_file():
with open(sys.argv[1]) as csvfile:
ids = csv.reader(csvfile, delimiter=',')
for id in ids:
# First line might contain "id" as column name
if id[0] == 'id':
pass
else:
log("Removing Northstar ID {}".format(id))
remove_northstar(id[0])
remove_cio(id[0])
|
{"/quasar/northstar_to_user_table.py": ["/quasar/northstar_scraper.py"], "/quasar/ghost_killer.py": ["/quasar/database.py"]}
|
5,750
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/models.py
|
import os
from typing import IO
from keiba_machine_learning.models import Race as Base
from keiba_machine_learning.netkeiba.constants import DATABASE_PAGE_BASE_URL, RACE_DATA_DIR, ENCODING_OF_WEB_PAGE
class Race(Base):
def __hash__(self) -> int:
return int(f'{self.year}{self.race_track.value:02d}{self.series_number:02d}{self.day_number:02d}{self.race_number:02d}')
@property
def id(self) -> int:
return self.__hash__()
@property
def url(self) -> str:
"""
以下のようなnetkeibaでのレース結果ページを返す
https://db.netkeiba.com/race/201901010101
Returns:
str: netkeibaでのレース結果ページのURL
"""
return '/'.join([str(url_parts)
for url_parts in [DATABASE_PAGE_BASE_URL, "race", self.id]])
@property
def file_path(self) -> str:
return os.path.join(RACE_DATA_DIR, f'{self.id}.html')
@property
def file(self) -> IO:
return open(self.file_path, mode='r', encoding=ENCODING_OF_WEB_PAGE)
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,751
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/scripts/netkeiba/download_race_pages.py
|
"""netkeibaからレースファイルをダウンロードするスクリプト
例えば以下のURLのようなものがレースファイルである
https://db.netkeiba.com/race/201901010101
このスクリプトでは指定された年のレースのファイルを全てダウンロードする
netkeiba側の仕様でレースデータが存在しないページにアクセスしても404をHTTPステータスコードとしてレスポンスしないので、
ここでは内容を気にせず保存を行う
(データが存在しないことによる異常の処理はパーサーの責務とする)
Args:
year (int): コマンドライン引数としてダウンロード対象とするレースが開催された年を指定する
Examples:
※ 実行時はパスを通すこと
※ 以下はコマンドライン上にて
source venv/bin/activate
export PYTHONPATH=".:$PYTHONPATH"
python scripts/netkeiba/download_race_pages.py 2019
"""
import os
import time
import sys
import urllib.request
from tqdm import tqdm
from keiba_machine_learning.netkeiba.constants import RACE_DATA_DIR
from keiba_machine_learning.models import RaceTrac
from keiba_machine_learning.netkeiba.models import Race
os.makedirs(RACE_DATA_DIR, exist_ok=True)
args = sys.argv
YEAR = int(args[1])
races = []
for race_track in RaceTrac:
for series_number in range(1, Race.MAX_SERIES_NUMBER + 1):
for day_number in range(1, Race.MAX_DAY_NUMBER + 1):
for race_number in range(1, Race.MAX_RACE_NUMBER + 1):
races.append(Race(year=YEAR, race_track=race_track,
series_number=series_number, day_number=day_number, race_number=race_number))
for race in tqdm(races):
if os.path.exists(race.file_path):
continue
else:
urllib.request.urlretrieve(race.url, race.file_path)
time.sleep(1)
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,752
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/types.py
|
from datetime import datetime
from typing import TypedDict
from keiba_machine_learning.models import RaceTrac, TrackKind, TrackDirection, TrackSurface, Weather, HorseGender
class RaceInformation(TypedDict):
"""レース情報をスクレイピングした結果として返すべき dict の構造を定義するクラス"""
title: str
race_track: RaceTrac
track_kind: TrackKind
track_direction: TrackDirection
race_distance_by_meter: int
track_surface: TrackSurface
weather: Weather
race_number: int
starts_at: datetime
class RaceRecord(TypedDict):
"""レース結果の1行として返すべき dict の構造を定義するクラス
各々の着に対応
例えば以下のページだと9頭立てなので1〜9着まであり、各々の着にこのdictが対応する
https://db.netkeiba.com/race/201901010101
> 1 1 1 ゴルコンダ 牡2 54 ルメール 1:48.3 ** 1-1-1-1 36.5 1.4 1 518(-16) [東]木村哲也 サンデーレーシング 500.0
上記のようなレース結果の表の1行を保持するデータ構造
※ 全項目を保持するわけではない
"""
# TODO 重要指標である「着差」を入れる
# アタマ、クビ、ハナ、n馬身など競馬特有の尺度をどう保持するのが適切なのかは一考する必要がある
order_of_placing: int # 着順
bracket_number: int # 枠番
horse_number: int # 馬番
horse_id: int
horse_name: str
horse_age: int
horse_gender: HorseGender
impost: float # 斤量
jockey_id: str # ※ "05203"のような0埋めで5桁が大半だが引退した騎手だと"z0004"みたいな変則的な書式も存在している
jockey_name: str
race_time: float # タイム
win_betting_ratio: float # 単勝倍率
favorite_order: int # 人気
horse_weight: float # 馬体重
weight_change: float # 体重変化
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,753
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/scrapers.py
|
import re
from datetime import datetime
from typing import IO, List
from bs4 import BeautifulSoup
from keiba_machine_learning.models import RaceTracFactory, TrackKindFactory, TrackDirectionFactory, TrackSurfaceFactory, WeatherFactory, HorseGenderFactory, TrackSurface
from keiba_machine_learning.types import RaceInformation, RaceRecord
# NOTE: バージョニングは必要に応じて行う
# 例えばスクレイピング先がリニューアルされてDOMががらりと変わってしまったらこのスクリプトは使えなくなる
# その場合、コード自体は残しておいてリニューアルされたサイトに対応するものは新しいバージョンで実装を行う
# その際に名前空間も切る
# (YAGNIの原則に則って今の段階では作らない)
class DataNotFound(Exception):
pass
class IncompatibleDataDetected(Exception):
pass
class RaceInformationScraper:
@staticmethod
def scrape(file: IO) -> RaceInformation:
"""
Args:
file (IO): netkeibaのレース結果ページのHTMLファイル
Returns:
RaceInformation
"""
soup = BeautifulSoup(file, 'html.parser')
# netkeiba側の仕様でレースデータが存在しないページにアクセスしても404をHTTPステータスコードとしてレスポンスしないので、
# データが存在しない掲載されていないファイルが渡ってくることも想定してここでデータがない場合の制御をする
race_result_table = soup.find('table', attrs={'summary': 'レース結果'})
if race_result_table is None:
raise DataNotFound
title_element = soup.select_one(
'#main > div > div > div > diary_snap > div > div > dl > dd > h1')
text_under_the_title = soup.select_one(
'#main > div > div > div > diary_snap > div > div > dl > dd > p > diary_snap_cut > span').get_text()
race_number_element = soup.select_one(
'#main > div > div > div > diary_snap > div > div > dl > dt')
if s := re.search(r'(\d{4})m', text_under_the_title):
race_distance_by_meter = int(s.group(1))
else:
raise ValueError("can't parse race distance.")
if s := re.search(r'\d+', race_number_element.get_text()):
race_number = int(s.group())
else:
raise ValueError("can't parse race number.")
race_track_name = soup.select_one(
'#main > div > div > div > ul > li > a.active').get_text()
track_kind_mark = text_under_the_title[0]
if track_kind_mark == '障':
raise IncompatibleDataDetected
track_direction_mark = text_under_the_title[1]
if track_direction_mark == '直':
raise IncompatibleDataDetected
if s := re.search(r'天候 : (\w+)', text_under_the_title):
weather_mark = s.group(1)
else:
raise ValueError("can't parse weather.")
return {
'title': title_element.get_text(),
'race_track': RaceTracFactory.create(race_track_name),
'track_kind': TrackKindFactory.create(track_kind_mark),
'track_direction': TrackDirectionFactory.create(track_direction_mark),
'race_distance_by_meter': race_distance_by_meter,
'track_surface': TrackSurfaceParser.parse(text_under_the_title),
'weather': WeatherFactory.create(weather_mark),
'race_number': race_number,
'starts_at': datetime(2019, 7, 27, 9, 50),
}
class RaceResultScraper:
@staticmethod
def scrape(file: IO) -> List[RaceRecord]:
"""
Args:
file (IO): netkeibaのレース結果ページのHTMLファイル
Returns:
List[RaceRecord]
"""
soup = BeautifulSoup(file, 'html.parser')
race_result_table = soup.find('table', attrs={'summary': 'レース結果'})
if race_result_table is None:
raise DataNotFound
race_result_table_rows = race_result_table.find_all('tr')
race_records = []
# 最初の要素は項目行(header)なのでスキップ
for row in race_result_table_rows[1:]:
cells = row.find_all('td')
try:
order_of_placing = int(cells[0].get_text())
except ValueError:
# 正常な結果として入ってくる自然数以外に
# 2(降) 、中、除、取 などが入ってくる
# 最初のものは降着とわかるが、それ以外のものはまだ意味がわかってないのでいったん記録しない
continue
bracket_number = int(cells[1].get_text())
horse_number = int(cells[2].get_text())
if s := re.search(r'horse/(\d+)', cells[3].find('a')['href']):
horse_id = int(s.group(1))
else:
raise ValueError("can't parse horse id.")
horse_name = cells[3].get_text().strip()
horse_age = int(cells[4].get_text()[1])
horse_gender = HorseGenderFactory.create(cells[4].get_text()[0])
impost = float(cells[5].get_text())
if s := re.search(r'jockey/(\d+)', cells[6].find('a')['href']):
jockey_id = s.group(1)
else:
raise ValueError("can't parse jockey id.")
jockey_name = cells[6].get_text().strip()
if f := re.findall(r'^(\d{1}):(\d{2})\.(\d{1})', cells[7].get_text()):
minute, second, split_second = [
int(time_data) for time_data in f[0]]
race_time = (minute * 60) + second + (split_second * 0.1)
else:
raise ValueError("can't parse jockey id.")
win_betting_ratio = float(cells[12].get_text())
favorite_order = int(cells[13].get_text())
if f := re.findall(r'(\d{3})\(([+-]?\d{1,2})\)', cells[14].get_text()):
horse_weight, weight_change = [
int(weight_data) for weight_data in f[0]]
else:
raise ValueError("can't parse weight data.")
race_record: RaceRecord = {
'order_of_placing': order_of_placing,
'bracket_number': bracket_number,
'horse_number': horse_number,
'horse_id': horse_id,
'horse_name': horse_name,
'horse_age': horse_age,
'horse_gender': horse_gender,
'impost': impost,
'jockey_id': jockey_id,
'jockey_name': jockey_name,
'race_time': race_time,
'win_betting_ratio': win_betting_ratio,
'favorite_order': favorite_order,
'horse_weight': horse_weight,
'weight_change': weight_change,
}
race_records.append(race_record)
return race_records
class TrackSurfaceParser:
@staticmethod
def parse(text: str) -> TrackSurface:
"""
Args:
text (str): レース情報のテキスト
例:
ダ左1200m / 天候 : 曇 / ダート : 良 / 発走 : 13:10
Returns:
TrackSurface
"""
if s := re.search(r'(芝|ダート) : (\w+)', text):
return TrackSurfaceFactory.create(s.group(2))
else:
raise ValueError("can't parse track surface.")
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,754
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py
|
import pytest
import os
from keiba_machine_learning.models import HorseGender
from keiba_machine_learning.netkeiba.scrapers import RaceResultScraper, DataNotFound
from keiba_machine_learning.netkeiba.constants import ENCODING_OF_WEB_PAGE
base_path = os.path.dirname(os.path.abspath(__file__))
def test_to_scrape_general_race_result():
file_path = os.path.normpath(os.path.join(
base_path, "./fixtures/201901010101.html"))
with open(file_path, mode="r", encoding=ENCODING_OF_WEB_PAGE) as file:
expect_data = [
{
'order_of_placing': 1,
'bracket_number': 1,
'horse_number': 1,
'horse_id': 2017105318,
'horse_name': 'ゴルコンダ',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 54,
'jockey_id': '05339',
'jockey_name': 'ルメール',
'race_time': 108.3,
'win_betting_ratio': 1.4,
'favorite_order': 1,
'horse_weight': 518,
'weight_change': -16,
},
{
'order_of_placing': 2,
'bracket_number': 3,
'horse_number': 3,
'horse_id': 2017104612,
'horse_name': 'プントファイヤー',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 54,
'jockey_id': '05203',
'jockey_name': '岩田康誠',
'race_time': 110.1,
'win_betting_ratio': 3.5,
'favorite_order': 2,
'horse_weight': 496,
'weight_change': -8,
},
{
'order_of_placing': 3,
'bracket_number': 4,
'horse_number': 4,
'horse_id': 2017103879,
'horse_name': 'ラグリマスネグラス',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 51,
'jockey_id': '01180',
'jockey_name': '団野大成',
'race_time': 110.9,
'win_betting_ratio': 46.6,
'favorite_order': 6,
'horse_weight': 546,
'weight_change': 6,
},
{
'order_of_placing': 4,
'bracket_number': 8,
'horse_number': 9,
'horse_id': 2017106259,
'horse_name': 'キタノコドウ',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 51,
'jockey_id': '01179',
'jockey_name': '菅原明良',
'race_time': 111.5,
'win_betting_ratio': 56.8,
'favorite_order': 7,
'horse_weight': 458,
'weight_change': -8,
},
{
'order_of_placing': 5,
'bracket_number': 5,
'horse_number': 5,
'horse_id': 2017104140,
'horse_name': 'ネモフィラブルー',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 54,
'jockey_id': '01062',
'jockey_name': '川島信二',
'race_time': 111.7,
'win_betting_ratio': 140.3,
'favorite_order': 9,
'horse_weight': 436,
'weight_change': 0,
},
{
'order_of_placing': 6,
'bracket_number': 8,
'horse_number': 8,
'horse_id': 2017101930,
'horse_name': 'マイネルラクスマン',
'horse_age': 2,
'horse_gender': HorseGender.MALE,
'impost': 54,
'jockey_id': '01091',
'jockey_name': '丹内祐次',
'race_time': 112.1,
'win_betting_ratio': 9.7,
'favorite_order': 3,
'horse_weight': 480,
'weight_change': 8,
},
{
'order_of_placing': 7,
'bracket_number': 2,
'horse_number': 2,
'horse_id': 2017100184,
'horse_name': 'サンモンテベロ',
'horse_age': 2,
'horse_gender': HorseGender.FEMALE,
'impost': 54,
'jockey_id': '01109',
'jockey_name': '黛弘人',
'race_time': 112.5,
'win_betting_ratio': 114.7,
'favorite_order': 8,
'horse_weight': 450,
'weight_change': 2,
},
{
'order_of_placing': 8,
'bracket_number': 7,
'horse_number': 7,
'horse_id': 2017102953,
'horse_name': 'エスカレーション',
'horse_age': 2,
'horse_gender': HorseGender.FEMALE,
'impost': 54,
'jockey_id': '01093',
'jockey_name': '藤岡佑介',
'race_time': 112.5,
'win_betting_ratio': 26.1,
'favorite_order': 5,
'horse_weight': 448,
'weight_change': -4,
},
{
'order_of_placing': 9,
'bracket_number': 6,
'horse_number': 6,
'horse_id': 2017102421,
'horse_name': 'セイウンジュリア',
'horse_age': 2,
'horse_gender': HorseGender.FEMALE,
'impost': 54,
'jockey_id': '01032',
'jockey_name': '池添謙一',
'race_time': 112.6,
'win_betting_ratio': 16.4,
'favorite_order': 4,
'horse_weight': 470,
'weight_change': 0,
},
]
assert RaceResultScraper.scrape(file) == expect_data
def test_to_scrape_empty_page():
file_path = os.path.normpath(os.path.join(
base_path, "./fixtures/empty_page.html"))
with open(file_path, mode="r", encoding=ENCODING_OF_WEB_PAGE) as file:
with pytest.raises(DataNotFound):
assert RaceResultScraper.scrape(file)
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,755
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/models.py
|
from enum import Enum
from pydantic import Field
from pydantic.dataclasses import dataclass
import datetime
class RaceTrac(Enum):
"""競馬場に対応するモデル
東京競馬場・阪神競馬場など
全10場が個々のオブジェクトに対応
"""
SAPPORO = 1
HAKODATE = 2
FUKUSHIMA = 3
NIGATA = 4
TOKYO = 5
NAKAYAMA = 6
CHUKYO = 7
KYOTO = 8
HANSHIN = 9
KOKURA = 10
class RaceTracFactory:
@staticmethod
def create(race_track_name: str) -> RaceTrac:
""" 文字列からRaceTracオブジェクトを生成する
Args:
race_track_name (str): 競馬場の名前
Returns:
RaceTrac:
"""
NAMES_INDEXED_BY_MARK_STR = {
'札幌': 'SAPPORO',
'函館': 'HAKODATE',
'福島': 'FUKUSHIMA',
'新潟': 'NIGATA',
'東京': 'TOKYO',
'中山': 'NAKAYAMA',
'中京': 'CHUKYO',
'京都': 'KYOTO',
'阪神': 'HANSHIN',
'小倉': 'KOKURA',
}
return RaceTrac[NAMES_INDEXED_BY_MARK_STR[race_track_name]]
@dataclass
class Race:
""" 必要最低限の属性のみを保持したレースの基底モデル """
# ブラウザURL直打ちして2着以下も取得できた年を暫定的に指定
# 1985年はページ自体は閲覧できるが1着しか見れない(ログインすれば見れる旨は記載されていた)
OLDEST_READABLE_YEAR = 1986
# 2020年東京競馬場を基準にURL直打ちして確認したところ5までしかなかった
# バッファ取って以下の値とした
MAX_SERIES_NUMBER = 7
# 上記と同様
# 9日目まではあったがバッファを取って以下の値に
MAX_DAY_NUMBER = 10
MAX_RACE_NUMBER = 12
race_track: RaceTrac
year: int = Field(ge=OLDEST_READABLE_YEAR, le=datetime.date.today().year)
series_number: int = Field(ge=1, le=MAX_SERIES_NUMBER)
day_number: int = Field(ge=1, le=MAX_DAY_NUMBER)
race_number: int = Field(ge=1, le=MAX_RACE_NUMBER)
class Weather(Enum):
CLOUD = 1
FINE = 2
RAIN = 3
LIGHT_RAIN = 4
LIGHT_SNOW = 5
SNOW = 6
class WeatherFactory:
@staticmethod
def create(weather_name: str) -> Weather:
""" 文字列からWeatherオブジェクトを生成する
Args:
weather_name (str): 曇 | 晴 | 雨 | 小雨 | 小雪 | 雪
Returns:
Weather:
"""
NAMES_INDEXED_BY_MARK_STR = {
'曇': 'CLOUD',
'晴': 'FINE',
'雨': 'RAIN',
'小雨': 'LIGHT_RAIN',
'小雪': 'LIGHT_SNOW',
'雪': 'SNOW',
}
return Weather[NAMES_INDEXED_BY_MARK_STR[weather_name]]
class TrackDirection(Enum):
LEFT = 1
RIGHT = 2
class TrackDirectionFactory:
@staticmethod
def create(track_direction_name: str) -> TrackDirection:
"""文字列からTrackDirectionオブジェクトを生成する
Args:
track_direction_name (str): 右 | 左
Returns:
TrackDirection:
"""
NAMES_INDEXED_BY_MARK_STR = {
'左': 'LEFT',
'右': 'RIGHT',
}
return TrackDirection[NAMES_INDEXED_BY_MARK_STR[track_direction_name]]
class TrackKind(Enum):
GRASS = 1
DIRT = 2
JUMP = 3
class TrackKindFactory:
@staticmethod
def create(track_kind_name: str) -> TrackKind:
"""文字列からTrackKindオブジェクトを生成する
Args:
track_kind_name (str): 芝 | ダート | 障害
Returns:
TrackKind:
"""
NAMES_INDEXED_BY_MARK_STR = {
'芝': 'GRASS',
'ダート': 'DIRT',
'ダ': 'DIRT',
'障害': 'JUMP',
'障': 'JUMP',
}
return TrackKind[NAMES_INDEXED_BY_MARK_STR[track_kind_name]]
class TrackSurface(Enum):
GOOD_TO_FIRM = 1 # 馬場が芝だと "GOOD_TO_FIRM"で、ダートだと"Standard"らしいが前者で統一
GOOD = 2
YIELDING = 3 # これもダートだと "Muddy" らしいが芝の用語だけを使う
SOFT = 4 # 同じくダートだと "Sloppy" らしいが芝の用語だけを使う
class TrackSurfaceFactory:
@staticmethod
def create(track_surface_name: str) -> TrackSurface:
"""文字列からTrackSurfaceオブジェクトを生成する
Args:
track_surface_name (str): 良 | 稍重 | 重 | 不良
Returns:
TrackSurface:
"""
NAMES_INDEXED_BY_MARK_STR = {
'良': 'GOOD_TO_FIRM',
'稍重': 'GOOD',
'重': 'YIELDING',
'不良': 'SOFT',
}
return TrackSurface[NAMES_INDEXED_BY_MARK_STR[track_surface_name]]
class HorseGender(Enum):
MALE = 1
FEMALE = 2
CASTRATED = 3
class HorseGenderFactory:
@staticmethod
def create(gender_string: str) -> HorseGender:
"""文字列からGenderオブジェクトを生成する
Args:
gender_string (str): 牡 | 牝 | セ
※ セ は騸馬(去勢された牡馬)を意味する
Returns:
HorseGender
"""
NAMES_INDEXED_BY_MARK_STR = {
'牡': 'MALE',
'牝': 'FEMALE',
'セ': 'CASTRATED',
}
return HorseGender[NAMES_INDEXED_BY_MARK_STR[gender_string]]
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,756
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/scripts/netkeiba/create_race_result_data_frame.py
|
"""netkeibaからダウンロードしたHTMLからpandasのDataFrameを生成するスクリプト
Examples:
※ 実行時はパスを通すこと
※ 事前にファイルが用意されていること(`./download_race_pages.py` が実行済みであること)
※ 以下はコマンドライン上にて
source venv/bin/activate
export PYTHONPATH=".:$PYTHONPATH"
python scripts/netkeiba/create_race_result_data_frame.py
"""
import os
from tqdm import tqdm
import pandas as pd
from keiba_machine_learning.netkeiba.constants import RACE_DATA_DIR, ENCODING_OF_WEB_PAGE
from keiba_machine_learning.netkeiba.scrapers import RaceInformationScraper, RaceResultScraper, DataNotFound, IncompatibleDataDetected
race_records = []
for file_name in tqdm(os.listdir(RACE_DATA_DIR)):
file_path = os.path.normpath(os.path.join(RACE_DATA_DIR, file_name))
with open(file_path, mode="r", encoding=ENCODING_OF_WEB_PAGE) as file:
try:
race_id, _ = file_name.split('.')
race_id = int(race_id)
except ValueError:
continue
try:
race_information = RaceInformationScraper.scrape(file)
file.seek(0)
rows = [{
'race_id': race_id,
'race_track': race_information['race_track'].value,
'track_kind': race_information['track_kind'].value,
'track_direction': race_information['track_direction'].value,
'race_distance_by_meter': race_information['race_distance_by_meter'],
'track_surface': race_information['track_surface'].value,
'weather': race_information['weather'].value,
'race_number': race_information['race_number'],
'starts_at': race_information['starts_at'],
'order_of_placing': race_record['order_of_placing'],
'bracket_number': race_record['bracket_number'],
'horse_number': race_record['horse_number'],
'horse_id': race_record['horse_id'],
'horse_name': race_record['horse_name'],
'horse_age': race_record['horse_age'],
'horse_gender': race_record['horse_gender'].value,
'impost': race_record['impost'],
'jockey_id': race_record['jockey_id'],
'jockey_name': race_record['jockey_name'],
'race_time': race_record['race_time'],
'win_betting_ratio': race_record['win_betting_ratio'],
'favorite_order': race_record['favorite_order'],
'horse_weight': race_record['horse_weight'],
'weight_change': race_record['weight_change'],
} for race_record in RaceResultScraper.scrape(file)]
race_records.extend(rows)
except DataNotFound:
pass
except IncompatibleDataDetected:
pass
except Exception as e:
print(f"race_id: {race_id} can't parse.")
raise e
df = pd.DataFrame([], columns=race_records[0].keys())
df = pd.concat([df, pd.DataFrame.from_dict(race_records)])
df.to_pickle(os.path.normpath(os.path.join(
RACE_DATA_DIR, 'race_results_data_frame.pickle')))
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,757
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py
|
import pytest
import os
from datetime import datetime
from keiba_machine_learning.models import RaceTrac, TrackKind, TrackDirection, TrackSurface, Weather
from keiba_machine_learning.netkeiba.scrapers import RaceInformationScraper, DataNotFound, IncompatibleDataDetected
from keiba_machine_learning.netkeiba.constants import ENCODING_OF_WEB_PAGE
base_path = os.path.dirname(os.path.abspath(__file__))
def test_to_scrape_general_race_information():
file_path = os.path.normpath(os.path.join(
base_path, "./fixtures/201901010101.html"))
with open(file_path, mode="r", encoding=ENCODING_OF_WEB_PAGE) as file:
expect_data = {
'title': '2歳未勝利',
'race_track': RaceTrac.SAPPORO,
'track_kind': TrackKind.GRASS,
'track_direction': TrackDirection.RIGHT,
'race_distance_by_meter': 1800,
'track_surface': TrackSurface.GOOD_TO_FIRM,
'weather': Weather.CLOUD,
'race_number': 1,
'starts_at': datetime(2019, 7, 27, 9, 50),
}
assert RaceInformationScraper.scrape(file) == expect_data
def test_to_scrape_disability_race_page():
file_path = os.path.normpath(os.path.join(
base_path, "./fixtures/disability_race_page.html"))
with open(file_path, mode="r", encoding=ENCODING_OF_WEB_PAGE) as file:
with pytest.raises(IncompatibleDataDetected):
assert RaceInformationScraper.scrape(file)
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,758
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/tests/test_race.py
|
from keiba_machine_learning.models import RaceTrac
from keiba_machine_learning.netkeiba.models import Race
def test_identifier():
race = Race(year=2020, race_track=RaceTrac.SAPPORO,
series_number=1, day_number=1, race_number=1)
assert race.id == 202001010101
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,759
|
k0kishima/machine_learning_hands_on
|
refs/heads/master
|
/keiba_machine_learning/netkeiba/constants.py
|
import os
from constants import DATA_DIR
DATABASE_PAGE_BASE_URL = "https://db.netkeiba.com"
ENCODING_OF_WEB_PAGE = "EUC-JP"
RACE_DATA_DIR = os.path.join(DATA_DIR, "netkeiba", "race")
|
{"/keiba_machine_learning/netkeiba/models.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/download_race_pages.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"], "/keiba_machine_learning/types.py": ["/keiba_machine_learning/models.py"], "/keiba_machine_learning/netkeiba/scrapers.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/types.py"], "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/scripts/netkeiba/create_race_result_data_frame.py": ["/keiba_machine_learning/netkeiba/constants.py", "/keiba_machine_learning/netkeiba/scrapers.py"], "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/scrapers.py", "/keiba_machine_learning/netkeiba/constants.py"], "/keiba_machine_learning/netkeiba/tests/test_race.py": ["/keiba_machine_learning/models.py", "/keiba_machine_learning/netkeiba/models.py"]}
|
5,773
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/views.py
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, HttpResponseForbidden
from django.urls import reverse, reverse_lazy
from django.views import generic
from django.contrib.auth import authenticate, login
#For @login_required
from django.contrib.auth.decorators import login_required
#For UserCreationForm
from django.contrib.auth.forms import UserCreationForm
#Because you can't decorate classes...
from django.utils.decorators import method_decorator
# Create your views here.
from .models import *
from reminders.forms import *
#Reverse lazy to stop there being a circular import error.
@method_decorator(login_required, name='dispatch')
class IndexView(generic.ListView):
template_name = 'reminders/index.html'
context_object_name = 'tasks'
next = reverse_lazy("adulting:index")
def get_queryset(self):
return Task.objects.filter(user=self.request.user)
@method_decorator(login_required,name='dispatch')
class MasonryView(generic.ListView):
template_name = 'reminders/index_masonry.html'
context_object_name = 'tasks'
next = reverse_lazy("adulting:index")
form_media = TaskForm()
form_media = str(form_media.media)
def get_context_data(self,**kwargs):
#Ask yourself the question, is this necessary?
#Add in the form media for the datepicker widget
#The 'create reminder' button depends on the media you supply to the form - if you change the form, you want the button to still work, because it's a pain in the arse.
context = super(MasonryView,self).get_context_data(**kwargs)
context['form']=str(self.form_media)
return context
def get_queryset(self):
return Task.objects.filter(user=self.request.user).order_by('-entry_date')
class RegisterView(generic.edit.CreateView):
template_name = 'reminders/register.html'
form_class = UserCreationForm
success_url = '/'
class TaskDetailView(generic.DetailView):
model = Task
@method_decorator(login_required,name='dispatch')
class TaskDeleteView(generic.edit.DeleteView):
#def ajax to send Json instead
model = Task
success_url = reverse_lazy("reminders:index")
#Ajax response mixin.
class AJAXMixin(object):
def form_invalid(self,form):
response = super(AJAXMixin,self).form_invalid(form)
if self.request.is_ajax:
return JsonResponse(form.errors,status=400)
else:
return response
def form_valid(self,form):
#Redirects to success_url normally
response = super(AJAXMixin,self).form_valid(form)
if self.request.is_ajax():
#Let the view object query the object's PK.
data = { 'pk': self.object.pk, }
return JsonResponse(data)
else:
#Defined elsewhere
return response
@method_decorator(login_required, name='dispatch')
class TaskCreateView(AJAXMixin,generic.edit.CreateView):
model = Task
template_name_suffix = '_create'
success_url = '/'
form_class = TaskForm
def form_valid(self,form):
form.instance.user = self.request.user
return super(TaskCreateView,self).form_valid(form)
@login_required
def TaskCreateAjaxView(request):
if request.is_ajax():
form = TaskForm()
#Pass the form to the JSON so it can be dynamically rendered.
data = { 'status':'200', 'form':form.as_p() }
return JsonResponse(data)
else:
return HttpResponseForbidden("Maybe you meant to go to <a href='{0}'>{0}</a>?".format(reverse_lazy("reminders:create")))
class PerformanceCreateView(generic.edit.CreateView):
model = Performance
fields = ['perf_date']
template_name_suffix = '_update_form'
def get_context_data(self,**kwargs):
context = super(PerformanceChangeView,self).get_context_data(**kwargs)
return context
def PerformView(request,task_id):
the_task = get_object_or_404(Task,pk=task_id)
if request.method == "POST":
new_perf = Performance(perf_date=datetime.now(),task=the_task)
if request.is_ajax:
data = {}
try:
new_perf.save()
data["message"]="Success"
data["is_countdown"] = the_task.countdown
return JsonResponse(data)
except Exception:
data["message"]="Failure"
return JsonResponse(data)
else:
return HTTPResponseRedirect(reverse("adulting:index"))
if request.method == "GET":
form = PerformanceForm
extra_context = {}
extra_context['form'] = form
return render(request,"reminders/performance_update_form.html",extra_context)
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,774
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/migrations/0007_auto_20170225_0041.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-25 00:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reminders', '0006_task_colour'),
]
operations = [
migrations.AlterField(
model_name='task',
name='colour',
field=models.CharField(choices=[('#F44336', 'red'), ('#4CAF50', 'green'), ('#FFEB3B', 'yellow'), ('#2196F3', 'blue')], default='#F44336', max_length=7, verbose_name='Colour'),
),
]
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,775
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/forms.py
|
from django import forms
from bootstrap3_datetime.widgets import DateTimePicker
from reminders.models import *
my_attrs = {
"inline":True,
"sideBySide":True,
"todayBtn":"linked",
"bootstrap_version":3,
"usel10n":True,
"format":"YYYY-M-D H:mm",
}
class TaskForm(forms.ModelForm):
class Meta:
model = Task
exclude = ['user']
widgets = { 'deadline': DateTimePicker(options=my_attrs), 'entry_date': DateTimePicker(options=my_attrs) }
class PerformanceForm(forms.ModelForm):
class Meta:
model = Performance
exclude = ('task',)
widgets = { 'perf_date': DateTimePicker(options=my_attrs) }
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,776
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/models.py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from datetime import datetime
def _(arg):
#Dummy _ function pending i18n, which you've forgotten.
return arg
# Create your models here.
class Icon(models.Model):
#Sysadmin only model, so no need to isolate by user.
icon = models.ImageField(upload_to='uploads/%Y/%m/%d/')
description = models.CharField(_('Description'),blank=True,null=True,max_length=100)
def __str__(self):
return self.description
COLOUR_CHOICES = (
('red','red'),
('yellow','yellow'),
('blue','blue'),
('green','green'),
)
class Task(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,blank=True,null=True)
label = models.CharField(max_length=200)
entry_date = models.DateTimeField(_('Date entered'))
deadline = models.DateTimeField(_('Deadline'),blank=True,null=True)
icon = models.ForeignKey(Icon,null=True,blank=True)
is_secret = models.BooleanField(_('Secret?'))
#Mebs change the widget on this.
countdown = models.BooleanField(_('Countdown'),help_text=_('Countdown or countup?'),default=True)
colour = models.CharField(_('Colour'),choices=COLOUR_CHOICES,default=COLOUR_CHOICES[0][0],max_length=7)
def last_performed(self):
return self.performance_set.order_by('-perf_date')[0].perf_date
def __str__(self):
return self.label
class Performance(models.Model):
#For when a task is performed.
task = models.ForeignKey(Task)
perf_date = models.DateTimeField(_('Date performed'))
def __str__(self):
return "{}: {}".format(self.task.pk,self.perf_date)
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,777
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/urls.py
|
from django.conf.urls import url
from . import views
app_name = "reminders"
urlpatterns = [
url(r'^$',views.MasonryView.as_view(),name='index'),
#url(r'^masonry$',views.MasonryView.as_view(),name='masonry'),
url(r'^register/?$', views.RegisterView.as_view(),name='register'),
url(r'^detail/(?P<pk>[0-9]*)/?$', views.TaskDetailView.as_view(),name='detail'),
url(r'^perform/(?P<task_id>[0-9]*)/?$', views.PerformView, name='perform'),
url(r'^create/?$',views.TaskCreateView.as_view(),name='create'),
url(r'^create_task_ajax/?', views.TaskCreateAjaxView, name='create_ajax'),
url(r'^delete/(?P<pk>[0-9]*)/?',views.TaskDeleteView.as_view(),name='delete'),
]
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,778
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/templatetags/remindertags.py
|
from django import template
register = template.Library()
@register.filter(name='starout')
def starout(value):
''' Expects 'string', returns 's****g' '''
v_space_list = value.split(" ")
star = lambda x: x[0] + "*"*len(x[1:-1]) + x[-1]
starred_out = [star(v) for v in v_space_list]
return " ".join(starred_out)
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,779
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/migrations/0003_icon_description.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 19:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reminders', '0002_auto_20170218_1956'),
]
operations = [
migrations.AddField(
model_name='icon',
name='description',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),
),
]
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,780
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/migrations/0005_task_countdown.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 21:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reminders', '0004_task_deadline'),
]
operations = [
migrations.AddField(
model_name='task',
name='countdown',
field=models.BooleanField(default=True, help_text='Countdown or countup?', verbose_name='Countdown'),
),
]
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,781
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/migrations/0004_task_deadline.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 23:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reminders', '0003_icon_description'),
]
operations = [
migrations.AddField(
model_name='task',
name='deadline',
field=models.DateTimeField(blank=True, null=True, verbose_name='Deadline'),
),
]
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,782
|
mikelaughton/reimagined-fiesta
|
refs/heads/master
|
/reminders/admin.py
|
from django.contrib import admin
from reminders.models import *
# Register your models here.
admin.site.register(Icon)
admin.site.register(Task)
admin.site.register(Performance)
|
{"/reminders/views.py": ["/reminders/models.py", "/reminders/forms.py"], "/reminders/forms.py": ["/reminders/models.py"], "/reminders/admin.py": ["/reminders/models.py"]}
|
5,801
|
SafeNavDev/SafeNav2.0
|
refs/heads/master
|
/FilteredPath.py
|
# print(data.json().keys()) to find all keys in dict
import requests
import math
earth_radius = 3960.0
degrees_to_radians = math.pi / 180.0
radians_to_degrees = 180.0 / math.pi
class FilteredPath:
def get_data(lat1, long1, lat2, long2):
# generate a token with your client id and client secret
# print(token.json()['access_token'])
token = FilteredPath.gen_token()
coordinates = long1 + "," + lat1 + "; " + long2 + "," + lat2
data = requests.post('http://route.arcgis.com/arcgis/rest/services/World/Route/NAServer/Route_World/solve?stops=' + coordinates, params={
'f': 'json',
'token': token.json()['access_token'],
'studyAreas': '[{"geometry":{"x":-117.1956,"y":34.0572}}]'
})
return data.json()['routes']['features'][0]['geometry']['paths'][0]
def gen_token():
token = requests.post('https://www.arcgis.com/sharing/rest/oauth2/token/', params={
'f': 'json',
'client_id': 'OYBSyP4UMttEkIlp',
'client_secret': '65057b2bafcf4e27bde6bcabff2dcc3c',
'grant_type': 'client_credentials',
'expiration': '1440'
})
return token
def change_in_latitude(miles):
# "Given a distance north, return the change in latitude."
return (miles / earth_radius) * radians_to_degrees
def change_in_longitude(latitude, miles):
# Given a latitude and a distance west, return the change in longitude
# Find the radius of a circle around the earth at given latitude.
r = earth_radius * math.cos(latitude * degrees_to_radians)
return (miles / r) * radians_to_degrees
def filter_path_average(path):
filtered_path = []
long_diffs = []
lat_diffs = []
avg_delta_long = 0
avg_delta_lat = 0
sum1 = 0
sum2 = 0
for x in range(1, len(path)):
long_diffs.append(abs(path[x][0] - path[x - 1][0]))
lat_diffs.append(abs(path[x][1] - path[x - 1][1]))
for x in long_diffs:
sum1 += x
avg_delta_long = sum1 / len(long_diffs)
for x in lat_diffs:
sum2 += x
avg_delta_lat = sum2 / len(lat_diffs)
for x in range(1, len(path)):
if long_diffs[x - 1] < avg_delta_long and lat_diffs[x - 1] < avg_delta_lat:
filtered_path.append(path[x])
return filtered_path
def filter_path_theory(path, segment):
delta_lat = FilteredPath.change_in_latitude(segment)
delta_long = FilteredPath.change_in_longitude(42.3314, segment)
filtered_path = []
long_diffs = []
lat_diffs = []
for x in range(1, len(path)):
long_diffs.append(abs(path[x][0] - path[x - 1][0]))
lat_diffs.append(abs(path[x][1] - path[x - 1][1]))
for x in range(1, len(path)):
if long_diffs[x - 1] < delta_long and lat_diffs[x - 1] < delta_lat:
filtered_path.append(path[x])
return filtered_path
|
{"/flaskAPI.py": ["/CompareData.py"], "/CompareData.py": ["/FetchCrimeData.py", "/FilteredPath.py"]}
|
5,802
|
SafeNavDev/SafeNav2.0
|
refs/heads/master
|
/FetchCrimeData.py
|
import requests
class FetchCrimeData:
static_base_url = "https://data.detroitmi.gov/resource/i9ph-uyrp.json?"
static_category_param = ""
static_date_param = ""
# A helper function to format the month field
def format_month(month):
int_month = int(month)
if int_month < 10:
return '0' + str(int_month)
else:
return str(month)
# A custom initializer for efficiency when computing multiple lines along a path
# Having certain data members stored is more efficient
def __init__(self, year_in, month_in, day_in):
month_in = FetchCrimeData.format_month(int(month_in))
date = year_in + '-' + month_in + '-' + day_in + 'T12:00:00'
prev_month = FetchCrimeData.format_month(int(month_in) - 2)
prev_date = year_in + '-' + prev_month + '-' + day_in + 'T12:00:00'
self.static_category_param = """and(category='ASSAULT' or category='ROBBERY' or category='AGGRAVATED ASSAULT' or category='HOMICIDE' or category='KIDNAPPING' or category='DRUNKENNESS' or category='DISORDERLY CONDUCT' or category='DANGEROUS DRUGS')"""
self.static_date_param = '$where= (incidentdate between ' + "'" + prev_date + "'" + ' and ' + "'" + date + "'"
# NOTE: DEPRECATED! Use initialized and static_query
# NOTE: Requires location list formatted: ['longitude', 'latitude']
def query_database(year_in, month_in, day_in, category_in, location, radius):
incidents = []
# This generates the category of crime
category_param = 'and (category=' + category_in +')'
# This generates the date paadfsasdframeter
month = FetchCrimeData.format_month(month_in)
date = year_in + '-' + month + '-' + day_in + 'T12:00:00'
prev_month = FetchCrimeData.format_month(int(month) - 2)
prev_date = year_in + '-' + prev_month + '-' + day_in + 'T12:00:00'
date_param = '$where= (incidentdate between ' + "'" + prev_date + "'" + ' and ' + "'" + date + "'"
# This generates the radius in which we are searching
location_param = 'and within_circle(location, ' + location[1] + ', ' + location[0] + ', ' + str(radius) + '))'
# Finally, concatenate and pass http get request
url = 'https://data.detroitmi.gov/resource/i9ph-uyrp.json?' + date_param + location_param + category_param
local_data = requests.get(url).json()
for x in local_data:
incidents.append(x['location']['coordinates'])
return incidents
# Allows a more efficient query by not initializing local variables every time
def static_query(self, incidents, location, radius):
location_param = 'and within_circle(location, ' + str(location[1]) + ', ' + str(location[0]) + ', ' + str(radius) + '))'
url = self.static_base_url + self.static_date_param + location_param + self.static_category_param
local_data = requests.get(url).json()
if local_data:
for x in local_data:
incidents.append(x['location']['coordinates'])
else:
return incidents
|
{"/flaskAPI.py": ["/CompareData.py"], "/CompareData.py": ["/FetchCrimeData.py", "/FilteredPath.py"]}
|
5,803
|
SafeNavDev/SafeNav2.0
|
refs/heads/master
|
/flaskAPI.py
|
from flask import Flask, request, jsonify
import datetime
from CompareData import CompareData
app = Flask(__name__)
@app.route('/coordinate/<lat1>,<long1>;<lat2>,<long2>')
def get_route(lat1, long1, lat2, long2):
# Validate all parameters are valid latitudes and longitudes
if not is_latitude(lat1):
return invalid('Latitude 1 not valid')
elif not is_latitude(lat2):
return invalid('Latitude 2 not valid')
elif not is_longitude(long1):
return invalid('Longitude 1 not valid')
elif not is_longitude(long2):
return invalid('Longitude 2 not valid')
now = datetime.datetime.now()
crimes_on_path = CompareData.return_crimes(lat1, long1, lat2, long2, str(now.year), str(now.month), str(now.day))
# add all params to a dict to be converted to a json array
newDict = {'coordinates':crimes_on_path}
return jsonify(**newDict)
# check if a latitude is valid
def is_latitude(s):
try:
f = float(s)
if not 90 > f > -90:
return False
return True
except ValueError:
return False
# check if a longitude is valid
def is_longitude(s):
try:
f = float(s)
if not 180 > f > -180:
return False
return True
except ValueError:
return False
@app.route('/post/<int:post_id>')
def show_post(post_id):
# show the post with the given id, the id is an integer
return 'Post %d' % post_id
# handle any 404 errors
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
# handle any invalid requests
@app.errorhandler(400)
def invalid(error=None):
error_message = 'Bad Request'
if error is not None:
error_message = 'Bad Request: ' + error
message = {
'status': 400,
'message': error_message,
}
resp = jsonify(message)
resp.status_code = 400
return resp
|
{"/flaskAPI.py": ["/CompareData.py"], "/CompareData.py": ["/FetchCrimeData.py", "/FilteredPath.py"]}
|
5,804
|
SafeNavDev/SafeNav2.0
|
refs/heads/master
|
/CompareData.py
|
import requests
from FetchCrimeData import FetchCrimeData
from FilteredPath import FilteredPath
segment_length_miles = .15
segment_length_meters = 200
class CompareData:
def return_crimes(lat1_in, long1_in, lat2_in, long2_in, year_in, month_in, day_in):
incidents_on_route = []
data = FilteredPath.get_data(lat1_in, long1_in, lat2_in, long2_in)
path = FilteredPath.filter_path_theory(data, segment_length_miles)
CrossReferencer = FetchCrimeData(year_in, month_in, day_in)
for x in path:
CrossReferencer.static_query(incidents_on_route, x, segment_length_meters)
return incidents_on_route
|
{"/flaskAPI.py": ["/CompareData.py"], "/CompareData.py": ["/FetchCrimeData.py", "/FilteredPath.py"]}
|
5,808
|
jay-khandelwal/ml-proj
|
refs/heads/master
|
/core/forms.py
|
from django import forms
class mlform(forms.Form):
sepal_len = forms.FloatField()
sepal_width = forms.FloatField()
petal_len = forms.FloatField()
petal_width = forms.FloatField()
#['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard','IsActiveMember', 'EstimatedSalary', 'Germany', 'Spain', 'Male']
class svm(forms.Form):
CreditScore =forms.FloatField()
Age = forms.IntegerField()
Tenure =forms.IntegerField()
Balance =forms.FloatField()
NumOfProducts =forms.IntegerField()
HasCrCard =forms.IntegerField()
IsActiveMember =forms.IntegerField()
EstimatedSalary =forms.FloatField()
Germany =forms.IntegerField()
Spain =forms.IntegerField()
Male =forms.IntegerField()
|
{"/core/views.py": ["/core/forms.py"]}
|
5,809
|
jay-khandelwal/ml-proj
|
refs/heads/master
|
/core/form.py
|
from django import forms
class mlform(forms.Form):
sepal_len = forms.FloatField()
sepal_width = forms.FloatField()
petal_len = forms.FloatField()
petal_width = forms.FloatField()
|
{"/core/views.py": ["/core/forms.py"]}
|
5,810
|
jay-khandelwal/ml-proj
|
refs/heads/master
|
/core/newfile.py
|
#import pickle
#pickle_in = open('iris.pkl','rb')
#clf = pickle.load(pickle_in)
#y = clf.predict({'sepal_len':[ 1.7], 'sepal_width': [8.5], 'petal_len': [4.5], 'petal_width':[6.1]})
#print(y)
#return HttpResponse('predicted class :' ,y)
import pickle
import pandas as pd
pickle_in = open('iris.pkl', 'rb')
clf = pickle.load(pickle_in)
test_x = pd.DataFrame({'sepal length':[4.9], 'sepal width': [3.0],'petal length':[1.4], 'petal width':[0.2]})
predicted_y = clf.predict(test_x)
print('predicted_y :', predicted_y)
|
{"/core/views.py": ["/core/forms.py"]}
|
5,811
|
jay-khandelwal/ml-proj
|
refs/heads/master
|
/core/views.py
|
from django.shortcuts import render, HttpResponse
import pickle
from .forms import mlform,svm
# Create your views here.
def index(request):
if request.method == 'POST':
form = mlform(request.POST)
if form.is_valid():
sepal_len = form.cleaned_data['sepal_len']
sepal_width = form.cleaned_data['sepal_width']
petal_len = form.cleaned_data['petal_len']
petal_width = form.cleaned_data['petal_width']
pickle_in = open('iris.pkl','rb')
clf = pickle.load(pickle_in)
import pandas as pd
test_x = pd.DataFrame({'sepal_len':[ sepal_len], 'sepal_width': [sepal_width], 'petal_len': [petal_len], 'petal_width':[petal_width]})
predicted_y = int(clf.predict(test_x))
list = ['Setosa', 'Versicolor', 'Verginica']
context =( 'Type of leave :', list[predicted_y])
return HttpResponse(context)
else:
form = mlform()
context= {'form':form}
return render(request, 'index.html', context)
#['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard','IsActiveMember', 'EstimatedSalary', 'Germany', 'Spain', 'Male']
def churn(request):
if request.method == 'POST':
form = svm(request.POST)
if form.is_valid():
CreditScore = form.cleaned_data['CreditScore']
Age = form.cleaned_data['Age']
Tenure = form.cleaned_data['Tenure']
Balance = form.cleaned_data['Balance']
NumOfProducts = form.cleaned_data['NumOfProducts']
HasCrCard = form.cleaned_data['HasCrCard']
IsActiveMember = form.cleaned_data['IsActiveMember']
EstimatedSalary = form.cleaned_data['EstimatedSalary']
Germany = form.cleaned_data['Germany']
Spain = form.cleaned_data['Spain']
Male = form.cleaned_data['Male']
pickle_in = open('SupportVectorMachineScaler.pkl','rb')
scaler = pickle.load(pickle_in)
import pandas as pd
test_x = pd.DataFrame({
'CreditScore':[ CreditScore],
'Age': [Age],
'Tenure': [Tenure],
'Balance':[Balance],
'Spain':[NumOfProducts],
'HasCrCard':[HasCrCard],
'IsActiveMember':[IsActiveMember],
'EstimatedSalary':[EstimatedSalary],
'Germany':[Germany],
'Spain':[Spain],
'Male':[Male]})
print('main :',test_x)
# test_x = scaler.transform(test_x)
print('*' *100)
print('Scaler :',test_x)
pickle_in = open('SupportVectorMachine accuracy.pkl','rb')
clf = pickle.load(pickle_in)
#import pandas as pd
# test_x = pd.DataFrame({'sepal_len':[ sepal_len], 'sepal_width': [sepal_width], 'petal_len': [petal_len], 'petal_width':[petal_width]})
predicted_y = clf.predict(test_x)
# list = ['Yes', 'No']
# context =( 'did person leave ?? :', list[predicted_y])
return HttpResponse(predicted_y)
else:
form = svm()
context= {'form':form}
return render(request, 'index.html', context)
|
{"/core/views.py": ["/core/forms.py"]}
|
5,815
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/common/parsed_sentence.py
|
class parsed_sentence:
def __init__(self):
self.words = []
self.NPs = []
def add_word(self, w, l, pos, i, par, parent_index, rel , ty):
word = parsed_word(w, l, pos, i, par, parent_index, rel, ty)
self.words.append(word)
def add_NP(self, np, root, ri, start, end):
np2 = noun_phrase(np, root, ri, start, end)
self.NPs.append(np2)
def __repr__(self):
return str(self)
def __str__(self):
return " ".join([word.word for word in self.words])
class parsed_word:
def __init__(self, w, l, pos, i, par, parent_index, rel, ty):
self.word = w
self.lemma = l
self.pos = pos
self.index = i
self.dep_rel = rel
self.parent = par
self.parent_index = parent_index
self.type = ty
def __repr__(self):
return str(self)
def __str__(self):
return "(" + self.word + ", " + self.lemma + ", " + self.pos + ", " + str(self.index) + ", " + self.parent + ", " + str(self.parent_index) + ", " + self.dep_rel+ ", " + self.type + ")"
class noun_phrase:
def __init__(self, np, root, ri, start, end):
self.text = np
self.root = root
self.root_index = ri
self.start = start
self.end = end
def __repr__(self):
return str(self)
def __str__(self):
return "(" + self.text + ", " + self.root + ", " + str(self.root_index) + ", " + str(self.start) + ", " + str(self.end) + ")"
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,816
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/dependency_Hearst_patterns/DHP.py
|
# The dependency patterns:
# ("nsubj(hyperHead, hypoHead), cop(hyperHead, was|were|is|are)"),
# ("case(hypoHead, such), mwe(such, as), nmod:such_as(hyperHead, hypoHead)"),
# ("case(hypoHead, including), nmod:including(hyperHead, hypoHead)"),
# ("amod(hyperHead, such), case(hypoHead, as), nmod:as(hyperHead, hypoHead)"),
# ("cc(hypoHead, and/or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead)"),
# ("advmod(hyperHead, especially), dep(hyperHead, hypoHead)")
from common import HyperHypoCouple as HH
import common.core_functions as cf
def get_NP(NPs, index):
for np in NPs:
if int(index) in range(int(np.start), int(np.end) + 1):
return np.text
return ""
def get_couples(parsed_sentence, hyper_index, hypo_index):
hyper_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hyper_index))
hypo_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hypo_index))
couples = []
if hyper_np != "" and hypo_np != "" and hypo_np != hyper_np:
hh = HH.HHCouple(hypo_np, hyper_np)
couples.append(hh)
parsed_words = parsed_sentence.words
for i in range(int(hypo_index) + 1, len(parsed_words)):
parsed_word = parsed_words[i]
if str(parsed_word.dep_rel).__contains__("conj") and parsed_word.parent_index == hypo_index:
new_hypo_index = parsed_word.index
new_hypo_np = get_NP(parsed_sentence.NPs, new_hypo_index)
if hyper_np != "" and hypo_np != "" and new_hypo_np != hyper_np:
new_hh = HH.HHCouple(new_hypo_np, hyper_np)
couples.append(new_hh)
return couples
def such_A_as_B(parsed_sentence):
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i] #("amod(hyperHead, such), case(hypoHead, as), nmod:as(hyperHead, hypoHead)"),
if str(parsed_word.dep_rel).__contains__("nmod:as"):
hypo_index = parsed_word.index
hyper_index = parsed_word.parent_index
flag1 = False
flag2 = False
for j in range(i - 1, max(-1, i-10), -1):
pre_word = parsed_words[j]
if str(pre_word.dep_rel).__contains__("case") and pre_word.word == "as" and pre_word.parent_index == hypo_index:
flag1 = True
elif str(pre_word.dep_rel).__contains__("amod") and pre_word.word == "such" and pre_word.parent_index == hyper_index:
flag2 = True
if flag1 and flag2:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def A_is_a_B(parsed_sentence):
vtb = ["is", "are", "was", "were"]
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i] #("nsubj(hyperHead, hypoHead), cop(hyperHead, was|were|is|are)"),
if str(parsed_word.dep_rel).__contains__("nsubj"):
hypo_index = parsed_word.index
hyper_index = parsed_word.parent_index
for j in range(i + 1, min(len(parsed_words), i + 10)):
next_word = parsed_words[j]
if str(next_word.dep_rel).__contains__("cop") and next_word.word in vtb and next_word.parent_index == hyper_index:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def A_and_other_B(parsed_sentence):
conj = ["or", "and"]
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i] #("cc(hypoHead, and/or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead)"),
if str(parsed_word.dep_rel).__contains__("conj"):
hyper_index = parsed_word.index
hypo_index = parsed_word.parent_index
flag1 = False
flag2 = False
for j in range(i - 1, max(-1, i - 10), -1):
pre_word = parsed_words[j]
if str(pre_word.dep_rel).__contains__("amod") and pre_word.word == "other" and pre_word.parent_index == hyper_index:
flag1 = True
elif str(pre_word.dep_rel).__contains__(
"cc") and pre_word.word in conj and pre_word.parent_index == hypo_index:
flag2 = True
if flag1 and flag2:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def A_especially_B(parsed_sentence):
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i] #("advmod(hyperHead, especially), dep(hyperHead, hypoHead)")
if str(parsed_word.dep_rel).__contains__("dep"):
hypo_index = parsed_word.index
hyper_index = parsed_word.parent_index
for j in range(i - 1, max(-1, i - 10), -1):
pre_word = parsed_words[j]
if str(pre_word.dep_rel).__contains__("advmod") and pre_word.word == "especially" and pre_word.parent_index == hyper_index:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def A_including_B(parsed_sentence):
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i] #("case(hypoHead, including), nmod:including(hyperHead, hypoHead)"),
if str(parsed_word.dep_rel).__contains__("nmod:including"):
hypo_index = parsed_word.index
hyper_index = parsed_word.parent_index
for j in range(i - 1, max(-1, i - 10), -1):
pre_word = parsed_words[j]
if str(pre_word.dep_rel).__contains__("case") and pre_word.word == "including" and pre_word.parent_index == hypo_index:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def A_such_as_B(parsed_sentence):
parsed_words = parsed_sentence.words
for i in range(len(parsed_words)):
parsed_word = parsed_words[i]
if str(parsed_word.dep_rel).__contains__("nmod:such_as"):
hypo_index = parsed_word.index
hyper_index = parsed_word.parent_index
flag1 = False
flag2 = False
for j in range(i - 1, max(-1, i-10), -1):
pre_word = parsed_words[j]
if str(pre_word.dep_rel).__contains__("mwe") and pre_word.word == "as" and pre_word.parent == "such":
flag1 = True
elif str(pre_word.dep_rel).__contains__("case") and pre_word.word == "such" and pre_word.parent_index == hypo_index:
flag2 = True
if flag1 and flag2:
couples = get_couples(parsed_sentence, hyper_index, hypo_index)
if len(couples) > 0:
return True, couples
return False, []
def sentence_couples_annotation(sentence, couples):
sentence = sentence.replace("_hypo", "").replace("_hyper", "").replace("_", " ")
for couple in couples:
hyper = couple.hypernym
hyper2 = hyper.replace(" ", "_")
sentence = sentence.replace(" " + hyper + " ", " " + hyper2 + "_hyper ").strip()
hypo = couple.hyponym
hypo2 = hypo.replace(" ", "_")
try:
sentence = sentence.replace(" " + hypo + " ", " " + hypo2 + "_hypo ").strip()
except:
sentence = sentence
return sentence
def DHP_matching(parsed_sentence, sentence = ""):
couples = []
patterns = []
# NP such as NP
flag, co = A_such_as_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("NP such as NP")
# NP including NP
flag, co = A_including_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("NP including NP")
# NP is a NP
flag, co = A_is_a_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("NP is a NP")
# NP and other NP
flag, co = A_and_other_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("NP and other NP")
# NP especially NP
flag, co = A_especially_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("NP especially NP")
# such NP as NP
flag, co = such_A_as_B(parsed_sentence)
if flag:
couples.extend(co)
patterns.append("such NP as NP")
if len(couples) == 0:
return False, "", "", ""
return True, couples, patterns, sentence_couples_annotation(sentence, couples)
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,817
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/dependency_Hearst_patterns/extracted_couples_validation.py
|
from os import listdir
from os.path import isfile, join
from common import core_functions as cf
def main():
"""
Goal: validate the extracted couples using DHP
inputs:
-res_files_directory: a directory path for the DHP matching result files
"""
res_files_directory = r"..\matching_DHP_subcorpora"
allfiles = [join(res_files_directory, f) for f in listdir(res_files_directory) if
isfile(join(res_files_directory, f))]
dataset_path = r"..\datasets\Music.txt"
for file in allfiles:
s = ""
for res in cf.get_result_sentences(file):
s += "<s>\n"
s += str(res[0]).strip() + "\n"
s += str(res[1]) + "\n"
s += "Label: " + str(res[2]).strip() + "\n"
predicted, predicted_by = cf.check_extracted_couples(res[1], dataset_path)
s += "Validated: " + str(predicted).strip() + "\n"
s += "Validated by: " + str(predicted_by).strip() + "\n"
s += "</s>\n"
f = open(file, "w")
f.write(s)
f.close()
if __name__ == '__main__':
main()
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,818
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/common/evaluation.py
|
from os import listdir
from os.path import isfile, join
import core_functions as cf
def evaluate(res_files_directory, sem_pos_labeled_corpus_file):
"""
Evaluate precision, recall, and F-measure from a set of files corresponding the results of matching patterns
:param res_files_directory: the directory of result files
:param sem_pos_labeled_corpus_file: the file path of semantically positive labeled corpus
:return: the measures: precision, recall, and F-measure
"""
all_pos = len(open(sem_pos_labeled_corpus_file).readlines())
TM = 0
PTM = 0
FM = 0
allfiles = [join(res_files_directory, f) for f in listdir(res_files_directory) if isfile(join(res_files_directory, f))]
for file in allfiles:
for res in cf.get_result_sentences(file):
label = res[2]
predicted = res[3]
if predicted == "True":
TM += 1
if label == "positive":
PTM += 1
else:
FM += 1
FNM = all_pos - PTM
precision = TM*1.0 / (TM + FM)
recall = TM * 1.0 / (TM + FNM)
f_measure = 2.0 * precision * recall / (precision + recall)
return precision, recall, f_measure
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,819
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/corpus_labeling/cleaning_pos_labeled_sentences.py
|
import re
import nltk
import spacy
from Hearst_patterns import HearstPattern
HP = HearstPattern.HearstPatterns()
def main():
'''
Goal: remove non semantically positive sentences from positive labeled sentences and select the same number of negative sentences as negative samples
inputs:
-posSentFile: a file path for the labeled positive sentences by the sentence labeling process
-semPosSentOutputFile: an output file path of semantically positive labeled sentences
-negSentFile: a file path for the labeled negative sentences by the sentence labeling process
-samplesNegSentOutputFile: an output file path of samples of negative labeled sentences
'''
# inputs
posSentFile = r"..\labeled_corpus\Music_Pos.txt"
semPosSentOutputFile = r"..\labeled_corpus\Music_Sem_Pos.txt"
negSentFile = r"..\labeled_corpus\Music_Neg.txt"
samplesNegSentOutputFile = r"..\labeled_corpus\Music_Neg_Samples.txt"
# open output file
ofsp = open(semPosSentOutputFile, "wb")
# process positive sentences
i = 0
count = 0
with open(posSentFile, "rb") as f:
for line in f:
annSent = line.decode("ascii", "ignore")
i += 1
print i
if not is_non_sem_pos(annSent):
count += 1
ofsp.write(annSent.strip() + "\n")
ofsp.close()
f.close()
# open output file
ofsn = open(samplesNegSentOutputFile, "wb")
# process negative sentences
i = 0
with open(negSentFile, "rb") as f:
for line in f:
sent = line.decode("ascii", "ignore")
i += 1
print i
ofsn.write(sent.strip() + "\n")
if i == count:
break
ofsn.close()
f.close()
def is_non_sem_pos(annotatedSentence):
# check if couples occur between brackets and not in the same brackets
if btw_brackets(annotatedSentence):
print "btw brackets"
return True
# check if there is conjunction relation between couple terms
if is_conjunction(annotatedSentence):
return True
return False
def is_conjunction(sent):
res = HP.label_cohyponyms(sent)
if not res:
return False
cohyponymCouples = res[1]
hypoFlag = False
hyperFlag = False
for cop in cohyponymCouples:
if str(cop.hyponym).__contains__("hypo") or str(cop.hypernym).__contains__("hypo"):
hypoFlag = True
if str(cop.hyponym).__contains__("hyper") or str(cop.hypernym).__contains__("hyper"):
hyperFlag = True
if (hypoFlag and hyperFlag):
return True
return False
def btw_brackets(sent):
brackets = re.findall(r'\((.*?)\)', sent)
if len(brackets) == 0:
return False
hypoBrackets = []
hyperBrackets = []
i = 0
for bracket in brackets:
if str(bracket).__contains__("_hypo"):
hypoBrackets.append(i)
if str(bracket).__contains__("_hyper"):
hyperBrackets.append(i)
i += 1
if len(hyperBrackets)==0 or len(hypoBrackets)==0:
return False
return not any(x in hyperBrackets for x in hypoBrackets)
def remove_HH_annotation(annSent):
return annSent.replace("_hypo", "").replace("_hyper", "").replace("_", " ")
def get_hypo_hyper(annSent):
words = str(annSent).strip().split()
hypo = ""
hyper = ""
for word in words:
if word.__contains__("_hypo"):
hypo = word.replace("_hypo", "").replace("_", " ")
elif word.__contains__("_hyper"):
hyper = word.replace("_hyper", "").replace("_", " ")
return hypo, hyper
# def is_conjunction(annSent):
# sent = remove_HH_annotation(annSent)
# hypo, hyper = get_hypo_hyper(annSent)
# res = HP.label_cohyponyms(sent)
# if not res:
# return False
# cohyponymCouples = res[1]
# hypoFlag = False
# hyperFlag = False
# for cop in cohyponymCouples:
# if str(cop.hyponym) == hypo or str(cop.hypernym) == hypo:
# hypoFlag = True
# if str(cop.hyponym) == hyper or str(cop.hypernym) == hyper:
# hyperFlag = True
# if (hypoFlag and hyperFlag):
# return True
# return False
if __name__ == '__main__':
main()
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,820
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/dependency_Hearst_patterns/DHP_matching.py
|
from common import core_functions as cf
from DHP import DHP_matching
def main():
"""
Goal: Match DHP and output the couples extracted (with sentence annotation) by a specific pattern into a corresponding output file
inputs:
-sem_pos_file: a file path for the semantically positive sentences (result of cleaning process)
-sem_pos_processed_file: a file path for the semantically positive sentences after processing (result of java preprocessing step)
-neg_samples_file: a file path for samples of negative sentences (same number of semantically positive sentences may be selected randomly)
-sem_pos_processed_file: a file path for samples of negative sentences after processing (result of java preprocessing step)
-output_files: a list of output files, each one corresponds for a specific DHP
"""
#inputs
sem_pos_file = r"..\labeled_corpus\Music_Test_Sem_Pos.txt"
sem_pos_processed_file = r"..\processed_corpus\Music_Test_Sem_Pos_processed.txt"
neg_samples_file = r"..\labeled_corpus\Music_Test_Neg_samples.txt"
neg_samples_processed_file = r"..\processed_corpus\Music_Test_Neg_Samples_processed.txt"
output_files = [r"..\matching_DHP_subcorpora\matching_such_as.txt",
r"..\matching_DHP_subcorpora\matching_including.txt",
r"..\matching_DHP_subcorpora\matching_is_a.txt",
r"..\matching_DHP_subcorpora\matching_and_other.txt",
r"..\matching_DHP_subcorpora\matching_especially.txt",
r"..\matching_DHP_subcorpora\matching_such_NP_as.txt"]
patterns = ["NP such as NP", "NP including NP", "NP is a NP", "NP and other NP", "NP especially NP",
"such NP as NP"]
f0 = open(output_files[0], "w")
f1 = open(output_files[1], "w")
f2 = open(output_files[2], "w")
f3 = open(output_files[3], "w")
f4 = open(output_files[4], "w")
f5 = open(output_files[5], "w")
matching_DHP_and_write_into_files(sem_pos_file, sem_pos_processed_file, patterns, f0, f1, f2, f3, f4, f5, "positive")
matching_DHP_and_write_into_files(neg_samples_file, neg_samples_processed_file, patterns, f0, f1, f2, f3, f4, f5, "negative",)
f0.close()
f1.close()
f2.close()
f3.close()
f4.close()
f5.close()
def matching_DHP_and_write_into_files(file, processed_file, patterns, f0, f1, f2, f3, f4, f5, label):
sentences = open(file).readlines()
i = 0
for parsed_sentence in cf.get_sentences(processed_file):
sentence = sentences[i]
print i
i += 1
res = DHP_matching(parsed_sentence, sentence)
if res[0]:
if res[2][0] == patterns[0]:
cf.write_sentence_matching_result_into_file(f0, res[3], res[1], label)
elif res[2][0] == patterns[1]:
cf.write_sentence_matching_result_into_file(f1, res[3], res[1], label)
elif res[2][0] == patterns[2]:
cf.write_sentence_matching_result_into_file(f2, res[3], res[1], label)
elif res[2][0] == patterns[3]:
cf.write_sentence_matching_result_into_file(f3, res[3], res[1], label)
elif res[2][0] == patterns[4]:
cf.write_sentence_matching_result_into_file(f4, res[3], res[1], label)
elif res[2][0] == patterns[5]:
cf.write_sentence_matching_result_into_file(f5, res[3], res[1], label)
if __name__ == '__main__':
main()
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,821
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/common/core_functions.py
|
from nltk import word_tokenize
from nltk import pos_tag
from nltk.corpus import wordnet as wn
from nltk import WordNetLemmatizer
from common import HyperHypoCouple as HH
import spacy
import gzip
import shutil
import parsed_sentence as ps
nlp = spacy.load('en_core_web_sm')
from spacy.lang.en.stop_words import STOP_WORDS
stopWords = set(STOP_WORDS)
lemma = WordNetLemmatizer()
def write_sentence_matching_result_into_file(f, ann_sent, couples, label):
f.write("<s>\n")
f.write(ann_sent + "\n")
f.write(str(couples) + "\n")
f.write("Label: " + label + "\n")
f.write("</s>\n")
def check_extracted_couples(extracted_couples, dataset_path):
dataset_couples = get_couples(dataset_path)
for extC in extracted_couples:
if extC in dataset_couples:
return True, "dataset"
if check_wordNet_hypernymy(extC.hyponym, extC.hypernym):
return True, "wordNet"
lemma_extC = HH.HHCouple(HeadWithLemma(extC.hyponym), HeadWithLemma(extC.hypernym))
if lemma_extC in dataset_couples:
return True, "dataset"
if check_wordNet_hypernymy(lemma_extC.hyponym, lemma_extC.hypernym):
return True, "wordNet"
if check_structural_hypernym_relation(extC.hyponym, extC.hypernym):
return True, "structural"
return False, "None"
def check_structural_hypernym_relation(hypo, hyper):
if len(hypo.split()) == 1 or len(hypo) <= len(hyper) or not str(hypo + " ").endswith(" " + hyper + " "):
return False
tokens = word_tokenize(hypo)
tags = pos_tag(tokens)
print tags
hypo2 = str(hypo).replace(" " + hyper, "")
hypos = hypo2.split()
for tag in tags:
if tag[0] == hypos[len(hypos) - 1] and (tag[1].__contains__("NN") or tag[1].__contains__("JJ")):
return True
return False
def check_wordNet_hypernymy(hypo, hyper):
hypos = wn.synsets(hypo)
if len(hypos) == 0:
return False
hypo = hypos[0]
hypers = set([i for i in hypo.closure(lambda s:s.hypernyms())])
hypers2 = wn.synsets(hyper)
if len(hypers2) == 0:
return False
hyper = hypers2[0]
if hyper in hypers:
return True
else:
return False
def get_couples_from_string(couples_string): #[(sonatas, works), (symphonies, works)]
"""
get a string of couples and return them as list of HH couples
:param couples_string: the string representing a list of couples
:return: HH couples list
"""
couples = []
couples_temp = couples_string.replace("[", "").replace("]", "").split("),")
for co in couples_temp:
hypo, hyper = str(co).replace("(", "").replace(")", "").split(",")
hh = HH.HHCouple(hypo.strip(), hyper.strip())
couples.append(hh)
return couples
def get_result_sentences(result_file):
"""
Returns all the content of a matched corpus file
:param result_file: the processed corpus file (.gz)
:return: the next sentence result (yield)
"""
sent = ps.parsed_sentence()
# Read all the sentences in the file
with open(result_file, 'r') as f_in:
i = 0
ann_sent = ""
couples = []
label = ""
predicted = ""
predicted_by = ""
for line in f_in:
line = line.decode('ISO-8859-2')
# Ignore start and end of doc
if '<s>' in line:
i += 1
continue
# End of sentence
elif '</s>' in line:
yield ann_sent, couples, label, predicted, predicted_by
i = 0
ann_sent = ""
couples = []
label = ""
predicted = ""
predicted_by = ""
else:
if i == 1:
ann_sent = line
elif i == 2:
couples = get_couples_from_string(line)
elif i == 3:
label = line.split(":")[1].strip()
elif i == 4:
predicted = line.split(":")[1].strip()
elif i == 5:
predicted_by = line.split(":")[1].strip()
i += 1
def get_sentences(corpus_file):
"""
Returns all the (content) sentences in a processed corpus file
:param corpus_file: the processed corpus file (may be compressed or not)
:return: the next sentence (yield)
"""
sent = ps.parsed_sentence()
# Read all the sentences in the file
if str(corpus_file).endswith(".gz"):
f_in = gzip.open(corpus_file, 'r')
elif str(corpus_file).endswith(".txt"):
f_in = open(corpus_file, 'r')
else:
print "wrong input file."
# with gzip.open(corpus_file, 'r') as f_in:
s = []
isNP = False
is_root = False
root = ""
ri = 0
np = ""
np_indexes = []
for line in f_in:
line = line.decode('ISO-8859-2')
# Ignore start and end of doc
if '<text' in line or '</Text' in line or '<s>' in line:
continue
# End of sentence
elif '</s>' in line:
yield sent
s = []
isNP = False
is_root = False
root = ""
ri = 0
np = ""
np_indexes = []
sent = ps.parsed_sentence()
elif '<NP>' in line:
isNP = True
elif '</NP>' in line:
isNP = False
sent.add_NP(np.strip(), root, ri, min(np_indexes), max(np_indexes))
np = ""
np_indexes = []
elif '<root>' in line:
is_root = True
elif '</root>' in line:
is_root = False
else:
try:
word, lemma, pos, index, parent, parent_index, dep, type = line.split("\t")
if is_root:
root = word
ri = int(index)
if isNP:
np_indexes.append(int(index))
np = np + " " + word
sent.add_word(word, lemma, pos, int(index), parent, int(parent_index), dep, type.strip())
# One of the items is a space - ignore this token
except Exception, e:
print str(e)
continue
def remove_first_occurrences_stopwords(text):
"""
:param text: text string
:return: the text after removing the first occurrences of stop words in the text
"""
if text == "":
return text
words = text.split()
if words[0] in stopWords:
text = str(" " + text + " ").replace(" " + words[0] + " ", "").strip()
return remove_first_occurrences_stopwords(text)
else:
return text
def noun_phrase_chunker(sentence):
"""
:param sentence: a sentence string
:return: a list of sentence noun phrases
"""
nps = []
sentParsing = nlp(sentence.decode("ascii", "ignore"))
for chunk in sentParsing.noun_chunks:
np = chunk.text.lower().encode("ascii", "ignore")
np = remove_first_occurrences_stopwords(np)
nps.append(np)
return nps
def label_sentence(sentence, couples, min_gap = 1, max_gap = 10):
"""
:param sentence: a sentence string
:param couples: list of dataset HH-couples
:param min_gap: the minimum gap between the index of occurrence of hypernym and hyponym (default = 1)
:param max_gap: the maximum gap between the index of occurrence of hypernym and hyponym (default = 7)
:return: tuple (occur boolean, annotated sentence), the occur boolean is true if any of the couple occur at the sentence
"""
nps = noun_phrase_chunker(sentence)
nps.sort(key=lambda s: len(s), reverse=True)
sentence1 = " " + sentence
for np in nps:
np_ann = str(np).replace(" ", "_") + "_np"
sentence1 = sentence1.lower().replace(" " + np + " ", " " + np_ann + " ")
for hh in couples:
hypo = hh.hyponym
hyper = hh.hypernym
if hypo.lower() in nps and hyper.lower() in nps:
hypo_np = str(hypo).replace(" ", "_") + "_np"
hypo2 = hypo_np.replace("_np", "_hypo")
sentence2 = str(sentence1).replace(" " + hypo_np + " ", " " + hypo2 + " ")
hyper_np = str(hyper).replace(" ", "_") + "_np"
hyper2 = hyper_np.replace("_np", "_hyper")
sentence3 = str(sentence2).replace(" " + hyper_np + " ", " " + hyper2 + " ")
hypoIndexes = get_indexes(sentence3, hypo2)
hyperIndexes = get_indexes(sentence3, hyper2)
for index1 in hypoIndexes:
for index2 in hyperIndexes:
if abs(index2 - index1) > min_gap and abs(index2 - index1) <= max_gap:
for np in nps:
np_ann = str(np).replace(" ", "_") + "_np"
sentence3 = sentence3.replace(" " + np_ann + " ", " " + np + " ")
return True, sentence3.strip()
return False, sentence.strip()
def get_indexes(sentence, token):
"""
:param sentence: a string sentence
:param token: a string token (such as word)
:return: a list of all indexes where the token occurs in the sentence
"""
tokens = word_tokenize(sentence)
indexes = []
while True:
try:
ind = tokens.index(token)
indexes.append(ind)
tokens[ind] = "_"
except:
break
return indexes
def get_couples(datasetPath):
"""
:param datasetPath: dataset file path (dataset format --> hyponym\thypernym\n)
:return: return a list of dataset HH-couples
"""
couples = []
with open(datasetPath, "rb") as f:
for line in f:
hypo, hyper = line.split("\t")
hh = HH.HHCouple(hypo.strip(), hyper.strip())
couples.append(hh)
f.close()
return couples
def compressFile(output_file):
with open(output_file, 'rb') as f_in, gzip.open(output_file + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def HeadWithLemma(couple_term):
if len(str(couple_term).split(" ")) == 1:
try:
hyper = lemma.lemmatize(couple_term)
except:
print "exception"
return hyper
nn = 0
text = word_tokenize(couple_term)
tags = pos_tag(text)
allTags = [tag[1] for tag in tags]
ConjFlag = False
if "CC" in allTags:
ConjFlag = True
i = 0
word = ""
for tag in tags:
if str(tag[1]).__eq__("IN") or (ConjFlag and str(tag[1]).__eq__(",")) or (ConjFlag and str(tag[1]).__eq__("CC")):
break
if str(tag[1]).__contains__("NN"):
word = tag[0]
nn += 1
try:
word = lemma.lemmatize(word)
except:
print "exception"
if word == "":
return couple_term
return word
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,822
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/dependency_Hearst_patterns/evaluate.py
|
from os import listdir
from os.path import isfile, join
from common import evaluation as ev
def main():
"""
Goal: evaluate precision, recall, F-measure of DHP
inputs:
-res_files_directory: a directory path for the DHP matching result files
-sem_pos_labeled_corpus: a file path for the semantically positive labeled sentences (all positive used to evaluate recall)
"""
res_files_directory = r"..\matching_DHP_subcorpora"
sem_pos_labeled_corpus = r"..\labeled_corpus\Music_Test_Sem_Pos.txt"
precision, recall, f_measure = ev.evaluate(res_files_directory, sem_pos_labeled_corpus)
print "DHP evaluation:"
print "precision : " + str(precision)
print "recall : " + str(recall)
print "F-measure : " + str(f_measure)
if __name__ == '__main__':
main()
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,823
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/common/HyperHypoCouple.py
|
class HHCouple:
def __init__(self, hypo, hyper):
self.hypernym = hyper
self.hyponym = hypo
def __repr__(self):
return str(self)
def __str__(self):
return "(" + self.hyponym + ", " + self.hypernym + ")"
def __eq__(self, other):
return self.hypernym == other.hypernym and self.hyponym == other.hyponym
def __ne__(self, other):
"""Override the default Unequal behavior"""
return self.hypernym != other.hypernym or self.hyponym != other.hyponym
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,824
|
AhmadIssaAlaa/SequentialHearstPatterns
|
refs/heads/master
|
/corpus_labeling/sentence_labeling_pos_neg.py
|
from nltk import word_tokenize
from nltk import WordNetLemmatizer
from spacy.lang.en import English
nlp = English()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
from spacy.lang.en.stop_words import STOP_WORDS
stopWords = set(STOP_WORDS)
lemma = WordNetLemmatizer()
from common import core_functions as cf
def main():
"""
Goal: Take a list of corpus text files and label the sentences as positive and negative according to a dataset
after filtering the sentences that contains number of tokens above N
inputs:
-corpusFilesInput: A list of paths for corpus text files
-posSentOutputFile: an output file path for positive labeled sentences
-negSentOutputFile: an output file path for negative labeled sentences
-datasetFilePath: a dataset file path
-minTokens: minimum number of tokens in a sentence
-maxTokens: maximum number of tokens in a sentence
"""
#inputs
corpusFilesInput = [r"E:\SemEvalData\SemEval18-Task9\corpuses\2B_music_bioreviews_tokenized_Training.txt",
r"E:\SemEvalData\SemEval18-Task9\corpuses\2B_music_bioreviews_tokenized_Testing.txt"]
posSentOutputFile = r"..\labeled_corpus\Music_Pos.txt"
negSentOutputFile = r"..\labeled_corpus\Music_Neg.txt"
datasetFilePath = r"..\datasets\Music.txt"
minTokens = 5
maxTokens = 50
#get dataset couples
couples = cf.get_couples(datasetFilePath)
#open output files
ofp = open(posSentOutputFile, "wb")
ofn = open(negSentOutputFile, "wb")
#process each corpus file
for cFile in corpusFilesInput:
with open(cFile, "rb") as f:
i = 0
for line in f:
line = line.decode("ascii", "ignore")
i += 1
print i
sentences = nlp(line.decode("ascii", "ignore"))
for sentence in sentences.sents:
sentence = sentence.string.strip()
tokens = word_tokenize(sentence)
if len(tokens) < minTokens or len(tokens) > maxTokens:
continue
else:
label, resSent = cf.label_sentence(sentence, couples)
if label:
ofp.write(resSent.encode("ascii", "ignore").strip()+"\n")
else:
ofn.write(resSent.encode("ascii", "ignore").strip()+"\n")
ofp.close()
ofn.close()
f.close()
if __name__ == '__main__':
main()
|
{"/dependency_Hearst_patterns/DHP.py": ["/common/core_functions.py"]}
|
5,863
|
alexarirok/Flask-Project
|
refs/heads/master
|
/config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'secret_key'
SQLALCHEMY_DATABASE_URI = "postgresql://alex:postgres@localhost/store"
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,864
|
alexarirok/Flask-Project
|
refs/heads/master
|
/migrations/versions/7a336e26d0b1_.py
|
"""empty message
Revision ID: 7a336e26d0b1
Revises: b47c79195e47
Create Date: 2019-07-30 11:15:04.320630
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7a336e26d0b1'
down_revision = 'b47c79195e47'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('parcel_name', sa.String(length=100), nullable=True),
sa.Column('parcel_number', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('parcel_number')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=40), nullable=True),
sa.Column('password', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.drop_table('item')
op.drop_table('user')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('email', sa.VARCHAR(length=40), autoincrement=False, nullable=True),
sa.Column('password', sa.VARCHAR(length=60), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='user_pkey'),
sa.UniqueConstraint('email', name='user_email_key')
)
op.create_table('item',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('parcel_name', sa.VARCHAR(length=100), autoincrement=False, nullable=True),
sa.Column('parcel_number', sa.VARCHAR(length=100), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='item_pkey'),
sa.UniqueConstraint('parcel_number', name='item_parcel_number_key')
)
op.drop_table('users')
op.drop_table('items')
# ### end Alembic commands ###
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,865
|
alexarirok/Flask-Project
|
refs/heads/master
|
/app.py
|
from flask import Flask, render_template, Blueprint
from flask_restful import Api
from resources.Hello import Hello
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
api.add_resource(Hello, '/Hello')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'helloworld'
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://alex:postgres@localhost/store"
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
login_manager = LoginManager(app)
# if __name__ == "__main__":
# app.run(debug=True)
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,866
|
alexarirok/Flask-Project
|
refs/heads/master
|
/migrations/versions/90f79c1e9d89_.py
|
"""empty message
Revision ID: 90f79c1e9d89
Revises: 7a336e26d0b1
Create Date: 2019-07-30 14:26:00.074882
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '90f79c1e9d89'
down_revision = '7a336e26d0b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('items', sa.Column('parcel_id', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('items', 'parcel_id')
# ### end Alembic commands ###
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,867
|
alexarirok/Flask-Project
|
refs/heads/master
|
/resources/form.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextField
from wtforms.validators import DataRequired, ValidationError, Length, Email, EqualTo
from models import User, Order
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=4, max=40)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=4)])
remember = BooleanField('Remember_Me')
submit = SubmitField('Login')
class SignupForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=4, max=40)])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('This email is already taken. Please try another one.')
class OrderForm(FlaskForm):
parcel_name = StringField('Parcel_Name', validators=[DataRequired(), Length(min=2, max=255)])
parcel_number = StringField('Parcel_Number', validators=[DataRequired(), Length(min=4)])
submit = SubmitField('Place Order')
# def validate_parcel(self, order):
# order = Order.query.filter_by(parcel=parcel.data).first()
class ContactForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=2, max=100)])
subject = TextField('Subject', validators=[DataRequired])
message = TextField('Message', validators=[DataRequired])
submit = SubmitField('Send')
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,868
|
alexarirok/Flask-Project
|
refs/heads/master
|
/resources/auth.py
|
from flask import Blueprint, render_template, redirect, request, flash, url_for
from werkzeug.security import generate_password_hash, check_password_hash
from .form import LoginForm, SignupForm, OrderForm
from models import User, Order
from app import db, bcrypt
from flask_login import current_user, logout_user, login_user
auth = Blueprint('auth', __name__)
@auth.route('/login', methods=['POST', 'GET'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:# and bcrypt.check_password_hash(form.password.data, user.password):
login_user(user, remember=form.remember.data)
return redirect(url_for('main.index'))
else:
flash('Login unsuccessfully, Please check your email and password', 'danger')
return render_template("login.html", title="login", form=form)
@auth.route('/signup', methods=['POST', 'GET'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = SignupForm(request.form)
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data)
user = User(email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash(f"Thanks for sining up {form.email.data}", "success")
print ("Sucessfully")
return redirect(url_for('auth.login'))
return render_template('signup.html', form=form)
@auth.route('/logout')
def logout():
logout_user()
return redirect(url_for("auth.login"))
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,869
|
alexarirok/Flask-Project
|
refs/heads/master
|
/db.py
|
# from flask_marshmallow import Marshmallow
# from flask_sqlalchemy import SQLAlchemy
# from flask import Flask
# app = Flask(__name__)
# ma = Marshmallow()
# db = SQLAlchemy(app)
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,870
|
alexarirok/Flask-Project
|
refs/heads/master
|
/migrations/versions/8c2ec4d38e0a_.py
|
"""empty message
Revision ID: 8c2ec4d38e0a
Revises:
Create Date: 2019-07-29 12:12:03.568584
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8c2ec4d38e0a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('parcel',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('parcel_name', sa.String(length=100), nullable=True),
sa.Column('parcel_number', sa.String(length=100), nullable=True),
sa.Column('pickup_destination', sa.String(length=100), nullable=True),
sa.Column('delivery_destination', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('parcel_number')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=40), nullable=True),
sa.Column('password', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
op.drop_table('parcel')
# ### end Alembic commands ###
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,871
|
alexarirok/Flask-Project
|
refs/heads/master
|
/notes/notes.py
|
{{ form.hidden_tag() }}
<fieldset>
<legend>Sign Up</legend>
<div>
{{ form.email.label(class="form-control-label") }}
{% if form.email.errors %}
{{ form.email(class="form-control form-control-lg is-invalid") }}
<div class="is-invalid-feedback">
{% for error in form.email.errors %}
<span> {{ error }}</span>
{% endfor %}
</div>
{% else %}
{{ form.email(class="form-control form-control-lg") }}
{% endif %}
</div>
</fieldset>
</form>
</div>
{% endblock %}
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,872
|
alexarirok/Flask-Project
|
refs/heads/master
|
/resources/main.py
|
from flask import Blueprint, render_template, redirect, request, flash, url_for
from .form import OrderForm, ContactForm
from models import Order
from flask_mail import Message, Mail
from app import db
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template("index.html")
@main.route('/user_change')
def user_change():
return render_template("user_change.html")
@main.route('/cancelorder')
def cancelorder():
return render_template("cancelorder.html")
@main.route('/contactform', methods=['POST', 'GET'])
def contactform():
form = ContactForm(request.form)
if request.method == 'POST':
if form.validate() == False:
flash(f"All fields are required.")
return render_template('contactform.html', form=form)
else:
msg = Message(subject=form.subject.data, sender=form.email.data, recipients=["akorir233@gmail.com"])
msg.body = "Thanks your message has been recieved. We will get back to you shortly"
# (form.name.data, form.email.data, form.message.data)
# mail.send(msg)
return redirect(url_for("main.index"))
elif request.method == 'GET':
return render_template("contactform.html", form=form)
@main.route('/order', methods=['POST', 'GET'])
def order():
form = OrderForm(request.form)
if request.method == 'POST':
parcel_name = request.form.get('parcel_name')
parcel_number = request.form.get('parcel_number')
order = Order(parcel_name=form.parcel_name.data, parcel_number=form.parcel_number.data)
db.session.add(order)
db.session.commit()
flash(f"Parcel ordered succesfully")
return redirect(url_for('main.order'))
return render_template('order.html', form=form)
@main.route('/orders', methods=['GET'])
def orders():
if request.method == 'GET':
order = Order.query.all()
return render_template('order_items.html', order=order)
@main.route('/items')
def status():
return render_template("status.html")
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,873
|
alexarirok/Flask-Project
|
refs/heads/master
|
/models.py
|
from app import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get((user_id))
class User(db.Model, UserMixin):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key = True)
email = db.Column(db.String(40), unique = True)
password = db.Column(db.String(40))
def __repr__(self):
return f"{self.email}"
def self_to_db(self):
db.session.add(self)
db.session.commit()
class Order(db.Model):
__tablename__ = "items"
id = db.Column(db.Integer, primary_key = True)
parcel_name = db.Column(db.String(100))
parcel_number = db.Column(db.String(100), unique = True)
def __repr__(self):
return f"{self.parcel_name}"
def self_to_db(self):
db.session.add(self)
db.session.commit()
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,874
|
alexarirok/Flask-Project
|
refs/heads/master
|
/migrations/versions/b47c79195e47_.py
|
"""empty message
Revision ID: b47c79195e47
Revises: 8c2ec4d38e0a
Create Date: 2019-07-29 17:46:14.435052
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b47c79195e47'
down_revision = '8c2ec4d38e0a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('parcel_name', sa.String(length=100), nullable=True),
sa.Column('parcel_number', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('parcel_number')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('item')
# ### end Alembic commands ###
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,875
|
alexarirok/Flask-Project
|
refs/heads/master
|
/run.py
|
from flask import Flask
from app import db, bcrypt
from flask_login import LoginManager
from models import User
from meinheld import server
from flask_mail import Message, Mail
mail = Mail()
def create_app(config_filename):
app = Flask(__name__)
#db = SQLAlchemy(app)
db.init_app(app)
app.config.from_object(config_filename)
bcrypt.init_app(app)
app.config["MAIL_SERVER"] = "smtp.gmail.com"
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True
app.config["MAIL_USERNAME"] = "akorir233@gmail.com"
app.config["MAIL-PASSWORD"] = "Alex1920$$"
mail.init_app(app)
from app import api_bp
app.register_blueprint(api_bp, url_prefix='/api')
from resources.main import main as main_blueprint
app.register_blueprint(main_blueprint)
from resources.auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
login_manager = LoginManager(app)
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
return User.query.get((user_id))
#from models import db
#db.init_(app)
return app
if __name__ == "__main__":
app = create_app("config")
app.run(debug=True, port=5001, threaded=True)
|
{"/resources/form.py": ["/models.py"], "/resources/auth.py": ["/resources/form.py", "/models.py", "/app.py"], "/resources/main.py": ["/resources/form.py", "/models.py", "/app.py"], "/models.py": ["/app.py"], "/run.py": ["/app.py", "/models.py", "/resources/main.py", "/resources/auth.py"]}
|
5,895
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/models.py
|
from django.db import models
class Topping(models.Model):
title = models.CharField(max_length=255, blank=False, null=False)
def __str__(self):
return self.title
class Pizza(models.Model):
title = models.CharField(max_length=255, blank=False, null=False)
image = models.FileField(upload_to="uploads", blank=True, null=True)
toppings = models.ManyToManyField(Topping, blank=True)
price = models.DecimalField(max_digits=4, decimal_places=2)
def __str__(self):
return self.title
class Order(models.Model):
name = models.CharField(max_length=255, blank=False, null=False)
address = models.CharField(max_length=255, blank=False, null=False)
phone = models.CharField(max_length=255, blank=False, null=False)
email = models.CharField(max_length=255, blank=False, null=False)
pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
quantity = models.IntegerField(blank=False, null=False)
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,896
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/sqs.py
|
from djangosqs.apps.website.pdf import Pdf
from djangosqs.apps.website.postmark import Postmark
from djangosqs.settings import DEFAULT_FROM_EMAIL
import boto3
import json
import time
import typing as t
class Sqs:
def __init__(
self,
region_name: str,
queue_name: str,
dl_queue_name: str,
template_id: str = "",
delay_seconds: int = 0,
visibility_timeout: int = 20,
max_receive_count: int = 5,
wait_seconds: int = 20,
sleep_seconds: int = 5,
) -> None:
self.region_name = region_name
self.queue_name = queue_name
self.dl_queue_name = dl_queue_name
self.template_id = template_id
self.delay_seconds = delay_seconds
self.delay_seconds_str = str(delay_seconds)
self.visibility_timeout = visibility_timeout
self.max_receive_count = max_receive_count
self.wait_seconds = wait_seconds
self.wait_seconds_str = str(wait_seconds)
self.sleep_seconds = sleep_seconds
sqs = boto3.resource("sqs", region_name=self.region_name)
dl_queue_attributes = {"DelaySeconds": self.delay_seconds_str}
sqs.create_queue(QueueName=self.dl_queue_name, Attributes=dl_queue_attributes)
dl_queue = sqs.get_queue_by_name(QueueName=self.dl_queue_name)
dl_queue_arn = dl_queue.attributes["QueueArn"]
redrive_policy = {
"deadLetterTargetArn": dl_queue_arn,
"maxReceiveCount": self.max_receive_count,
}
queue_attributes = {
"DelaySeconds": self.delay_seconds_str,
"ReceiveMessageWaitTimeSeconds": self.wait_seconds_str,
"RedrivePolicy": json.dumps(redrive_policy),
}
self.queue = sqs.create_queue(
QueueName=self.queue_name, Attributes=queue_attributes
)
self.client = boto3.client("sqs", region_name=self.region_name)
def get_queue(self):
return self.queue
def get_client(self):
return self.client
def send_message(
self, message_body: t.Dict[str, t.Union[str, t.Dict[str, str]]]
) -> t.Dict[str, t.Union[str, t.Dict[str, t.Union[int, str, t.Dict[str, str]]]]]:
body = json.dumps(message_body, sort_keys=True)
response = self.client.send_message(
QueueUrl=self.queue.url, MessageBody=body, DelaySeconds=self.delay_seconds
)
return response
def process_queue(self) -> None:
response = self.client.receive_message(
QueueUrl=self.queue.url,
AttributeNames=["SentTimestamp"],
MaxNumberOfMessages=1,
MessageAttributeNames=["All"],
VisibilityTimeout=self.visibility_timeout,
WaitTimeSeconds=self.wait_seconds,
)
if response and "Messages" in response:
response_message = response["Messages"][0]
message_id = response_message["MessageId"]
receipt_handle = response_message["ReceiptHandle"]
message = json.loads(response_message["Body"])
success = self.process_message(message)
if success:
print("Message {} processed.".format(message_id))
self.client.delete_message(
QueueUrl=self.queue.url, ReceiptHandle=receipt_handle
)
else:
print("There was an error with message {}.".format(message_id))
time.sleep(self.sleep_seconds)
def process_message(self, message: dict) -> bool:
pdf = Pdf()
message["action_url"] = pdf.receipt(message)
postmark = Postmark(
subject="",
body="",
from_email=DEFAULT_FROM_EMAIL,
to=[message["to"]],
template_id=self.template_id,
data=message,
)
num_sent = postmark.send()
return num_sent > 0
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,897
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/views.py
|
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic import TemplateView
from djangosqs.apps.website.forms import OrderForm
from djangosqs.apps.website.models import Order
from djangosqs.apps.website.models import Pizza
from djangosqs.apps.website.sqs import Sqs
from djangosqs.settings import MICRO_CONFIG
from djangosqs.settings import TEMPLATE_ID
import datetime
class HomeView(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context["page_title"] = "Order a pizza"
context["pizzas"] = Pizza.objects.all().order_by("id")
return context
class OrderView(TemplateView):
template_name = "order.html"
def get_context_data(self, **kwargs):
context = super(OrderView, self).get_context_data(**kwargs)
context["page_title"] = "Order a pizza"
return context
def get(self, request, *args, **kwargs):
context = self.get_context_data()
pizza = Pizza.objects.get(pk=1)
if "pizza" in kwargs:
id = int(kwargs["pizza"])
pizza = Pizza.objects.get(pk=id)
context["form"] = OrderForm(None, initial={"pizza": pizza})
return super(OrderView, self).render_to_response(context)
def post(self, request, *args, **kwargs):
context = self.get_context_data()
form = OrderForm(self.request.POST)
if form.is_valid():
order = form.save()
details = []
quantity = order.quantity
while quantity > 0:
details.append(
{
"description": order.pizza.title,
"amount": "{} EUR".format(order.pizza.price),
}
)
quantity -= 1
total = order.pizza.price * order.quantity
message_body = {
"to": order.email,
"name": order.name,
"product_name": "Order a Pizza",
"receipt_id": "#{}".format(str(order.id).zfill(4)),
"date": datetime.date.today().strftime("%B %d, %Y"),
"receipt_details": details,
"total": "{} EUR".format(total),
"image": "{}".format(order.pizza.image),
"action_url": "",
}
region_name = str(MICRO_CONFIG["REGION_NAME"])
queue_name = str(MICRO_CONFIG["STANDARD_QUEUE"])
dl_queue_name = str(MICRO_CONFIG["DL_QUEUE"])
sqs = Sqs(
region_name=region_name,
queue_name=queue_name,
dl_queue_name=dl_queue_name,
template_id=TEMPLATE_ID,
)
sqs.send_message(message_body)
return HttpResponseRedirect(reverse("website:orders"))
return super(OrderView, self).render_to_response(context)
class OrdersView(TemplateView):
template_name = "orders.html"
def get_context_data(self, **kwargs):
context = super(OrdersView, self).get_context_data(**kwargs)
context["page_title"] = "Pizza Orders"
context["orders"] = Order.objects.all().order_by("id")
return context
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,898
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/postmark.py
|
from django.core.mail import EmailMessage
class Postmark(EmailMessage):
def __init__(
self,
subject: str = "",
body="",
from_email=None,
to=None,
bcc=None,
connection=None,
attachments=None,
headers=None,
cc=None,
reply_to=None,
template_id="",
data={},
):
self.template_id = template_id
self.merge_global_data = data
super(Postmark, self).__init__(
subject,
body,
from_email,
to,
bcc,
connection,
attachments,
headers,
cc,
reply_to,
)
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,899
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/urls.py
|
from django.urls import path
from djangosqs.apps.website.views import HomeView
from djangosqs.apps.website.views import OrdersView
from djangosqs.apps.website.views import OrderView
app_name = "website"
urlpatterns = [
path("", HomeView.as_view(), name="home"),
path("order/", OrderView.as_view(), name="order"),
path("order/<int:pizza>/", OrderView.as_view(), name="order"),
path("orders/", OrdersView.as_view(), name="orders"),
]
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,900
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/admin.py
|
from django.contrib import admin
from djangosqs.apps.website.models import Order
from djangosqs.apps.website.models import Pizza
from djangosqs.apps.website.models import Topping
class ToppingAdmin(admin.ModelAdmin):
list_display = ("title",)
admin.site.register(Topping, ToppingAdmin)
class PizzaAdmin(admin.ModelAdmin):
list_display = ("title", "image", "price")
admin.site.register(Pizza, PizzaAdmin)
class OrderAdmin(admin.ModelAdmin):
list_display = ("name", "address", "phone", "email", "pizza", "quantity")
admin.site.register(Order, OrderAdmin)
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,901
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/forms.py
|
from django import forms
from djangosqs.apps.website.models import Order
from djangosqs.apps.website.models import Pizza
class RequestForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
super(RequestForm, self).__init__(*args, **kwargs)
class OrderForm(RequestForm):
name = forms.CharField(
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Your Name"}
)
)
address = forms.CharField(
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Your Address"}
)
)
phone = forms.CharField(
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Your Phone"}
)
)
email = forms.CharField(
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Your Email"}
)
)
CHOICES = ((1, "1 pizza"), (2, "2 pizzas"), (3, "3 pizzas"))
quantity = forms.ChoiceField(
choices=CHOICES,
initial="1",
widget=forms.Select(attrs={"class": "form-control"}),
)
pizza = forms.ModelChoiceField(
queryset=Pizza.objects.all(),
widget=forms.Select(attrs={"class": "form-control"}),
)
class Meta:
model = Order
fields = ["name", "address", "phone", "email", "quantity", "pizza"]
def __init__(self, *args, **kwargs):
super(OrderForm, self).__init__(*args, **kwargs)
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,902
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/tests/test_py.py
|
import pytest
def test_py():
assert 1 + 1 == 2, "1 + 1 should equal 2"
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,903
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/management/commands/sqsrunner.py
|
from django.core.management.base import BaseCommand
from djangosqs.apps.website.sqs import Sqs
from djangosqs.settings import MICRO_CONFIG
from djangosqs.settings import TEMPLATE_ID
class Command(BaseCommand):
help = "SQS Runner"
def handle(self, *args, **options):
print("========================")
region_name = str(MICRO_CONFIG["REGION_NAME"])
queue_name = str(MICRO_CONFIG["STANDARD_QUEUE"])
dl_queue_name = str(MICRO_CONFIG["DL_QUEUE"])
sqs = Sqs(
region_name=region_name,
queue_name=queue_name,
dl_queue_name=dl_queue_name,
template_id=TEMPLATE_ID,
)
while True:
sqs.process_queue()
print("========================")
print("Done!")
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,904
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/pdf.py
|
from djangosqs import settings
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Image
from reportlab.platypus import Paragraph
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus import Spacer
import hashlib
class Pdf:
receipt_url = None
def __init__(self):
font = "{}/fonts/{}".format(settings.STATIC_ROOT, "RobotoSlab-Regular.ttf")
pdfmetrics.registerFont(TTFont("roboto", font))
def receipt(self, message):
receipt = []
styles = getSampleStyleSheet()
styles.add(
ParagraphStyle(
name="pizza-title",
fontName="roboto",
fontSize=16,
leading=18,
alignment=TA_CENTER,
)
)
styles.add(
ParagraphStyle(
name="pizza-center",
fontName="roboto",
fontSize=10,
leading=12,
alignment=TA_CENTER,
)
)
styles.add(
ParagraphStyle(
name="pizza-normal",
fontName="roboto",
fontSize=10,
leading=14,
alignment=TA_JUSTIFY,
)
)
text = "Receipt {}".format(message["receipt_id"])
receipt.append(Paragraph(text, styles["pizza-title"]))
receipt.append(Spacer(1, 25))
text = "Thanks for using {}. This PDF is the receipt for your purchase. No payment is due.".format(
message["product_name"]
)
receipt.append(Paragraph(text, styles["pizza-center"]))
receipt.append(Spacer(1, 25))
pizza_image = "{}/{}".format(settings.MEDIA_ROOT, message["image"])
receipt.append(Image(pizza_image))
receipt.append(Spacer(1, 25))
hash_object = hashlib.md5(message["receipt_id"].encode())
file_name = "receipt/{}.pdf".format(hash_object.hexdigest())
doc = SimpleDocTemplate(
"{}/{}".format(settings.MEDIA_ROOT, file_name),
pagesize=A4,
rightMargin=40,
leftMargin=40,
topMargin=150,
bottomMargin=0,
)
doc.build(receipt)
self.receipt_url = "http://127.0.0.1:8000/media/{}".format(file_name)
return self.receipt_url
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,905
|
codehutlabs/django_sqs
|
refs/heads/master
|
/djangosqs/apps/website/migrations/0001_initial.py
|
# Generated by Django 2.2.4 on 2019-08-29 13:44
from django.db import migrations
from django.db import models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the website app.

    Creates Topping, then Pizza (many-to-many to Topping), then Order
    (foreign key to Pizza) — in dependency order. Auto-generated by Django;
    do not edit the operations by hand.
    """

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Topping",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name="Pizza",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=255)),
                ("image", models.FileField(blank=True, null=True, upload_to="uploads")),
                ("price", models.DecimalField(decimal_places=2, max_digits=4)),
                ("toppings", models.ManyToManyField(blank=True, to="website.Topping")),
            ],
        ),
        migrations.CreateModel(
            name="Order",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("address", models.CharField(max_length=255)),
                ("phone", models.CharField(max_length=255)),
                ("email", models.CharField(max_length=255)),
                ("quantity", models.IntegerField()),
                (
                    "pizza",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="website.Pizza"
                    ),
                ),
            ],
        ),
    ]
|
{"/djangosqs/apps/website/sqs.py": ["/djangosqs/apps/website/pdf.py", "/djangosqs/apps/website/postmark.py"], "/djangosqs/apps/website/views.py": ["/djangosqs/apps/website/forms.py", "/djangosqs/apps/website/models.py", "/djangosqs/apps/website/sqs.py"], "/djangosqs/apps/website/urls.py": ["/djangosqs/apps/website/views.py"], "/djangosqs/apps/website/admin.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/forms.py": ["/djangosqs/apps/website/models.py"], "/djangosqs/apps/website/management/commands/sqsrunner.py": ["/djangosqs/apps/website/sqs.py"]}
|
5,916
|
HQhalo/AI_PL_resolution
|
refs/heads/master
|
/main.py
|
from knowledge import *
def input(path):
    """Parse the problem file at *path*.

    Layout: line 1 is the query clause *alpha*, line 2 the clause count N,
    followed by the N knowledge-base clauses.

    NOTE: keeps its historic name even though it shadows the builtin
    input(); the __main__ block calls it by this name.
    """
    # was: file opened and never closed — `with` guarantees the handle is released
    with open(path, "r") as f:
        lines = f.readlines()
    alpha = lines[0][:-1]
    N = int(lines[1])
    PLs = lines[2:2+N]
    # Strip the trailing newline from every clause but the last (the last line
    # may lack one). Harmless either way: knowledge.splitLine() uses str.split().
    for i in range(len(PLs)-1):
        PLs[i] = PLs[i][:-1]
    return alpha, knowledge(PLs)
def output(path, flag, PLs):
    """Write the resolution trace and verdict to *path*.

    For each round in *PLs* (a list of clause lists, or None) the clause
    count is written, then the clauses, one per line; *flag* is written last.
    """
    # was: file opened and never closed — `with` guarantees flushing/closing
    with open(path, "w") as f:
        if PLs is not None:  # was `!= None`; identity check is the Python idiom
            for clause_set in PLs:
                f.write(str(len(clause_set)) + "\n")
                for clause in clause_set:
                    f.write(clause + "\n")
        f.write(flag)
if __name__ == "__main__":
    # Entry point: read the query and knowledge base, run PL-resolution,
    # and write the per-round clauses plus the verdict to output.txt.
    alpha,KB = input("input.txt")
    flag , PLs = KB.PL_resolution(alpha)
    output("output.txt",flag , PLs)
|
{"/main.py": ["/knowledge.py"]}
|
5,917
|
HQhalo/AI_PL_resolution
|
refs/heads/master
|
/knowledge.py
|
class knowledge:
    """Propositional-logic knowledge base in CNF, encoded as an integer matrix.

    Each clause is a row vector over the sorted atom alphabet:
    +1 = positive literal, -1 = negated literal, 0 = atom absent.
    """

    def __init__(self,PLs):
        # Tokenized clauses, one list of literal strings per input line.
        listPls = []
        # split each line, add the line into list
        for PL in PLs:
            listPls.append(self.splitLine(PL))
        # count words
        self.charCount = self.countWords(listPls)
        self.charCount.sort()
        # create dictionary of word to store index in matrix
        self.words = {}
        self.NoWords = 0
        for i in self.charCount:
            self.words[i] = self.NoWords
            self.NoWords +=1
        # Clause matrix: one encoded row per KB clause.
        self.matrix = []
        for i in listPls:
            self.matrix.append(self.sentenceToMatrix(i))

    def splitLine(self,line):
        # Tokenize a clause string, dropping the "OR" connectives.
        tokens = line.split()
        temp = []
        for i in tokens:
            if i != "OR":
                temp.append(i)
        return temp

    def countWords(self, listPls):
        # Collect the distinct atom names (leading '-' stripped).
        charCount = []
        for line in listPls:
            for word in line:
                temp = word
                if word[0] == "-":
                    temp = temp[1:]
                if not temp in charCount:
                    charCount.append(temp)
        return charCount

    def sentenceToMatrix(self,PL):
        # Encode one tokenized clause as a row vector (+1/-1/0 per atom).
        rowMatrix = [0 for x in range(self.NoWords)]
        for word in PL:
            temp = word
            value = 1
            if temp[0] == "-":
                temp = temp[1:]
                value = -1
            if temp in self.words:
                rowMatrix[self.words[temp]] = value
        return rowMatrix

    def toSentence(self,M):
        # Decode a row vector back to "a OR -b" form; "{}" is the empty clause.
        if len(M) != self.NoWords:
            print("err")
            return None
        else:
            sen =""
            for i in range(self.NoWords):
                if M[i] == 1:
                    if sen != "":
                        sen+=" OR "
                    sen += self.charCount[i]
                if M[i] == -1:
                    if sen != "":
                        sen+=" OR "
                    sen += "-"
                    sen += self.charCount[i]
            if sen == "":
                sen +="{}"
            return sen

    def resolutionHelper(self,PL1,PL2,index):
        # Try to resolve PL1 and PL2 on the atom at *index*.
        # Succeeds only when the clauses hold complementary literals there
        # and do not clash on any other atom (tautologies are rejected).
        re = [0 for x in range(self.NoWords)]
        if abs(PL1[index] - PL2[index]) != 2 :
            return False,None
        else:
            for i in range(self.NoWords):
                if i != index:
                    add = PL1[i] + PL2[i]
                    if add == 0:
                        # +1 and -1 on another atom: resolvent would be a tautology.
                        if PL1[i] != 0:
                            return False, None
                    elif add == 2 :
                        re[i] = 1
                    elif add == -2:
                        re[i] = -1
                    else:
                        re[i] = add
            return True, re

    def resolution(self,PL1,PL2):
        # All resolvents of two clauses (one attempt per atom position).
        rel = []
        flag = False
        for i in range(self.NoWords):
            f ,re = self.resolutionHelper(PL1,PL2,i)
            if f == True :
                flag = True
                rel.append(re)
        return rel

    def combine(self,PL1,PL2):
        # Union of two clause lists; also returns the rows that were new to PL2.
        added = []
        if len(PL2) == 0:
            return PL1,PL1
        else:
            rel = PL2.copy()
            for i in PL1:
                if not i in PL2 :
                    rel.append(i)
                    added.append(i)
            return rel,added

    def negative(self,alpha):
        # Negate clause *alpha*: one unit clause per literal (De Morgan).
        rel = []
        for i in range(len(alpha)):
            if alpha[i] != 0:
                temp = [0 for x in range(self.NoWords)]
                temp[i] = - alpha[i]
                rel.append(temp)
        return rel

    def PL_resolution(self,alpha):
        # Public entry point: returns (verdict, clauses per round as strings).
        flag , rel = self.PL_resolutionHelper(alpha)
        PLs = []
        if rel != None:
            for i in rel:
                pl = []
                for j in i:
                    pl .append(self.toSentence(j))
                PLs.append(pl)
        return flag,PLs

    def PL_resolutionHelper(self,alpha):
        # Saturation loop of PL-resolution: KB clauses plus the negated query.
        # "YES"  -> empty clause derived, KB entails alpha.
        # "NO1"/"NO2" -> no (new) resolvents can be produced, not entailed.
        alphaToken = self.splitLine(alpha)
        rowAlpha = self.sentenceToMatrix(alphaToken)
        rowAlphaNegative = self.negative(rowAlpha)
        empty = [0 for x in range(self.NoWords)]
        alphaMatrix = self.matrix.copy()
        alphaMatrix += rowAlphaNegative
        m = len(alphaMatrix)
        newPLs = []
        relPls = []
        while True:
            rel = []
            # Resolve every ordered pair of clauses currently in the set.
            for i in range(m):
                for j in range(m):
                    temp = self.resolution(alphaMatrix[i],alphaMatrix[j])
                    for k in temp:
                        if not k in rel:
                            rel.append(k)
            if len(rel) != 0:
                newPLs,plM = self.combine(rel,newPLs)
                alphaMatrix,added= self.combine(newPLs,alphaMatrix)
                relPls.append(added)
            else:
                relPls.append([])
                return "NO1",relPls
            if empty in newPLs:
                return "YES",relPls
            if len(added) == 0:
                return "NO2",relPls
            m = len(alphaMatrix)
        return 0  # unreachable: the loop above always returns
|
{"/main.py": ["/knowledge.py"]}
|
5,923
|
ushadeepp/Web3pyBlockExplorer
|
refs/heads/master
|
/Config/Config.py
|
class Config:
    """Static application settings for the block explorer."""
    # JSON-RPC endpoint of the local Ethereum node (Ganache default port).
    RpcConnection='HTTP://127.0.0.1:7545'
    # Number of items shown per page in the front-end lists.
    FrontEndWindowSize=5
|
{"/Controller/BlockController.py": ["/Config/Config.py", "/Service/BlockService.py", "/Service/TransactionService.py", "/Helper/helper.py"], "/Service/BlockService.py": ["/Helper/helper.py"], "/Service/TransactionService.py": ["/Helper/helper.py", "/Service/BlockService.py"]}
|
5,924
|
ushadeepp/Web3pyBlockExplorer
|
refs/heads/master
|
/Controller/BlockController.py
|
from flask import Flask
from flask_cors import CORS, cross_origin
import json
from Config.Config import *
from Service.BlockService import *
from Service.TransactionService import *
from Helper.helper import *
import web3
from web3 import Web3
from web3.contract import ConciseContract
from web3.middleware import geth_poa_middleware
# Flask application with permissive CORS so the separate front-end can call it.
app = Flask(__name__)
CORS(app, support_credentials=True)
# Shared Web3 connection to the node configured in Config.
w3 = Web3(Web3.HTTPProvider(Config.RpcConnection))
# w3.middleware_stack.inject(geth_poa_middleware, layer=0)
@app.route('/getBlock/<blockNumber>')
@cross_origin(supports_credentials=True)
def getBlockController(blockNumber):
    """Return one block by number, serialized to a plain dict."""
    return toDict(getBlock(w3, int(blockNumber)))
@app.route('/getBlockList/<startBlockNumber>/<endBlockNumber>')
@cross_origin(supports_credentials=True)
def getBlockListController(startBlockNumber, endBlockNumber):
    """Return the blocks in the inclusive range as a JSON array."""
    blocks = getBlockList(w3, int(startBlockNumber), int(endBlockNumber))
    return json.dumps(blocks)
@app.route('/getBlockListLatest')
@cross_origin(supports_credentials=True)
def getBlockListLatestController():
    """Return the most recent blocks (front-end page size) as JSON."""
    latest = getLatestBlockList(w3, Config.FrontEndWindowSize)
    return json.dumps(latest)
@app.route('/getTransaction/<txhash>')
@cross_origin(supports_credentials=True)
def getTransactionController(txhash):
    """Return one transaction by hash, serialized to a plain dict."""
    # example hash: 0x2e01ef55de66c8d99c61b56e70da12dc442c23f83e8a6a9ed021b215e2467209
    return toDict(getTransaction(w3, txhash))
@app.route('/getTransactionList/<startBlock>/<offSet>')
@cross_origin(supports_credentials=True)
def getTransactionListController(startBlock, offSet):
    """Return recent transactions, paged by start block and offset, as JSON."""
    app.logger.info('Controller hit')
    # recentTransactionList(app,w3,2199491,0,5)
    txs = recentTransactionList(app, w3, int(startBlock), int(offSet), Config.FrontEndWindowSize)
    return json.dumps(txs)
|
{"/Controller/BlockController.py": ["/Config/Config.py", "/Service/BlockService.py", "/Service/TransactionService.py", "/Helper/helper.py"], "/Service/BlockService.py": ["/Helper/helper.py"], "/Service/TransactionService.py": ["/Helper/helper.py", "/Service/BlockService.py"]}
|
5,925
|
ushadeepp/Web3pyBlockExplorer
|
refs/heads/master
|
/Helper/helper.py
|
def toDict(dictToParse):
# convert any 'AttributeDict' type found to 'dict'
parsedDict = dict(dictToParse)
for key, val in parsedDict.items():
if 'list' in str(type(val)):
parsedDict[key] = [_parseValue(x) for x in val]
else:
parsedDict[key] = _parseValue(val)
return parsedDict
def _parseValue(val):
# check for nested dict structures to iterate through
if 'dict' in str(type(val)).lower():
return toDict(val)
# convert 'HexBytes' type to 'str'
elif 'HexBytes' in str(type(val)):
return val.hex()
else:
return val
|
{"/Controller/BlockController.py": ["/Config/Config.py", "/Service/BlockService.py", "/Service/TransactionService.py", "/Helper/helper.py"], "/Service/BlockService.py": ["/Helper/helper.py"], "/Service/TransactionService.py": ["/Helper/helper.py", "/Service/BlockService.py"]}
|
5,926
|
ushadeepp/Web3pyBlockExplorer
|
refs/heads/master
|
/Service/BlockService.py
|
from Helper.helper import *
def getBlock(w3,blockNumber):
    """Fetch a single block by number from the connected node."""
    return w3.eth.getBlock(blockNumber)
# To get the transactions in blocks range
def getTransactionHashList(w3, startBlock, EndBlock):
    """Collect the transaction hashes of every block in [startBlock, EndBlock)."""
    hashes = []
    for block_no in range(startBlock, EndBlock):
        block = w3.eth.getBlock(block_no)
        hashes.extend(block['transactions'])
    return hashes
def getBlockList(w3, startBlock, endBlock):
    """Return blocks from endBlock down to startBlock (clamped at 0), newest first."""
    lower = startBlock if startBlock >= 0 else 0
    return [toDict(getBlock(w3, number)) for number in range(endBlock, lower - 1, -1)]
def getLatestBlockList(w3,listSize):
    """Return the latest block plus the blocks just before it, newest first.

    At most *listSize* blocks are returned (fewer near the chain's genesis).
    """
    resultList=[]
    resultList.append(toDict(w3.eth.getBlock('latest')))
    # Number of the block immediately below 'latest'.
    endBlock=resultList[0]['number']-1
    # +2 because 'latest' already fills one slot and the descending range
    # end is inclusive (range stop is startBlock-1).
    if endBlock-listSize+2>=0:
        startBlock=endBlock-listSize+2
    else:
        startBlock=0
    for i in range(endBlock,startBlock-1,-1):
        resultList.append(toDict(getBlock(w3,i)))
    return resultList
|
{"/Controller/BlockController.py": ["/Config/Config.py", "/Service/BlockService.py", "/Service/TransactionService.py", "/Helper/helper.py"], "/Service/BlockService.py": ["/Helper/helper.py"], "/Service/TransactionService.py": ["/Helper/helper.py", "/Service/BlockService.py"]}
|
5,927
|
ushadeepp/Web3pyBlockExplorer
|
refs/heads/master
|
/Service/TransactionService.py
|
from Helper.helper import *
from Service.BlockService import *
import logging
def getTransaction(w3,txHash):
    """Fetch a single transaction by hash from the connected node."""
    return w3.eth.getTransaction(txHash)
def _collect_block_transactions(app, w3, block, offset, resultList, listlen):
    """Append *block*'s transactions (newest first, skipping *offset*) to
    resultList until it holds *listlen* entries or the block is exhausted."""
    j = 1
    txs = block['transactions']
    while len(resultList) < listlen and len(txs) >= (j + offset):
        result = toDict(getTransaction(w3, txs[-(j + offset)]))
        app.logger.info('result')
        app.logger.info(result)
        resultList.append(result)
        app.logger.info(resultList)
        j += 1


def recentTransactionList(app, w3, maxblock, offset, listlen):
    """Walk blocks downward from *maxblock*, gathering up to *listlen*
    transactions (newest first). *offset* skips transactions of the first
    block only; subsequent blocks are read from their newest transaction.

    Refactored: the two previously duplicated inner collection loops now
    share _collect_block_transactions (the redundant `len > 0` / `len >
    offset` guards were subsumed by the loop condition, so behavior is
    unchanged).
    """
    resultList = []
    app.logger.info('Inside Service transaction list')
    currentBlock = getBlock(w3, maxblock)
    app.logger.info('transactions')
    app.logger.info(currentBlock['transactions'])
    _collect_block_transactions(app, w3, currentBlock, offset, resultList, listlen)
    while len(resultList) < listlen and maxblock > 0:
        maxblock -= 1
        currentBlock = getBlock(w3, maxblock)
        app.logger.info('transactions')
        app.logger.info(currentBlock['transactions'])
        app.logger.info(len(resultList))
        _collect_block_transactions(app, w3, currentBlock, 0, resultList, listlen)
    return resultList
|
{"/Controller/BlockController.py": ["/Config/Config.py", "/Service/BlockService.py", "/Service/TransactionService.py", "/Helper/helper.py"], "/Service/BlockService.py": ["/Helper/helper.py"], "/Service/TransactionService.py": ["/Helper/helper.py", "/Service/BlockService.py"]}
|
5,961
|
Nagarakshith1/Modified-RRT-star-for-dynamic-path-planning
|
refs/heads/master
|
/dynamic_path_planning_map2.py
|
import numpy as np
from shapely.geometry import Point, MultiPoint
from shapely.geometry.polygon import Polygon
from math import log
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pickle
import shapely
from queue import Queue
from obstacles import Obstacles
from RRT import RRT
from replan import Replan
def plot(nodes,robot_pose,obstacle_pose,plot_no=0,path=0,path_present=False,two_paths=False,sigma_separate=[]):
    '''
    Function to plot the nodes, trajectories, the path and the robot motion.

    Parameters
    ----------
    nodes : array
        The tree expanded.
    robot_pose : Point
        The current position of the robot.
    obstacle_pose : Point
        The current position of the obstacle in the environment.
    plot_no : int, optional
        Plot index used for naming the saved figure. The default is 0.
    path : list(int)
        The path, given as indices of nodes in the tree.
    path_present : boolean, optional
        Whether to draw the path on the plot. The default is False.
    two_paths : boolean, optional
        Whether a second path should be drawn as well. The default is False.
    sigma_separate : list(int), optional
        Node indices of the second path to draw. The default is [].

    Returns
    -------
    None.
    '''
    # NOTE(review): sigma_separate=[] is a shared mutable default; it is only
    # read here, so it is harmless as written.
    # plt the grid space
    # plot the obstacles
    # plot the edges
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.axis('square')
    ax.set(xlim=(-10, 10), ylim=(-10, 10))
    # Gray rectangles mirror the cross-shaped static obstacles of map 2.
    rect1 = patches.Rectangle((-0.5, -7), 1, 14, fill=True, facecolor='gray')
    rect2 = patches.Rectangle((-7, -0.5), 14, 1, fill=True, facecolor='gray')
    ax.add_patch(rect1)
    ax.add_patch(rect2)
    # Draw every tree edge whose endpoints are both flagged in-tree (col 4).
    valid_nodes = np.argwhere(nodes[:,4]==1).flatten()
    for j in range(valid_nodes.shape[0]):
        i = valid_nodes[j]
        if nodes[i][4] == 1:
            parent = int(nodes[i][3])
            if (parent ==-1 or nodes[parent,4]!=1):
                continue
            x = [nodes[i][0], nodes[parent][0]]
            y = [nodes[i][1], nodes[parent][1]]
            plt.plot(x, y, color='cyan', linewidth=1,zorder=-1)
    if path_present is True:
        points_x = []
        points_y = []
        for i in path:
            points_x.append(nodes[i][0])
            points_y.append(nodes[i][1])
        plt.plot(points_x, points_y, color='red', linewidth=2)
        plt.plot(points_x[0], points_y[0], color='blue', zorder=0)
    if two_paths:
        points_x = []
        points_y = []
        for i in sigma_separate:
            points_x.append(nodes[i][0])
            points_y.append(nodes[i][1])
        plt.plot(points_x, points_y, color='red', linewidth=2)
        plt.plot(points_x[0], points_y[0], color='blue', zorder=0)
    plt.scatter(robot_pose.x, robot_pose.y, s=10 ** 2, color='blue', zorder=1)
    # Green marker: the fixed goal at (8, 8).
    plt.scatter(8, 8, s=10 ** 2, color='green', zorder=1)
    if obstacle_pose is not(None):
        plt.scatter(obstacle_pose.x,obstacle_pose.y, s=10**2, color = 'red', zorder=1)
    # plt.show()
    plt.savefig('Plots_map2/Fig' + str(plot_no))
# Main script: build the static map, then load a cached RRT* tree if one
# exists, otherwise run RRT* from scratch and cache it via pickle.
map_obstacles = Obstacles()
obstacles = []
obstacles.append(Polygon([(-1.1,7.6),(1.1,7.6),(1.1,-7.6),(-1.1,-7.6)]))
obstacles.append(Polygon([(-7.6,1.1),(7.6,1.1),(7.6,-1.1),(-7.6,-1.1)]))
obstacles.append(Polygon([(-10,9.4),(-10,10),(10,10),(10,9.4)]))
obstacles.append(Polygon([(-10,10),(-10,-10),(-9.4,-10),(-9.4,10)]))
obstacles.append(Polygon([(-10,-10),(10,-10),(10,-9.4),(-10,-9.4)]))
obstacles.append(Polygon([(9.4,-10),(10,-10),(10,10),(9.4,10)]))
map_obstacles.addNewObstacle(obstacles)
obstacles_list = map_obstacles.getObstacles()
cost_total = []
rrt = RRT(obstacles_list)
##################################################################################################################
try:
    # `pkl_in` was previously named `input`, shadowing the builtin input().
    with open('rrt_map2.pkl', 'rb') as pkl_in:
        rrt = pickle.load(pkl_in)
except Exception:  # was a bare `except:`; let KeyboardInterrupt/SystemExit propagate
    print('Pickle file of RRT* data nor found---Running RRT*')
    rrt.start = np.array([-9,-9])
    rrt.nodes[0][0:2] = rrt.start
    rrt.nodes[0][2] = 0
    rrt.nodes[0][3] = 0
    # Standard RRT* loop: sample, steer toward nearest, connect the best
    # parent among near nodes, then rewire the neighborhood.
    for i in range(8000):
        print(i)
        x = rrt.sample()
        nearest_index = rrt.nearest(x)
        if nearest_index is None:
            continue
        x_new, cost_steer = rrt.steer(x, rrt.nodes[nearest_index][0:2])
        check = rrt.is_in_collision(x_new,rrt.nodes[nearest_index][0:2])
        if check is False:
            near_indices = rrt.get_near_list(x_new)
            rrt.connect(x_new, nearest_index, cost_steer,new=True)
            if near_indices.size != 0:
                best_index, cost = rrt.nearest_from_list(x_new,near_indices,reconnect=False)
                if best_index is not(None):
                    cost_steer = rrt.get_dist(x_new,rrt.nodes[best_index][0:2])
                    rrt.connect(x_new, best_index,cost_steer,new=False)
                    rrt.rewire(x_new,near_indices)
    # final (after the sampling iterations): extract the best path and cache the tree
    found, min_cost_idx = rrt.check_goal()
    if found is True:
        path = rrt.find_path(min_cost_idx)
        rrt.plot(path,path_present=True)
    else:
        print('path not found')
    path.reverse()  # NOTE(review): raises NameError if no path was found above
    print(path)
    cost_total.append(rrt.get_cost(min_cost_idx))
    with open('rrt_map2.pkl', 'wb') as pkl_out:
        pickle.dump(rrt, pkl_out, pickle.HIGHEST_PROTOCOL)
# Main Loop
def runDynamicObstacles():
    """Drive the robot along the RRT* path while an obstacle appears and
    moves at scripted steps (3, 8, 10, 12), replanning via reconnect and
    falling back to tree regrowing when reconnect fails.
    """
    # Stacking two columns to the tree (adds the id column used by replan).
    rrt.nodes = np.hstack((rrt.nodes,np.arange(rrt.nodes.shape[0]).reshape(rrt.nodes.shape[0],1)))
    found, min_cost_idx = rrt.check_goal()
    if found is True:
        path = rrt.find_path(min_cost_idx)
    else:
        print('path not found')
    path.reverse()
    rrt.nodes = rrt.nodes[0:rrt.nodes_num,:]
    replan = Replan()
    # Initial dynamic obstacle (disc of radius ~1).
    obstacle_pose = Point(-5,5)
    prev_obstacle_pose = obstacle_pose
    new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
    i=-1
    path_len = len(path)
    plot_no = 0
    step = 0
    # Mark tree nodes swallowed by the obstacle as out-of-tree (col 4 = 0).
    blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle)
    rrt.nodes[blocked_tree_indices, 4] = 0
    while (i!=path_len-1):
        recheck_goal = False
        i+=1
        print(plot_no)
        node = path[i]
        rrt.p_current = node
        robot_pose = Point(rrt.nodes[int(path[i]), 0], rrt.nodes[int(path[i]), 1])
        if step == 3:
            # Scripted event: a new obstacle appears at (3.5, -5).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(3.5,-5.0)
            prev_obstacle_pose = obstacle_pose
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.addNewObstacle(new_obstacle)
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
        if step == 8:
            # Scripted event: the obstacle moves to (7.5, -7.5).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(7.5,-7.5)
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.updateObstacle(new_obstacle,prev_obstacle_pose)
            prev_obstacle_pose = obstacle_pose
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
            blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle)
            rrt.nodes[blocked_tree_indices, 4] = 0
        if step == 10:
            # Scripted event: the obstacle moves to (7.5, 2.5).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(7.5,2.5)
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.updateObstacle(new_obstacle,prev_obstacle_pose)
            prev_obstacle_pose = obstacle_pose
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
        if step == 12:
            # Scripted event: the obstacle moves back to (7.5, -7.5).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(7.5,-7.5)
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.updateObstacle(new_obstacle,prev_obstacle_pose)
            prev_obstacle_pose = obstacle_pose
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
            blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle)
            rrt.nodes[blocked_tree_indices, 4] = 0
        flag_obstacle = replan.detectObstacle(rrt,path,new_obstacle)
        if flag_obstacle is True:
            recheck_goal = False
            print('Obstacle encountered')
            sigma_current, sigma_separate, obstructed_path_ids, blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle,True)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            replan.modify_tree_and_path(rrt,sigma_current,sigma_separate, blocked_tree_indices)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            rrt.p_current = sigma_current[0]
            print('Reconnecting.....')
            flag_reconnect, new_path = replan.reconnect(rrt,sigma_current, sigma_separate)
            if flag_reconnect:
                path = new_path
                i= 0
                path_len = len(path)
                plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                plot_no += 1
            else:
                # Reconnect failed: grow fresh samples near the severed
                # subtree, then try to reconnect once more.
                print('Reconnect failed trying regrow')
                temp = -1*np.ones((3000,6))
                rrt.nodes = np.vstack((rrt.nodes,temp))
                rrt.nodes[:,5] = np.arange(rrt.nodes.shape[0])
                for i in range(1500):
                    replan.regrow(rrt,new_obstacle,sigma_separate[0])
                flag_reconnect, new_path = replan.reconnect(rrt, sigma_current, sigma_separate)
                if flag_reconnect is True:
                    path = new_path
                    i = 0
                    path_len = len(path)
                    plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                    plot_no+=1
                else:
                    print('Regrow failed')
        else:
            plot(rrt.nodes,robot_pose,obstacle_pose,plot_no,path[i:],True)
            plot_no += 1
        if recheck_goal is True:
            if (plot_no !=0):
                flag,min_cost_idx = replan.check_goal(rrt)
                path,cost = replan.find_path(rrt,min_cost_idx)
                i = 0
                path_len = len(path)
        step = step+1
def runReconnect():
    """Demo of the reconnect strategy: a single scripted obstacle appears at
    step 7 and the severed path is stitched back via Replan.reconnect().
    """
    # Stacking two columns to the tree (adds the id column used by replan).
    rrt.nodes = np.hstack((rrt.nodes,np.arange(rrt.nodes.shape[0]).reshape(rrt.nodes.shape[0],1)))
    found, min_cost_idx = rrt.check_goal()
    if found is True:
        path = rrt.find_path(min_cost_idx)
    else:
        print('path not found')
    path.reverse()
    rrt.nodes = rrt.nodes[0:rrt.nodes_num,:]
    replan = Replan()
    # starting point of the obstacle which is added after the RRT* is done
    obstacle_pose = Point(-5,5)
    prev_obstacle_pose = obstacle_pose
    new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
    i=-1
    path_len = len(path)
    plot_no = 0
    step = 0
    blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle)
    rrt.nodes[blocked_tree_indices, 4] = 0
    while (i!=path_len-1):
        recheck_goal = False
        i+=1
        print(plot_no)
        node = path[i]
        rrt.p_current = node
        robot_pose = Point(rrt.nodes[int(path[i]), 0], rrt.nodes[int(path[i]), 1])
        if step == 7:
            # Scripted event: a new obstacle appears at (5, -5).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(5,-5)
            prev_obstacle_pose = obstacle_pose
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.addNewObstacle(new_obstacle)
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
        flag_obstacle = replan.detectObstacle(rrt,path,new_obstacle)
        if flag_obstacle is True:
            print('Obstacle encountered')
            sigma_current, sigma_separate, obstructed_path_ids, blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle,True)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            # replan.prune_till_pcurr(node,rrt)
            replan.modify_tree_and_path(rrt,sigma_current,sigma_separate, blocked_tree_indices)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            rrt.p_current = sigma_current[0]
            print('Reconnecting.....')
            flag_reconnect, new_path = replan.reconnect(rrt,sigma_current, sigma_separate)
            # print(flag_reconnect)
            if flag_reconnect:
                path = new_path
                i= 0
                path_len = len(path)
                plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                plot_no += 1
            else:
                # Reconnect failed: regrow samples, then retry reconnect once.
                print('Reconnect failed trying regrow')
                temp = -1*np.ones((3000,6))
                rrt.nodes = np.vstack((rrt.nodes,temp))
                rrt.nodes[:,5] = np.arange(rrt.nodes.shape[0])
                for i in range(1500):
                    replan.regrow(rrt,new_obstacle,sigma_separate[0])
                flag_reconnect, new_path = replan.reconnect(rrt, sigma_current, sigma_separate)
                if flag_reconnect is True:
                    path = new_path
                    i = 0
                    path_len = len(path)
                    plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                    plot_no+=1
                else:
                    print('Regrow failed')
        else:
            plot(rrt.nodes,robot_pose,obstacle_pose,plot_no,path[i:],True)
            plot_no+=1
        if recheck_goal is True:
            if (plot_no !=0):
                flag,min_cost_idx = replan.check_goal(rrt)
                path,cost = replan.find_path(rrt,min_cost_idx)
                i = 0
                path_len = len(path)
        step = step+1
def runRegrow():
    """Demo of the regrow strategy: a scripted obstacle appears at step 3 in
    a spot where reconnect is expected to fail, exercising Replan.regrow().
    """
    # Stacking two columns to the tree (adds the id column used by replan).
    rrt.nodes = np.hstack((rrt.nodes,np.arange(rrt.nodes.shape[0]).reshape(rrt.nodes.shape[0],1)))
    found, min_cost_idx = rrt.check_goal()
    if found is True:
        path = rrt.find_path(min_cost_idx)
    else:
        print('path not found')
    path.reverse()
    rrt.nodes = rrt.nodes[0:rrt.nodes_num,:]
    replan = Replan()
    # starting point of the obstacle which is added after the RRT* is done
    obstacle_pose = Point(-5,5)
    prev_obstacle_pose = obstacle_pose
    new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
    i=-1
    path_len = len(path)
    plot_no = 0
    step = 0
    blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle)
    rrt.nodes[blocked_tree_indices, 4] = 0
    while (i!=path_len-1):
        recheck_goal = False
        i+=1
        print(plot_no)
        node = path[i]
        rrt.p_current = node
        robot_pose = Point(rrt.nodes[int(path[i]), 0], rrt.nodes[int(path[i]), 1])
        if step == 3:
            # Scripted event: a new obstacle appears at (-5, -8).
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(-5.0,-8.0)
            prev_obstacle_pose = obstacle_pose
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.addNewObstacle(new_obstacle)
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
        flag_obstacle = replan.detectObstacle(rrt,path,new_obstacle)
        if flag_obstacle is True:
            print('Obstacle encountered')
            sigma_current, sigma_separate, obstructed_path_ids, blocked_tree_indices = replan.removeOccupiedNodes(rrt, path, i, new_obstacle,True)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            replan.modify_tree_and_path(rrt,sigma_current,sigma_separate, blocked_tree_indices)
            plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, sigma_current, True,True,sigma_separate)
            plot_no += 1
            rrt.p_current = sigma_current[0]
            print('Reconnecting.....')
            flag_reconnect, new_path = replan.reconnect(rrt,sigma_current, sigma_separate)
            if flag_reconnect:
                path = new_path
                i= 0
                path_len = len(path)
                plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                plot_no += 1
            else:
                # Reconnect failed: regrow samples, then retry reconnect once.
                print('Reconnect failed trying regrow')
                temp = -1*np.ones((3000,6))
                rrt.nodes = np.vstack((rrt.nodes,temp))
                rrt.nodes[:,5] = np.arange(rrt.nodes.shape[0])
                for i in range(1500):
                    replan.regrow(rrt,new_obstacle,sigma_separate[0])
                flag_reconnect, new_path = replan.reconnect(rrt, sigma_current, sigma_separate)
                if flag_reconnect is True:
                    path = new_path
                    i = 0
                    path_len = len(path)
                    plot(rrt.nodes, robot_pose, obstacle_pose, plot_no, path, True)
                    plot_no+=1
                else:
                    print('Regrow failed')
        else:
            plot(rrt.nodes,robot_pose,obstacle_pose,plot_no,path[i:],True)
            plot_no+=1
        if recheck_goal is True:
            if (plot_no !=0):
                flag,min_cost_idx = replan.check_goal(rrt)
                path,cost = replan.find_path(rrt,min_cost_idx)
                i = 0
                path_len = len(path)
        step = step+1
# The function below was written for testing and generating some comparison results and is not part of the algorithm (please ignore)
# Below function was written for testing and generating some comparison results and is not part of the algorithm (please ignore)
def RNN():
    """Baseline for comparison: on obstacle detection, discard the tree and
    re-run full RRT* from the robot's current node instead of repairing it.
    """
    # Stacking two columns to the tree (adds the id column used by replan).
    rrt.nodes = np.hstack((rrt.nodes,np.arange(rrt.nodes.shape[0]).reshape(rrt.nodes.shape[0],1)))
    found, min_cost_idx = rrt.check_goal()
    if found is True:
        path = rrt.find_path(min_cost_idx)
    else:
        print('path not found')
    path.reverse()
    rrt.nodes = rrt.nodes[0:rrt.nodes_num,:]
    replan = Replan()
    # starting point of the obstacle which is added after the RRT* is done
    obstacle_pose = Point(-5,5)
    prev_obstacle_pose = obstacle_pose
    new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
    i=-1
    path_len = len(path)
    plot_no = 0
    step = 0
    while (i!=path_len-1):
        recheck_goal = False
        i+=1
        node = path[i]
        rrt.p_current = node
        robot_pose = Point(rrt.nodes[int(path[i]), 0], rrt.nodes[int(path[i]), 1])
        # if step == 0:
        #     obstacle_pose = Point(-5,5)
        #     prev_obstacle_pose = obstacle_pose
        #     new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
        #     prev_obstacle_pose,path_len,recheck_goal = replan.moveObstacle(rrt,node,obstacle_pose,prev_obstacle_pose,path,i,step)
        #     i = 0
        if step == 3:
            # obstacle_pose = Point(7.5,2.5)
            # # prev_obstacle_pose = obstacle_pose
            # new_obstacle = Polygon(obstacle_pose.buffer(1+0.01))
            # prev_obstacle_pose,path_len,recheck_goal = replan.moveObstacle(rrt,node,obstacle_pose,prev_obstacle_pose,path,i,step)
            # i = 0
            # note whenever updating obstacle update rrt.obstacles as wwll
            rrt.nodes[rrt.nodes[:,4]==0,4]=1
            obstacle_pose = Point(7.5,2.5)
            prev_obstacle_pose = obstacle_pose
            new_obstacle = Polygon(obstacle_pose.buffer(1 + 0.01))
            map_obstacles.addNewObstacle(new_obstacle)
            rrt.obstacles = map_obstacles.getObstacles()
            replan.prune_till_pcurr(node, rrt)
            path = path[i:]
            i = 0
            path_len = len(path)
            replan.modify_tree_and_path(rrt,path)
            rrt.p_current = path[0]
            recheck_goal = True
        flag_obstacle = replan.detectObstacle(rrt,path,new_obstacle)
        if flag_obstacle is True:
            print('obstacle')
            # Fresh RRT* rooted at the robot's current position.
            rrt2 = RRT(rrt.obstacles)
            rrt2.p_current = 0
            rrt2.start = rrt.nodes[rrt.p_current,0:2]
            rrt2.nodes[0,0:2] = rrt2.start
            rrt2.nodes[0][2] = 0
            rrt2.nodes[0][3] = 0
            j = 0
            replan.prune_till_pcurr(rrt.p_current, rrt)
            replan.modify_tree_and_path(rrt,path)
            for i in range(6000):
                print(j)
                j = j+1
                x = rrt2.sample()
                nearest_index = rrt2.nearest(x)
                if nearest_index is None:
                    continue
                x_new, cost_steer = rrt2.steer(x, rrt2.nodes[nearest_index][0:2])
                check = rrt2.is_in_collision(x_new,rrt2.nodes[nearest_index][0:2])
                if check is False:
                    near_indices = rrt2.get_near_list(x_new)
                    rrt2.connect(x_new, nearest_index, cost_steer,new=True)
                    if near_indices.size != 0:
                        best_index, cost = rrt2.nearest_from_list(x_new,near_indices,reconnect=False)
                        if best_index is not(None):
                            cost_steer = rrt2.get_dist(x_new,rrt2.nodes[best_index][0:2])
                            rrt2.connect(x_new, best_index,cost_steer,new=False)
                            rrt2.rewire(x_new,near_indices)
                # Stop as soon as the regrown tree reaches the goal.
                found, min_cost_idx = rrt2.check_goal()
                if found is True:
                    path = rrt2.find_path(min_cost_idx)
                    # plot(rrt2.nodes,robot_pose,None,0,path,True)
                    break
        step = step+1
# Function calls. Run only one of them at a time; when running one, comment
# out the other two calls.
runDynamicObstacles()
# runReconnect()
# runRegrow()
|
{"/dynamic_path_planning_map2.py": ["/obstacles.py", "/RRT.py", "/replan.py"]}
|
5,962
|
Nagarakshith1/Modified-RRT-star-for-dynamic-path-planning
|
refs/heads/master
|
/obstacles.py
|
class Obstacles():
    '''
    Container for the obstacle geometry of the map.
    '''

    def __init__(self):
        '''
        Initialize class state.

        self.obstacles_list: list of obstacles, each represented by a
        shapely.geometry.Polygon.
        '''
        self.obstacles_list = []

    def getObstacles(self):
        """Return the current list of obstacles."""
        return self.obstacles_list

    def addNewObstacle(self, new_obstacle):
        '''
        Add an obstacle to the map.

        If a list is passed, the obstacle list is extended with it;
        otherwise the single Polygon is appended.

        new_obstacle: list of Polygons or a single Polygon
        '''
        if type(new_obstacle) == list:
            self.obstacles_list.extend(new_obstacle)
        else:
            self.obstacles_list.append(new_obstacle)

    def updateObstacle(self, new_obstacle, old_obstacle):
        '''
        Replace an obstacle already present on the map: the first entry
        equal to old_obstacle is removed and new_obstacle is appended.

        Parameters
        ----------
        new_obstacle : Polygon
            The replacement obstacle.
        old_obstacle : Polygon
            The obstacle to be replaced.

        Returns
        -------
        None.
        '''
        # was: `del` inside enumerate() kept iterating a mutated list and a
        # stray `print('false')` fired for every non-matching entry.
        for idx, obs in enumerate(self.obstacles_list):
            if old_obstacle == obs:
                del self.obstacles_list[idx]
                self.obstacles_list.append(new_obstacle)
                break

    def removeObstacle(self, removed_obstacle):
        '''
        Remove the first obstacle equal to removed_obstacle from the list.

        Parameters
        ----------
        removed_obstacle : Polygon

        Returns
        -------
        None.
        '''
        # was: `del` inside enumerate() — stop after the first match instead
        # of continuing over the shifted list.
        for idx, obs in enumerate(self.obstacles_list):
            if removed_obstacle == obs:
                del self.obstacles_list[idx]
                break
|
{"/dynamic_path_planning_map2.py": ["/obstacles.py", "/RRT.py", "/replan.py"]}
|
5,963
|
Nagarakshith1/Modified-RRT-star-for-dynamic-path-planning
|
refs/heads/master
|
/RRT.py
|
import numpy as np
from shapely.geometry import Point, MultiPoint
from math import log
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class RRT():
    """
    RRT* tree builder for path planning on a 20x20 map (points are
    sampled in [-10, 10] x [-10, 10]).

    The tree lives in the pre-allocated ``self.nodes`` array; rows are
    nodes, columns are documented in ``__init__``.
    """

    def __init__(self,obstacles_list):
        '''
        Initialize the planner state.

        Parameters
        ----------
        obstacles_list : list(Polygon)
            List of obstacles in the environment.
        '''
        # Node table, one row per node.  Columns:
        #   0: x, 1: y,
        #   2: cost of the segment between the node and its parent,
        #   3: parent index, 4: in_tree flag (1/0).
        # NOTE(review): the replanner (Replan) also indexes a column 5
        # ("id_number"), but this array is allocated with only 5 columns;
        # presumably it is widened elsewhere before replanning -- confirm
        # against the driver script.
        self.nodes = -1*np.ones((10050,5))
        self.nodes_num = 1          # number of vertices currently in the tree
        self.start = np.array([0,0])
        self.goal = np.array([8,8])
        self.gamma = 30             # RRT* near-radius scale factor
        self.dimension = 2          # state-space dimension
        self.epsilon = 2            # upper bound on the near-radius
        self.p_current = 0          # index of the robot's current node
        self.obstacles = obstacles_list

    def sample(self):
        '''
        Sample a random collision-free point in the map.

        Returns
        -------
        x : numpy array (2,)
            A random point that does not intersect any obstacle.
        '''
        while True:
            x = 10*np.random.uniform(-1,1,size=(2))
            x_point = Point(x)
            # Reject the sample if it lies inside/on any obstacle.
            collision = False
            for i,obs in enumerate(self.obstacles):
                check = obs.intersects(x_point)
                if check is True:
                    collision = True
                    break
            if collision is False:
                return x

    def get_cost(self,index):
        '''
        Cost (summed segment lengths) of reaching node ``index`` from the
        robot's current node.

        Parameters
        ----------
        index : int or None
            Index of the node in the tree.

        Returns
        -------
        cost : float or None
            Accumulated cost, or None when ``index`` is None.
        '''
        cost = 0
        if index is not None:
            # Walk up the parent links accumulating segment costs.
            # NOTE(review): the loop stops when the *parent's* own parent
            # field equals p_current, so the final parent's segment cost is
            # not added -- verify this termination condition is intended.
            while True:
                cost = cost + self.nodes[index][2]
                index = int(self.nodes[index][3])
                if(self.nodes[index][3]==self.p_current):
                    return cost
            return None  # unreachable: the loop above never falls through
        else:
            return None

    def steer(self, x1, nearest):
        '''
        Steer from ``nearest`` towards the sampled point ``x1``, limiting
        the step length to 1.

        Parameters
        ----------
        x1 : numpy array (2,)
            The randomly sampled point.
        nearest : numpy array (2,)
            The nearest node already present in the tree.

        Returns
        -------
        x1/x_new : numpy array (2,)
            Either ``x1`` itself (when within unit distance) or the point
            one unit away from ``nearest`` towards ``x1``.
        distance : float
            Cost of the produced segment (at most 1).
        '''
        x_new = np.zeros((2))
        # distance is the cost of the path connecting the two points
        distance = np.linalg.norm(x1-nearest,axis=0)
        if distance>1:
            # Clip the step to unit length along the x1 direction.
            x_new[0] = nearest[0] + (x1[0]-nearest[0])/distance
            x_new[1] = nearest[1] + (x1[1]-nearest[1])/distance
            return x_new,1
        else:
            return x1,distance

    def get_dist(self,x1,x2):
        '''Euclidean distance between two points.'''
        distance = np.linalg.norm(x2-x1)
        return distance

    def is_in_collision(self,point1,point2):
        '''
        Check whether the straight segment between two points collides
        with any obstacle.

        Parameters
        ----------
        point1 : numpy array (2,)
        point2 : numpy array (2,)

        Returns
        -------
        collision : bool
            True if the segment intersects any obstacle, else False.
        '''
        # Discretize the segment into 20 points and collision-test them all.
        t = np.linspace(0,1,20)
        x = point1[0] + (point2[0]-point1[0]) * t
        y = point1[1] + (point2[1]-point1[1]) * t
        x = x.reshape((-1,1))
        y = y.reshape((-1,1))
        points = np.concatenate((x,y),axis=1)
        points = MultiPoint(points)
        collision = False
        for i,obs in enumerate(self.obstacles):
            check = obs.intersects(points)
            if check is True:
                collision = True
                break
        return collision  # True if any obstacle intersects the segment

    def nearest(self, x_rand):
        '''
        Index of the tree node closest to ``x_rand``.

        Parameters
        ----------
        x_rand : numpy array (2,)
            Randomly sampled point (not yet added to the tree).

        Returns
        -------
        p : int or None
            Tree index of the closest node, or None in the degenerate case.
        '''
        x_nearest_dist = np.linalg.norm(self.nodes[0:self.nodes_num,0:2]-x_rand,axis=1)
        x_nearest_ids = np.argsort(x_nearest_dist)
        p = x_nearest_ids[0]
        if p==self.nodes_num:
            return None
        else:
            return p

    def get_near_list(self, x_rand):
        '''
        Indices of tree nodes inside a ball around ``x_rand`` whose radius
        shrinks as the tree grows: the standard RRT* near-radius
        gamma * (log n / n) ** (1/d), capped at epsilon.

        Parameters
        ----------
        x_rand : numpy array (2,)
            Randomly sampled point.

        Returns
        -------
        near_indices : numpy array of int
            Tree indices within the ball.
        '''
        x_nearest_dist = np.linalg.norm(self.nodes[0:self.nodes_num,0:2]-x_rand,axis=1)
        x_nearest_ids = np.argsort(x_nearest_dist)
        vol = self.gamma*(log(self.nodes_num)/self.nodes_num)**(1/self.dimension)
        vol = min(vol, self.epsilon)
        near_indices = x_nearest_ids[x_nearest_dist[x_nearest_ids]<=vol]
        return near_indices

    def nearest_from_list(self, x_rand, near_indices, reconnect):
        '''
        Pick, among ``near_indices``, the parent yielding the lowest
        global cost for ``x_rand``.

        Parameters
        ----------
        x_rand : numpy array (2,)
            Randomly sampled point.
        near_indices : array of int
            Candidate parents in the vicinity of the sampled point.
        reconnect : bool
            When False, only return a parent that improves on the cost of
            the most recently added node; otherwise always return the best.

        Returns
        -------
        final_index : int or None
            Tree index of the chosen parent (None when no improvement).
        cost : numpy array or None
            Cost vector over all candidates (None when no improvement).
        '''
        cost = np.zeros_like(near_indices)
        for k,index in enumerate(near_indices):
            condition = self.is_in_collision(x_rand,self.nodes[index][0:2])
            if condition is False:
                cost[k] = self.get_cost(index)
                cost[k] = cost[k] + np.linalg.norm(x_rand-self.nodes[index][0:2])
            else:
                # Blocked candidates get a prohibitively large cost.
                cost[k] = 1000000
        final_index = near_indices[np.argmin(cost)]
        if reconnect is False:
            cost_current = self.get_cost(self.nodes_num-1)
            if cost_current > np.min(cost):
                return final_index,cost
            else:
                return None,None
        else:
            return final_index,cost

    def connect(self, x, dist_index, cost_steer, new = False):
        '''
        Add state ``x`` to the tree (``new=True``), with cost equal to the
        parent's cost plus the steering cost; or rewrite the most recently
        added node's position/parent/cost in place (``new=False``).
        '''
        if new is True:
            self.nodes[self.nodes_num][0:2] = x
            self.nodes[self.nodes_num][2] = cost_steer # segment cost
            self.nodes[self.nodes_num][3] = dist_index # parent index
            self.nodes[self.nodes_num][4] = 1 # in-tree flag
            self.nodes_num +=1 # one more node in the tree
        else:
            self.nodes[self.nodes_num-1][0:2] = x
            self.nodes[self.nodes_num-1][2] = cost_steer # segment cost
            self.nodes[self.nodes_num-1][3] = dist_index # parent index

    def rewire(self, x_rand, near_indices):
        '''
        RRT* rewiring step: any node in ``near_indices`` whose global cost
        would drop by adopting the newly added node as parent is
        re-parented, and its segment cost updated.

        Parameters
        ----------
        x_rand : numpy array (2,)
            Position of the newly added node.
        near_indices : array of int
            Tree indices in the vicinity of the new node.
        '''
        # Cost of the newly added node (last row written by connect()).
        cost_new = self.get_cost(self.nodes_num-1)
        for i in near_indices:
            # Skip direct children of the robot's current node.
            if self.nodes[i,3] == self.p_current:
                continue
            condition = self.is_in_collision(x_rand,self.nodes[i][0:2])
            if condition is False:
                dist = np.linalg.norm(x_rand-self.nodes[i][0:2])
                cost_near = cost_new + dist
                if cost_near<self.get_cost(i):
                    self.nodes[i][3] = self.nodes_num-1
                    self.nodes[i][2] = dist

    def check_goal(self):
        '''
        Check whether the tree reaches the goal region (within 0.6).

        Returns
        -------
        bool
            Whether a node close enough to the goal exists.
        min_cost_idx : int or None
            Index of the cheapest such node (lowest global cost from the
            start), or None when the goal is not yet reached.
        '''
        dist_from_goal = np.linalg.norm(self.nodes[0:self.nodes_num,0:2]-self.goal,axis=1)
        idx = np.argwhere(dist_from_goal<0.6).flatten()
        if len(idx) != 0:
            cost = [self.get_cost(i) for i in idx.flatten()]
            min_cost_idx = idx[np.argmin(np.array(cost))]
            return True, min_cost_idx
        else:
            return False, None

    def find_path(self, min_cost_idx):
        '''
        Extract the path ending at ``min_cost_idx`` by walking parent
        links back to the robot's current node.

        Parameters
        ----------
        min_cost_idx : int
            Tree index of the path's end node.

        Returns
        -------
        path : list(int)
            Node indices from ``min_cost_idx`` back to the current node
            (goal-to-start order).
        '''
        path = []
        path.append(min_cost_idx)
        min_cost_idx = int(min_cost_idx)
        while min_cost_idx!=self.p_current:
            p = int(self.nodes[min_cost_idx][3])
            path.append(p)
            min_cost_idx = p
        return path

    def plot(self, path=0, path_present=False):
        '''
        Plot the map, the fixed obstacles, every tree edge and
        (optionally) the path.

        Parameters
        ----------
        path : list(int), optional
            Path node indices (tree indices). The default is 0.
        path_present : bool, optional
            Whether to draw the path. The default is False.
        '''
        fig, ax = plt.subplots(figsize=(6,6))
        ax.axis('square')
        ax.set(xlim=(-10, 10), ylim=(-10, 10))
        # The two fixed cross-shaped obstacles of this map.
        rect1 = patches.Rectangle((-0.5, -7), 1, 14, fill=True, facecolor='gray')
        rect2 = patches.Rectangle((-7, -0.5), 14, 1, fill=True, facecolor='gray')
        ax.add_patch(rect1)
        ax.add_patch(rect2)
        # Draw every in-tree edge (node -> parent).
        for i in range(self.nodes_num):
            if self.nodes[i][4]==1:
                parent = int(self.nodes[i][3])
                x = [self.nodes[i][0],self.nodes[parent][0]]
                y = [self.nodes[i][1],self.nodes[parent][1]]
                plt.plot(x,y,color='cyan',linewidth=1,zorder=-1)
        if path_present is True:
            points_x = []
            points_y = []
            for i in path:
                points_x.append(self.nodes[i][0])
                points_y.append(self.nodes[i][1])
            plt.plot(points_x,points_y,color='red',linewidth=2)
            plt.plot(points_x[0],points_y[0],color='blue',zorder=0)
        plt.scatter(self.start[0],self.start[1],s=10**2,color='blue',zorder=1)
        plt.scatter(8,8,s=10**2,color='green',zorder=1)
        plt.show()
|
{"/dynamic_path_planning_map2.py": ["/obstacles.py", "/RRT.py", "/replan.py"]}
|
5,964
|
Nagarakshith1/Modified-RRT-star-for-dynamic-path-planning
|
refs/heads/master
|
/replan.py
|
import numpy as np
from shapely.geometry import Point, MultiPoint
from queue import Queue
class Replan():
    """
    Replanning utilities for the RRT* tree: pruning blocked subtrees,
    reconnecting split path segments and regrowing the tree when a
    dynamic obstacle invalidates the current path.
    """

    def __init__(self):
        pass

    def bfs_removal(self,node_id,rrt,blocked_nodes = False,ignore_id=-1):
        '''
        Breadth-first traversal from ``node_id`` marking the whole subtree
        for removal.

        Parameters
        ----------
        node_id : int
            Root of the subtree to mark.
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        blocked_nodes : bool, optional
            True marks nodes as blocked by a dynamic obstacle (flag 0);
            False marks them as pruned (flag 2). The default is False.
        ignore_id : int, optional
            The child node (lying on the path) excluded from deletion.
            The default is -1.
        '''
        q = Queue()
        q.put(node_id)
        while(not q.empty()):
            parent_id = q.get()
            # Flag 0 = temporarily blocked, flag 2 = pruned for good.
            if blocked_nodes:
                rrt.nodes[parent_id,4] = 0
            else:
                rrt.nodes[parent_id, 4] = 2
            # Children still alive (in-tree or blocked) of this node.
            child_ids = np.argwhere(np.logical_and(rrt.nodes[:,3]==parent_id, np.logical_or(rrt.nodes[:,4]==1, rrt.nodes[:,4]==0))).flatten()
            child_ids = child_ids[child_ids!=ignore_id]
            if (child_ids.shape[0]):
                if blocked_nodes:
                    rrt.nodes[child_ids,4] = 0
                else:
                    rrt.nodes[child_ids, 4] = 2
                for i in range(child_ids.shape[0]):
                    q.put(child_ids[i])

    def prune_till_pcurr(self,pcurr_id,rrt):
        '''
        Prune the tree up to the node where the robot is currently
        located: every sibling subtree hanging off the already-traversed
        portion of the path is marked for removal.

        Parameters
        ----------
        pcurr_id : int
            Tree index of the robot's current node.
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        '''
        # Collect the chain of ancestors from the current node back to the
        # old start node.
        list_id = [pcurr_id]
        parent_id = int(rrt.nodes[pcurr_id,3])
        # The old start is the node that is its own parent
        # (parent field == id column).
        old_start_id = int(np.argwhere(rrt.nodes[:,3] == rrt.nodes[:,5])[0])
        while(parent_id != old_start_id):
            list_id.append(parent_id)
            parent_id = int(rrt.nodes[parent_id, 3])
        list_id.append(old_start_id)
        list_id = list_id[::-1]
        # Remove each ancestor's subtree, sparing the branch leading on.
        for i in range(len(list_id)-1):
            self.bfs_removal(list_id[i],rrt,False,list_id[i+1])

    def modify_tree_and_path(self,rrt,sigma_current=[],sigma_separate=[], blocked_tree_indices = []):
        '''
        Physically delete pruned nodes from the tree array and remap all
        surviving indices: array positions change once rows are deleted,
        so parent links and the two path segments are rewritten in terms
        of the new positions.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        sigma_current : list(int), optional
            Path segment before the obstacle (unique ids). Default [].
        sigma_separate : list(int), optional
            Path segment after the obstacle (unique ids). Default [].
        blocked_tree_indices : list(int), optional
            Tree nodes blocked by the new obstacle. Default [].

        NOTE(review): the mutable default arguments are shared between
        calls; harmless only as long as callers never rely on them --
        confirm call sites always pass explicit lists.
        '''
        invalid_ids = np.argwhere(rrt.nodes[:,4]==2).flatten()
        # Mark everything hanging under the blocked nodes as blocked too.
        for index in blocked_tree_indices:
            self.bfs_removal(index,rrt,True,sigma_separate[0])
        rrt.nodes = np.delete(rrt.nodes,invalid_ids,0)
        rrt.nodes_num-= invalid_ids.shape[0]
        if len(rrt.nodes)==0:
            return
        # Translate the path segments from unique ids to array positions.
        for i,u_id in enumerate(sigma_current):
            array_id = int(np.argwhere(rrt.nodes[:,5]==u_id)[0])
            sigma_current[i] = array_id
        for i,u_id in enumerate(sigma_separate):
            array_id = int(np.argwhere(rrt.nodes[:,5]==u_id)[0])
            sigma_separate[i] = array_id
        # Update the parent ids of all children.
        for i in range(0,rrt.nodes_num):
            if sigma_separate !=[]:
                if i == sigma_separate[0]:
                    # Root of the separated segment: orphan it for now.
                    rrt.nodes[i,3] = -1
                    continue
            parent_uid = rrt.nodes[i,3]
            if(sum(1*rrt.nodes[:,5]==parent_uid)==0):
                # Parent row was deleted: make the node its own parent.
                rrt.nodes[i, 3] = i
                continue
            parent_aid = int(np.argwhere(rrt.nodes[:,5]==parent_uid)[0])
            rrt.nodes[i,3] = parent_aid
        # Renumber the unique ids to match the compacted array.
        rrt.nodes[:,5] = np.arange(rrt.nodes.shape[0])

    def detectObstacle(self, rrt, path, newObstacle):
        '''
        Check whether a new obstacle actually blocks the current path.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        path : list(int)
            Tree indices of the nodes forming the robot's path.
        newObstacle : Polygon
            The dynamic obstacle in question.

        Returns
        -------
        bool
            True when the obstacle intersects the path's nodes, else False.
        '''
        points = []
        points = rrt.nodes[path,0:2]
        points_shapely = MultiPoint(points)
        if points_shapely.intersects(newObstacle):
            return True
        else:
            return False

    def removeOccupiedNodes(self, rrt, path, p_current_index, new_obstacle,path_blocked = False):
        '''
        Determine every tree node occupied by the dynamic/new obstacle.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        path : list(int)
            Tree indices of the nodes forming the robot's path.
        p_current_index : int
            Position, within ``path``, of the robot's current node.
        new_obstacle : Polygon
            The dynamic obstacle in question.
        path_blocked : bool, optional
            When True, additionally split the path into the segments
            before and after the obstacle. The default is False.

        Returns
        -------
        sigma_current : list(int)
            Path segment before the obstacle (only when ``path_blocked``).
        sigma_separate : list(int)
            Path segment after the obstacle (only when ``path_blocked``).
        obstructed_ids : list(int)
            Positions within ``path`` blocked by the obstacle (only when
            ``path_blocked``).
        indices : numpy array of int
            Tree indices lying inside the new obstacle (always returned).
        '''
        if path_blocked is True:
            obstructed_path_nodes = [] # node tree indices
            obstructed_ids = [] # positions within the path
            for i in range(len(path)-1): # no need to check the goal position
                point = Point(tuple(rrt.nodes[path[i],0:2]))
                if point.intersects(new_obstacle):
                    obstructed_path_nodes.append(path[i])
                    obstructed_ids.append(i)
            # Split the path around the obstructed stretch.
            sigma_separate = path[obstructed_ids[-1]+1:]
            sigma_current = path[p_current_index:obstructed_ids[0]]
            # Collect every tree node lying inside/on the obstacle.
            indices = []
            for i in range(rrt.nodes_num):
                point = Point(rrt.nodes[i,0:2])
                indices.append(new_obstacle.intersects(point))
            indices = np.argwhere(np.asarray(indices))
            indices = indices.reshape(-1)
            return sigma_current, sigma_separate, obstructed_ids, indices
        else:
            indices = []
            for i in range(rrt.nodes_num):
                point = Point(rrt.nodes[i, 0:2])
                indices.append(new_obstacle.intersects(point))
            indices = np.argwhere(np.asarray(indices))
            indices = indices.reshape(-1)
            return indices

    def find_path(self, rrt, node, break_id = -1):
        '''
        Path from the robot's current node to ``node`` after replanning.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        node : int
            Tree index the path should end at.
        break_id : int, optional
            Node at which to abort the walk (no valid path exists;
            infinite cost is returned). The default is -1.

        Returns
        -------
        path : list(int)
            Tree indices from the current node to ``node`` ([] on failure).
        cost : float
            Total euclidean cost (np.infty on failure).
        '''
        path = [node]
        cost = rrt.nodes[node,2]
        node = int(node)
        while node != rrt.p_current:
            p = int(rrt.nodes[node][3])
            # Abort when the walk hits the forbidden node, an orphan, or a
            # node blocked by a dynamic obstacle.
            if(p == break_id or p == -1 or rrt.nodes[p,4]==0):
                return [],np.infty
            cost = cost + rrt.nodes[p,2]
            path.append(p)
            node = p
        path.reverse()
        return path,cost

    def reconnect(self,rrt, sigma_current, sigma_separate):
        '''
        Try to reattach the separated path segment to the live tree: each
        node of ``sigma_separate`` is tested against nearby live nodes and
        attached to the parent giving the globally lowest-cost path.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        sigma_current : list(int)
            Path segment before the obstacle.
        sigma_separate : list(int)
            Path segment after the obstacle.

        Returns
        -------
        bool
            True when the reconnection succeeded.
        best_path : list(int)
            The best new path, or [] when reconnection failed.
        '''
        radius = 0.75
        for i,separate_node in enumerate(sigma_separate):
            parent_uid = rrt.nodes[i,3]  # NOTE(review): unused local
            # Live nodes within the reconnect radius of this segment node.
            distance = np.linalg.norm(rrt.nodes[:,0:2]-rrt.nodes[separate_node,0:2],axis=1)
            potential_parents = np.argwhere((np.logical_and(distance<=radius, rrt.nodes[:,4]==1))).reshape(-1)
            potential_parents = self.modify_near_indices(rrt,potential_parents,sigma_separate[0])
            if len(potential_parents)!=0:
                min_cost = 100000
                best_path = []
                for j,potential_parent in enumerate(potential_parents):
                    path,cost = self.find_path(rrt,potential_parent,sigma_separate[0])
                    cost = cost + np.linalg.norm(rrt.nodes[potential_parent,0:2]-rrt.nodes[separate_node,0:2])
                    if cost<min_cost:
                        min_cost = cost
                        best_path = path
                        # Re-parent the segment node onto the new parent.
                        rrt.nodes[separate_node,3] = potential_parent
                        rrt.nodes[separate_node,2] = np.linalg.norm(rrt.nodes[potential_parent,0:2]-rrt.nodes[separate_node,0:2])
                if(best_path==[]):
                    return False, []
                # New path = route to the parent + rest of the old segment.
                best_path = best_path + sigma_separate[i:]
                return True,best_path
        return False,[]

    def nearest_regrow(self, rrt, x, sigma_separate_0):
        '''
        Nearest admissible node to ``x`` during regrowth, skipping nodes
        blocked by dynamic obstacles and anything descending from the
        separated path segment.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        x : numpy array (2,)
            Query position.
        sigma_separate_0 : int
            First node of the separated path segment.

        Returns
        -------
        p : int or None
            Tree index of the nearest admissible node.
        '''
        # Assumes nodes blocked by the obstacle are already flagged out.
        x_nearest_dist = np.linalg.norm(rrt.nodes[0:rrt.nodes_num,0:2]-x,axis=1)
        x_nearest_ids = np.argsort(x_nearest_dist)
        i = 0
        while True:
            p = x_nearest_ids[i]
            if rrt.nodes[p,4]==0:
                # Skip nodes blocked by the dynamic obstacle.
                i = i+1
                continue
            output = self.modify_near_indices(rrt,np.array(p).flatten(),sigma_separate_0)
            if len(output)!=0:
                return p
            i = i+1
            if i == rrt.nodes_num:
                return None

    def regrow(self, rrt, new_obstacle, sigma_separate_0):
        '''
        One regrowth step: add a single new node to the live part of the
        tree (standard RRT* extend + choose-parent + rewire) to create new
        possible trajectories around the obstacle.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        new_obstacle : Polygon
            The obstacle blocking the path.
        sigma_separate_0 : int
            First node of the separated path segment.
        '''
        # Sample until a collision-free extension is found.
        while True:
            x = rrt.sample()
            nearest_index = self.nearest_regrow(rrt, x, sigma_separate_0)
            x_new, cost_steer = rrt.steer(x, rrt.nodes[nearest_index][0:2])
            check = rrt.is_in_collision(x_new,rrt.nodes[nearest_index][0:2])
            if check is True:
                continue
            else:
                break
        near_indices = rrt.get_near_list(x_new)
        rrt.connect(x_new, nearest_index, cost_steer,new=True)
        if near_indices.shape[0] != 0:
            # Restrict the neighbor set to admissible, in-tree nodes.
            near_indices = self.modify_near_indices(rrt, near_indices, sigma_separate_0)
            near_indices = near_indices[np.argwhere(rrt.nodes[near_indices,4]==1).flatten()]
            if near_indices.shape[0] != 0:
                best_index, cost = rrt.nearest_from_list(x_new,near_indices,reconnect=False)
                if best_index is not(None):
                    # A cheaper parent exists: rewrite the new node in place.
                    cost_steer = rrt.get_dist(x_new,rrt.nodes[best_index][0:2])
                    rrt.connect(x_new, best_index,cost_steer,new=False)
                rrt.rewire(x_new,near_indices)

    def modify_near_indices(self, rrt, near_indices, sigma_separate_0):
        '''
        Filter ``near_indices`` so that no orphaned node, nor any node
        descending from the separated path segment, remains (used by the
        regrow step only).

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.
        near_indices : numpy array of int
            Candidate neighbor indices.
        sigma_separate_0 : int
            First node of the separated path segment.

        Returns
        -------
        near_indices : numpy array of int
            Candidates with separated-segment relatives removed.
        '''
        near_indices_copy = near_indices
        for i in near_indices_copy:
            parent_id = int(rrt.nodes[i,3])
            if parent_id == -1:
                # Orphaned root of the separated segment: drop it.
                near_indices = near_indices[near_indices!=i]
                continue
            # Walk up towards the current node; drop the candidate if the
            # walk passes through the separated segment's root.
            while parent_id!= rrt.p_current:
                if parent_id == sigma_separate_0:
                    near_indices = near_indices[near_indices!=i]
                    break
                parent_id = int(rrt.nodes[parent_id,3])
        return near_indices

    def check_goal(self,rrt):
        '''
        Check for the goal among live nodes after replanning.

        Parameters
        ----------
        rrt : object of RRT()
            Object which stores information of the tree expanded by RRT*.

        Returns
        -------
        bool
            Whether a live node within 0.6 of the goal exists.
        min_cost_idx : int or None
            Index of such a node with the lowest global cost from the
            robot's current position.

        NOTE(review): ``idx`` is always an ndarray here, so
        ``idx is not None`` is always True; when ``idx`` is empty,
        np.argmin raises instead of reaching the False branch -- confirm
        whether an emptiness check was intended (as in RRT.check_goal).
        '''
        dist_from_goal = np.linalg.norm(rrt.nodes[0:rrt.nodes_num,0:2]-rrt.goal,axis=1)
        idx = np.argwhere(np.logical_and(dist_from_goal<0.6,rrt.nodes[0:rrt.nodes_num,4]==1)).flatten()
        if idx is not(None):
            cost = [rrt.get_cost(i) for i in idx.flatten()]
            min_cost_idx = idx[np.argmin(np.array(cost))]
            return True, min_cost_idx
        else:
            return False, None
|
{"/dynamic_path_planning_map2.py": ["/obstacles.py", "/RRT.py", "/replan.py"]}
|
6,000
|
Cepesp-Fgv/tse-dados
|
refs/heads/master
|
/etl/fixes/BemCandidatoSiglaUEFix.py
|
class BemCandidatoSiglaUEFix:
    """
    ETL fix for `bem_candidato` datasets from years before 2014:
    fills the SIGLA_UE column with the value of SIGLA_UF.
    """

    def check(self, item):
        """Return True when this fix applies to *item* (a row descriptor
        with 'database' and 'year' keys)."""
        is_candidate_assets = item['database'] == 'bem_candidato'
        is_old_year = item["year"] < 2014
        return is_candidate_assets and is_old_year

    def apply(self, df):
        """Copy SIGLA_UF into SIGLA_UE on *df* (mutated in place) and
        return the same DataFrame."""
        df['SIGLA_UE'] = df['SIGLA_UF']
        return df
|
{"/web/cepesp/athena/builders/party_affiliations.py": ["/web/cepesp/athena/builders/base.py", "/web/cepesp/columns/filiados.py"], "/web/cepesp/athena/builders/base.py": ["/web/cepesp/athena/builders/utils.py"], "/etl/process/CoalitionsProcess.py": ["/etl/process/DimensionProcess.py"], "/web/cepesp/routes/queries.py": ["/web/cepesp/athena/options.py", "/web/cepesp/utils/session.py"], "/etl/crawler/pipelines.py": ["/etl/crawler/items.py"], "/web/cepesp/athena/builders/factory.py": ["/web/cepesp/athena/builders/candidates_assets.py", "/web/cepesp/athena/builders/elections.py", "/web/cepesp/athena/builders/others.py", "/web/cepesp/athena/builders/party_affiliations.py", "/web/cepesp/athena/builders/secretaries.py", "/web/cepesp/athena/builders/utils.py"], "/web/cepesp/athena/builders/elections.py": ["/web/cepesp/athena/builders/base.py"], "/etl/tests.py": ["/etl/process/TestProcess.py"], "/web/cepesp/athena/builders/others.py": ["/web/cepesp/athena/builders/base.py"], "/web/cepesp/routes/sql.py": ["/web/cepesp/athena/builders/factory.py", "/web/cepesp/athena/options.py"], "/web/cepesp/routes/static.py": ["/web/cepesp/utils/session.py"], "/web/cepesp/athena/builders/candidates_assets.py": ["/web/cepesp/athena/builders/base.py", "/web/cepesp/columns/bem_candidato.py"], "/web/application.py": ["/web/cepesp/routes/filters.py", "/web/cepesp/routes/lang.py", "/web/cepesp/routes/queries.py", "/web/cepesp/routes/sql.py", "/web/cepesp/routes/static.py", "/web/cepesp/utils/session.py"], "/web/tests/responses/test_duplicated_votes.py": ["/web/tests/utils.py"], "/web/tests/responses/test_response_ok.py": ["/web/tests/utils.py"], "/web/migrate.py": ["/web/cepesp/database.py"], "/web/cepesp/routes/lang.py": ["/web/cepesp/utils/session.py"], "/web/tests/responses/test_repeated_macro.py": ["/web/tests/utils.py"], "/web/cepesp/athena/options.py": ["/web/cepesp/columns/filiados.py", "/web/cepesp/columns/bem_candidato.py", "/web/cepesp/columns/secretarios.py"], 
"/web/cepesp/athena/builders/secretaries.py": ["/web/cepesp/athena/builders/base.py", "/web/cepesp/columns/secretarios.py"], "/etl/crawler/spiders.py": ["/etl/crawler/items.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.