krzsam commited on
Commit
1154bfc
·
1 Parent(s): ad476aa
app.py CHANGED
@@ -6,7 +6,7 @@ import pandas as pd
6
  from smolagents import CodeAgent, tool, InferenceClientModel, WebSearchTool, load_tool, PromptTemplates, Tool, FinalAnswerTool
7
  from dotenv import load_dotenv
8
  from my_prompt_config import PromptConfig
9
- from my_tools import ReverseStringTool, ImageLoadTool
10
  from PIL import Image
11
 
12
  # (Keep Constants as is)
 
6
  from smolagents import CodeAgent, tool, InferenceClientModel, WebSearchTool, load_tool, PromptTemplates, Tool, FinalAnswerTool
7
  from dotenv import load_dotenv
8
  from my_prompt_config import PromptConfig
9
+ from my_reverse_string import ReverseStringTool, ImageLoadTool
10
  from PIL import Image
11
 
12
  # (Keep Constants as is)
chess_pieces_detection/__init__.py DELETED
@@ -1 +0,0 @@
1
- from train_chess_pieces_recognition import ChessPiecesRecognition
 
 
chess_pieces_detection/train.sh DELETED
@@ -1,2 +0,0 @@
1
- . ../venv/bin/activate
2
- python3 train_chess_pieces_recognition.py
 
 
 
chess_board_tool.py → my_chess_board_tool.py RENAMED
@@ -5,13 +5,14 @@ import cv2
5
  import numpy as np
6
  import math
7
  import numpy
8
- from .chess_pieces_detection import ChessPiecesRecognition
9
-
10
 
 
11
  class ChessBoard(Tool):
12
  name = "_my_chess_board"
13
  description = """
14
- Analyze an image representing a chess board and extract board state in FEN notation
15
  """
16
 
17
  inputs = {
@@ -23,6 +24,8 @@ class ChessBoard(Tool):
23
 
24
  output_type = "string"
25
 
 
 
26
  # Steps to do
27
  # - break board image into array of images representing pieces
28
  # Image -> Image[]
@@ -31,18 +34,22 @@ class ChessBoard(Tool):
31
  # - construct FEN from string of chess pieces
32
  # str[] -> []
33
 
 
 
 
 
34
 
35
- def gradientx(self, img):
36
  # Compute gradient in x-direction using larger Sobel kernel
37
  grad_x = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=31)
38
  return grad_x
39
 
40
- def gradienty(self, img):
41
  # Compute gradient in y-direction using larger Sobel kernel
42
  grad_y = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=31)
43
  return grad_y
44
 
45
- def checkMatch(self, lineset):
46
  linediff = np.diff(lineset)
47
  x = 0
48
  cnt = 0
@@ -54,7 +61,7 @@ class ChessBoard(Tool):
54
  x = line
55
  return cnt == 5
56
 
57
- def pruneLines(self, lineset, image_dim, margin=20):
58
  # Remove lines near the margins
59
  lineset = [x for x in lineset if x > margin and x < image_dim - margin]
60
  if not lineset:
@@ -75,7 +82,7 @@ class ChessBoard(Tool):
75
  start_pos = i
76
  return lineset
77
 
78
- def skeletonize_1d(self, arr):
79
  _arr = arr.copy()
80
  for i in range(len(_arr) - 1):
81
  if _arr[i] <= _arr[i + 1]:
@@ -85,7 +92,7 @@ class ChessBoard(Tool):
85
  _arr[i] = 0
86
  return _arr
87
 
88
- def getChessLines(self, hdx, hdy, hdx_thresh, hdy_thresh, image_shape):
89
  # Generate Gaussian window
90
  window_size = 21
91
  sigma = 8.0
@@ -102,24 +109,24 @@ class ChessBoard(Tool):
102
  blur_y = np.convolve(hdy_thresh_binary, gausswin, mode='same')
103
 
104
  # Skeletonize signals
105
- skel_x = self.skeletonize_1d(blur_x)
106
- skel_y = self.skeletonize_1d(blur_y)
107
 
108
  # Find line positions
109
  lines_x = np.where(skel_x > 0)[0].tolist()
110
  lines_y = np.where(skel_y > 0)[0].tolist()
111
 
112
  # Prune lines
113
- lines_x = self.pruneLines(lines_x, image_shape[1])
114
- lines_y = self.pruneLines(lines_y, image_shape[0])
115
 
116
  # Check if lines match expected pattern
117
  is_match = (len(lines_x) == 7) and (len(lines_y) == 7) and \
118
- self.checkMatch(lines_x) and self.checkMatch(lines_y)
119
 
120
  return lines_x, lines_y, is_match
121
 
122
- def getChessTiles(self, img, lines_x, lines_y):
123
  stepx = int(round(np.mean(np.diff(lines_x))))
124
  stepy = int(round(np.mean(np.diff(lines_y))))
125
 
@@ -159,7 +166,7 @@ class ChessBoard(Tool):
159
  return squares
160
 
161
  # Image(PIL) --> Image(CV2)[]
162
- def extract_pieces_from_image_board(self, image):
163
  # Load the image
164
  if image is None:
165
  print(f"Image not provided")
@@ -172,8 +179,8 @@ class ChessBoard(Tool):
172
  norm_image = equ.astype(np.float32) / 255.0
173
 
174
  # Compute the gradients
175
- grad_x = self.gradientx(norm_image)
176
- grad_y = self.gradienty(norm_image)
177
 
178
  # Clip the gradients
179
  Dx_pos = np.clip(grad_x, 0, None)
@@ -195,8 +202,8 @@ class ChessBoard(Tool):
195
  threshold_x = np.max(hough_Dx) * (a / 5.0)
196
  threshold_y = np.max(hough_Dy) * (a / 5.0)
197
 
198
- lines_x, lines_y, is_match = self.getChessLines(hough_Dx, hough_Dy, threshold_x, threshold_y,
199
- norm_image.shape)
200
 
201
  if is_match:
202
  break
@@ -205,7 +212,7 @@ class ChessBoard(Tool):
205
 
206
  squares_resized = []
207
  if is_match:
208
- squares = self.getChessTiles(gray, lines_x, lines_y)
209
  for square in squares:
210
  resized = cv2.resize(square, (32, 32), interpolation=cv2.INTER_AREA)
211
  squares_resized.append(resized)
@@ -229,24 +236,32 @@ class ChessBoard(Tool):
229
 
230
  return squares_resized
231
 
232
- def detect_chess_pieces(self, images):
233
- recognition = ChessPiecesRecognition()
234
- return recognition.classify_pieces(images)
235
 
236
- def convert_pieces_list_to_fen(self, pieces):
237
- print()
238
 
239
  def forward(self, img: Image) -> str:
240
- print(f"***KS*** Analyzing chess board image")
241
- cv2_image = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)
242
-
243
- # Image(PIL) -> Image(CV2)(32x32) []
244
- squares_resized = self.extract_pieces_from_image_board(cv2_image)
245
-
246
- # Image(CV2)(32x32) [] -> str[]
247
- pieces_list = self.detect_chess_pieces(squares_resized)
248
-
249
- # str[] -> str(FEN)
250
- fen = self.convert_pieces_list_to_fen(pieces_list)
251
-
252
- return f"FEN is: \"{fen}\n "
 
 
 
 
 
 
 
 
 
 
5
  import numpy as np
6
  import math
7
  import numpy
8
+ from my_train_chess_pieces_recognition import ChessPiecesRecognition
9
+ import traceback
10
 
11
+ # Based on: https://github.com/kratos606/chessboard-recogniser/tree/main
12
  class ChessBoard(Tool):
13
  name = "_my_chess_board"
14
  description = """
15
+ Analyze an image representing a chess board and return board position as a list of chess pieces
16
  """
17
 
18
  inputs = {
 
24
 
25
  output_type = "string"
26
 
27
+ is_initialized = False
28
+
29
  # Steps to do
30
  # - break board image into array of images representing pieces
31
  # Image -> Image[]
 
34
  # - construct FEN from string of chess pieces
35
  # str[] -> []
36
 
37
+ def __init__(self, _chess_board_model_name, _chess_board_model_dir):
38
+ print(f"***KS*** ChessBoard initializing ...")
39
+ self.recognition = ChessPiecesRecognition(_chess_board_model_name, _chess_board_model_dir)
40
+ self.is_initialized = True
41
 
42
+ def __gradientx(self, img):
43
  # Compute gradient in x-direction using larger Sobel kernel
44
  grad_x = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=31)
45
  return grad_x
46
 
47
+ def __gradienty(self, img):
48
  # Compute gradient in y-direction using larger Sobel kernel
49
  grad_y = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=31)
50
  return grad_y
51
 
52
+ def __checkMatch(self, lineset):
53
  linediff = np.diff(lineset)
54
  x = 0
55
  cnt = 0
 
61
  x = line
62
  return cnt == 5
63
 
64
+ def __pruneLines(self, lineset, image_dim, margin=20):
65
  # Remove lines near the margins
66
  lineset = [x for x in lineset if x > margin and x < image_dim - margin]
67
  if not lineset:
 
82
  start_pos = i
83
  return lineset
84
 
85
+ def __skeletonize_1d(self, arr):
86
  _arr = arr.copy()
87
  for i in range(len(_arr) - 1):
88
  if _arr[i] <= _arr[i + 1]:
 
92
  _arr[i] = 0
93
  return _arr
94
 
95
+ def __getChessLines(self, hdx, hdy, hdx_thresh, hdy_thresh, image_shape):
96
  # Generate Gaussian window
97
  window_size = 21
98
  sigma = 8.0
 
109
  blur_y = np.convolve(hdy_thresh_binary, gausswin, mode='same')
110
 
111
  # Skeletonize signals
112
+ skel_x = self.__skeletonize_1d(blur_x)
113
+ skel_y = self.__skeletonize_1d(blur_y)
114
 
115
  # Find line positions
116
  lines_x = np.where(skel_x > 0)[0].tolist()
117
  lines_y = np.where(skel_y > 0)[0].tolist()
118
 
119
  # Prune lines
120
+ lines_x = self.__pruneLines(lines_x, image_shape[1])
121
+ lines_y = self.__pruneLines(lines_y, image_shape[0])
122
 
123
  # Check if lines match expected pattern
124
  is_match = (len(lines_x) == 7) and (len(lines_y) == 7) and \
125
+ self.__checkMatch(lines_x) and self.__checkMatch(lines_y)
126
 
127
  return lines_x, lines_y, is_match
128
 
129
+ def __getChessTiles(self, img, lines_x, lines_y):
130
  stepx = int(round(np.mean(np.diff(lines_x))))
131
  stepy = int(round(np.mean(np.diff(lines_y))))
132
 
 
166
  return squares
167
 
168
  # Image(PIL) --> Image(CV2)[]
169
+ def __extract_pieces_from_image_board(self, image):
170
  # Load the image
171
  if image is None:
172
  print(f"Image not provided")
 
179
  norm_image = equ.astype(np.float32) / 255.0
180
 
181
  # Compute the gradients
182
+ grad_x = self.__gradientx(norm_image)
183
+ grad_y = self.__gradienty(norm_image)
184
 
185
  # Clip the gradients
186
  Dx_pos = np.clip(grad_x, 0, None)
 
202
  threshold_x = np.max(hough_Dx) * (a / 5.0)
203
  threshold_y = np.max(hough_Dy) * (a / 5.0)
204
 
205
+ lines_x, lines_y, is_match = self.__getChessLines(hough_Dx, hough_Dy, threshold_x, threshold_y,
206
+ norm_image.shape)
207
 
208
  if is_match:
209
  break
 
212
 
213
  squares_resized = []
214
  if is_match:
215
+ squares = self.__getChessTiles(gray, lines_x, lines_y)
216
  for square in squares:
217
  resized = cv2.resize(square, (32, 32), interpolation=cv2.INTER_AREA)
218
  squares_resized.append(resized)
 
236
 
237
  return squares_resized
238
 
239
+ def __detect_chess_pieces(self, images):
240
+ return self.recognition.classify_pieces(images)
 
241
 
242
+ #def __convert_pieces_list_to_fen(self, pieces):
243
+ # return "dupa"
244
 
245
  def forward(self, img: Image) -> str:
246
+ pieces_list = ""
247
+ try:
248
+ print(f"***KS*** Analyzing chess board image for image: {img}")
249
+ cv2_image = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)
250
+ print(f"***KS*** Got CV2 image shape: {cv2_image.shape}")
251
+
252
+ # Image(PIL) -> Image(CV2)(32x32) []
253
+ squares_resized = self.__extract_pieces_from_image_board(cv2_image)
254
+ print(f"***KS*** Squares resized: {len(squares_resized)}")
255
+
256
+ # Image(CV2)(32x32) [] -> str[]
257
+ pieces_list = self.__detect_chess_pieces(squares_resized)
258
+ print(f"***KS*** Pieces list: {pieces_list}")
259
+
260
+ # str[] -> str(FEN)
261
+ #fen = self.__convert_pieces_list_to_fen(pieces_list)
262
+ #print(f"***KS*** FEN acquired: {fen}")
263
+ except Exception as ex:
264
+ print(traceback.format_exc())
265
+ print(f"***KS*** Exception invoking ChessBoard: {ex}")
266
+
267
+ return pieces_list
my_fen_tool.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from smolagents import Tool
import numpy as np
import textwrap


# https://chessvision.ai/docs/tools/fen2image/
# https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation


class FENTool(Tool):
    """smolagents Tool: convert a flat 64-character piece string into FEN notation.

    The input is expected to be 64 characters (an 8x8 board), rank 1 first,
    with '1' marking an empty square -- assumption based on the test data;
    TODO confirm against the chess-board recognition tool that produces it.
    """

    name = "_my_fen"
    description = """
    Convert a board provided as a list of chess pieces into FEN notation
    """

    inputs = {
        "_inp": {
            "type": "string",
            "description": "string with chess pieces",
        }
    }

    output_type = "string"

    def __shortenFEN(self, fen):
        """Reduce FEN to shortest form (ex. '111p11Q' becomes '3p2Q')."""
        # Replace the longest runs of empty squares first so that the
        # shorter substitutions cannot split a longer run apart.
        return fen.replace('11111111', '8').replace('1111111', '7') \
            .replace('111111', '6').replace('11111', '5') \
            .replace('1111', '4').replace('111', '3').replace('11', '2')

    def forward(self, _inp: str) -> str:
        """Build a FEN string from the flat piece list.

        Steps: split into 8-character ranks, reverse the rank order
        (FEN lists rank 8 first), join ranks with '/', collapse runs of
        empty squares, then append the fixed metadata fields.
        """
        # Split the flat 64-character string into 8-square ranks.
        ranks = textwrap.wrap(_inp, 8)
        print(f"Arr: \n{ranks}")

        # FEN starts from rank 8, so reverse the rank order.
        ranks = ranks[::-1]
        print(f"Arr: \n{ranks}")

        fen_long = "/".join(ranks)
        print(f"FEN long: \n{fen_long}")

        fen_short = self.__shortenFEN(fen_long)
        print(f"FEN short: \n{fen_short}")

        # NOTE(review): side-to-move is hard-coded to black ('b') and the
        # castling / en-passant / clock fields are fixed -- confirm this is
        # intended for the target task rather than derived from the board.
        return f"{fen_short} b - - 0 1"
my_tools.py → my_image_load.py RENAMED
@@ -17,28 +17,6 @@ from io import BytesIO
17
  #]
18
 
19
 
20
- class ReverseStringTool(Tool):
21
- name = "_my_reverse_string"
22
- description = """
23
- Decode a string which is provided in a reversed form.
24
- """
25
-
26
- inputs = {
27
- "_inp": {
28
- "type": "string",
29
- "description": "encoded input string",
30
- }
31
- }
32
-
33
- output_type = "string"
34
-
35
- def forward(self, _inp: str) -> str:
36
- _out = ""
37
- for a in _inp:
38
- _out = a + _out
39
- return _out
40
-
41
-
42
  class ImageLoadTool(Tool):
43
  name = "_my_image_load"
44
  description = """
 
17
  #]
18
 
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  class ImageLoadTool(Tool):
21
  name = "_my_image_load"
22
  description = """
my_reverse_string.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from smolagents import Tool


class ReverseStringTool(Tool):
    """smolagents Tool: decode a string that was provided in reversed form."""

    name = "_my_reverse_string"
    description = """
    Decode a string which is provided in a reversed form.
    """

    inputs = {
        "_inp": {
            "type": "string",
            "description": "encoded input string",
        }
    }

    output_type = "string"

    def forward(self, _inp: str) -> str:
        """Return *_inp* reversed.

        Uses slicing instead of the original character-by-character
        prepend loop, which was O(n^2) in the input length.
        """
        return _inp[::-1]
chess_pieces_detection/train_chess_pieces_recognition.py → my_train_chess_pieces_recognition.py RENAMED
@@ -27,7 +27,7 @@ from PIL import Image
27
  # - black Pawn p
28
  # - empty
29
 
30
- TRAIN_DIR = "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/chess_pieces_detection/train-data"
31
  TRAIN_DIR_BLACK = f"{TRAIN_DIR}/black"
32
  TRAIN_DIR_WHITE = f"{TRAIN_DIR}/white"
33
  TRAIN_DIR_EMPTY = f"{TRAIN_DIR}/empty"
@@ -82,9 +82,10 @@ TEST_DATA = TRAIN_DATA
82
  # https://docs.pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module
83
  class CNNModel(nn.Module):
84
 
85
- def __init__(self, _name):
86
  super(CNNModel, self).__init__()
87
- self.name = _name
 
88
  print("***KS*** Model: Creating layers")
89
  # First Convolutional Layer: 32 features, 5x5 kernel
90
  # https://docs.pytorch.org/docs/stable/generated/torch.nn.Conv2d.html#torch.nn.Conv2d
@@ -108,7 +109,7 @@ class CNNModel(nn.Module):
108
 
109
  def _initialize_weights(self):
110
  # Load the pre-trained model
111
- model_name = f"saved_models/{self.name}.pth"
112
  print(f"***KS*** Checking pre-trained model: '{model_name}'")
113
  if os.path.exists(model_name):
114
  print(f"***KS*** Model '{model_name}' exists, loading weights ...")
@@ -135,7 +136,7 @@ class CNNModel(nn.Module):
135
  print(f"***KS*** Saving model ...")
136
  # Save the model checkpoint
137
  os.makedirs('saved_models', exist_ok=True)
138
- model_save_path = f"saved_models/{self.name}.pth"
139
  torch.save(self.state_dict(), model_save_path)
140
  print(f'*** KS *** Model saved in file: {model_save_path}')
141
 
@@ -249,6 +250,8 @@ class ChessDataset(Dataset):
249
 
250
 
251
  class ChessImagesDataset(Dataset):
 
 
252
  def __init__(self, images):
253
  self.num_images = len(images)
254
  self.images = images
@@ -269,9 +272,9 @@ class ChessImagesDataset(Dataset):
269
 
270
 
271
  class ChessPiecesRecognition:
272
- def __init__(self):
273
- print(f"***KS*** Chess pieces recognition initialized")
274
- self.model = CNNModel("test-1")
275
  self.__load_train_data__()
276
 
277
  def __load_train_data__(self):
@@ -378,10 +381,9 @@ class ChessPiecesRecognition:
378
  for inputs, labels in loader:
379
  # Move inputs and labels to device
380
  inputs = inputs.to(self.model.get_device())
381
- labels = labels.to(self.model.get_device())
382
 
383
  outputs = self.model(inputs)
384
- print(f"***KS*** Got model outputs: \nshape: {outputs.shape}\n{outputs}")
385
 
386
  labels_detected = np.argmax(outputs.cpu(), axis=1)
387
  print(f"***KS*** Got labels idx detected: \nshape: {labels_detected.shape}\n{labels_detected}")
@@ -394,7 +396,4 @@ class ChessPiecesRecognition:
394
 
395
  #t = ChessPiecesRecognition()
396
  #t.train()
397
- #t.eval()
398
-
399
- if __name__ == "__main__":
400
- print("This is a module and should not be executed directly")
 
27
  # - black Pawn p
28
  # - empty
29
 
30
+ TRAIN_DIR = "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/train-data"
31
  TRAIN_DIR_BLACK = f"{TRAIN_DIR}/black"
32
  TRAIN_DIR_WHITE = f"{TRAIN_DIR}/white"
33
  TRAIN_DIR_EMPTY = f"{TRAIN_DIR}/empty"
 
82
  # https://docs.pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module
83
  class CNNModel(nn.Module):
84
 
85
+ def __init__(self, _model_name, _model_dir):
86
  super(CNNModel, self).__init__()
87
+ self.name = _model_name
88
+ self.model_dir = _model_dir
89
  print("***KS*** Model: Creating layers")
90
  # First Convolutional Layer: 32 features, 5x5 kernel
91
  # https://docs.pytorch.org/docs/stable/generated/torch.nn.Conv2d.html#torch.nn.Conv2d
 
109
 
110
  def _initialize_weights(self):
111
  # Load the pre-trained model
112
+ model_name = os.path.join(self.model_dir, self.name)
113
  print(f"***KS*** Checking pre-trained model: '{model_name}'")
114
  if os.path.exists(model_name):
115
  print(f"***KS*** Model '{model_name}' exists, loading weights ...")
 
136
  print(f"***KS*** Saving model ...")
137
  # Save the model checkpoint
138
  os.makedirs('saved_models', exist_ok=True)
139
+ model_save_path = f"../saved_models/{self.name}.pth"
140
  torch.save(self.state_dict(), model_save_path)
141
  print(f'*** KS *** Model saved in file: {model_save_path}')
142
 
 
250
 
251
 
252
  class ChessImagesDataset(Dataset):
253
+ CHESS_PIECES = '1KQRBNPkqrbnp'
254
+
255
  def __init__(self, images):
256
  self.num_images = len(images)
257
  self.images = images
 
272
 
273
 
274
  class ChessPiecesRecognition:
275
+ def __init__(self, _model_name, _model_dir):
276
+ print(f"***KS*** Chess pieces recognition initializing ...")
277
+ self.model = CNNModel(_model_name, _model_dir)
278
  self.__load_train_data__()
279
 
280
  def __load_train_data__(self):
 
381
  for inputs, labels in loader:
382
  # Move inputs and labels to device
383
  inputs = inputs.to(self.model.get_device())
 
384
 
385
  outputs = self.model(inputs)
386
+ print(f"***KS*** Got model outputs: \nshape: {outputs.shape}")
387
 
388
  labels_detected = np.argmax(outputs.cpu(), axis=1)
389
  print(f"***KS*** Got labels idx detected: \nshape: {labels_detected.shape}\n{labels_detected}")
 
396
 
397
  #t = ChessPiecesRecognition()
398
  #t.train()
399
+ #t.eval()
 
 
 
my_train_chess_pieces_recognition.sh ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ . ./venv/bin/activate
2
+ python3 my_train_chess_pieces_recognition.py
simple.py CHANGED
@@ -7,15 +7,14 @@ from smolagents import CodeAgent, tool, InferenceClientModel, WebSearchTool, loa
7
  from smolagents import PromptTemplates, PlanningPromptTemplate, FinalAnswerPromptTemplate, ManagedAgentPromptTemplate
8
 
9
  from dotenv import load_dotenv
10
- from my_tools import ReverseStringTool, ImageLoadTool
11
- from chess_board_tool import ChessBoard
 
12
  from PIL import Image
13
 
14
 
15
  task_id = "cca530fc-4052-43b2-b130-b30968d8aa44"
16
 
17
- # https://github.com/kratos606/chessboard-recogniser/tree/main
18
-
19
  MODEL_REASONING = "Qwen/Qwen2.5-Coder-32B-Instruct"
20
  #MODEL_REASONING = "Qwen/Qwen2.5-72B-Instruct" not good
21
  #"meta-llama/Meta-Llama-3-70B-Instruct"
@@ -30,6 +29,7 @@ PROMPT_TEMPLATES = PromptTemplates(
30
 
31
  Describe your initial plan as a set of bullet points.
32
  Each bullet point should describe in one sentence an action which is to be taken in this step.
 
33
 
34
  Use the tools provided. If you are going to use a tool, describe in detail how you are going
35
  to use that particular tool and explain parameters used to invoke the tool.
@@ -73,10 +73,18 @@ PROMPT_TEMPLATES = PromptTemplates(
73
  #question = f"Load an image for task id {task_id} and display it using matplotlib "
74
  question = f"Load an image for task id {task_id} and analyze the chess board "
75
 
 
 
 
76
  reasoning_agent = CodeAgent(
77
  name="CourseAssistant",
78
  description="General AI Assistant",
79
- tools=[ImageLoadTool(), FinalAnswerTool(), ReverseStringTool(), ChessBoard()],
 
 
 
 
 
80
  model=InferenceClientModel(model_id=MODEL_REASONING),
81
  planning_interval=3, # This is where you activate planning!,
82
  prompt_templates=PROMPT_TEMPLATES,
 
7
  from smolagents import PromptTemplates, PlanningPromptTemplate, FinalAnswerPromptTemplate, ManagedAgentPromptTemplate
8
 
9
  from dotenv import load_dotenv
10
+ from my_reverse_string import ReverseStringTool
11
+ from my_image_load import ImageLoadTool
12
+ from my_chess_board_tool import ChessBoard
13
  from PIL import Image
14
 
15
 
16
  task_id = "cca530fc-4052-43b2-b130-b30968d8aa44"
17
 
 
 
18
  MODEL_REASONING = "Qwen/Qwen2.5-Coder-32B-Instruct"
19
  #MODEL_REASONING = "Qwen/Qwen2.5-72B-Instruct" not good
20
  #"meta-llama/Meta-Llama-3-70B-Instruct"
 
29
 
30
  Describe your initial plan as a set of bullet points.
31
  Each bullet point should describe in one sentence an action which is to be taken in this step.
32
+ Do NOT provide hypothetical examples for the final answer.
33
 
34
  Use the tools provided. If you are going to use a tool, describe in detail how you are going
35
  to use that particular tool and explain parameters used to invoke the tool.
 
73
  #question = f"Load an image for task id {task_id} and display it using matplotlib "
74
  question = f"Load an image for task id {task_id} and analyze the chess board "
75
 
76
+ chess_board_model_name = "my_chess_pieces_recognition.pth"
77
+ chess_board_model_dir = "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/saved_models"
78
+
79
  reasoning_agent = CodeAgent(
80
  name="CourseAssistant",
81
  description="General AI Assistant",
82
+ tools=[
83
+ ImageLoadTool(),
84
+ FinalAnswerTool(),
85
+ ReverseStringTool(),
86
+ ChessBoard(chess_board_model_name, chess_board_model_dir)
87
+ ],
88
  model=InferenceClientModel(model_id=MODEL_REASONING),
89
  planning_interval=3, # This is where you activate planning!,
90
  prompt_templates=PROMPT_TEMPLATES,
test.sh ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ . ./venv/bin/activate
2
+ pytest --capture=no
test_tools.py CHANGED
@@ -1,20 +1,22 @@
1
- from my_tools import ReverseStringTool, ImageLoadTool
2
- from chess_board_tool import ChessBoard
 
 
3
  import pytest
4
  import matplotlib.pyplot as plt
5
  import matplotlib as mp
6
 
7
  #pytest --capture=no
8
 
 
9
  @pytest.mark.parametrize("_inp,_exp",[("abc", "cba"),("ihg fed cba", "abc def ghi")])
10
  def test_tool_reverse_string(_inp,_exp):
11
  assert ReverseStringTool().forward(_inp) == _exp
12
 
13
-
14
  @pytest.mark.parametrize("_task_id,_exp",[("cca530fc-4052-43b2-b130-b30968d8aa44", "")])
15
  def test_tool_image_load(_task_id,_exp):
16
- #assert ReverseStringTool().forward(_inp) == _exp
17
- print(f"Loading image for task id: {_task_id}")
18
  t = ImageLoadTool()
19
  result = t.forward(_task_id)
20
  print(f"Got result: {result}")
@@ -22,13 +24,28 @@ def test_tool_image_load(_task_id,_exp):
22
  #plt.imshow(result)
23
  #plt.show()
24
 
25
-
26
- @pytest.mark.parametrize("_task_id,_exp",[("cca530fc-4052-43b2-b130-b30968d8aa44", "")])
27
  def test_tool_chess_board(_task_id,_exp):
28
- #assert ReverseStringTool().forward(_inp) == _exp
29
- print(f"Loading image for task id: {_task_id}")
30
  t = ImageLoadTool()
31
  image = t.forward(_task_id)
32
  print(f"Got result: {image}")
33
- board_tool = ChessBoard()
34
- fen = board_tool.forward(image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from my_reverse_string import ReverseStringTool
2
+ from my_image_load import ImageLoadTool
3
+ from my_chess_board_tool import ChessBoard
4
+ from my_fen_tool import FENTool
5
  import pytest
6
  import matplotlib.pyplot as plt
7
  import matplotlib as mp
8
 
9
  #pytest --capture=no
10
 
11
+ @pytest.mark.skip(reason="disabled")
12
  @pytest.mark.parametrize("_inp,_exp",[("abc", "cba"),("ihg fed cba", "abc def ghi")])
13
  def test_tool_reverse_string(_inp,_exp):
14
  assert ReverseStringTool().forward(_inp) == _exp
15
 
16
+ @pytest.mark.skip(reason="disabled")
17
  @pytest.mark.parametrize("_task_id,_exp",[("cca530fc-4052-43b2-b130-b30968d8aa44", "")])
18
  def test_tool_image_load(_task_id,_exp):
19
+ print(f"\nLoading image for task id: {_task_id}")
 
20
  t = ImageLoadTool()
21
  result = t.forward(_task_id)
22
  print(f"Got result: {result}")
 
24
  #plt.imshow(result)
25
  #plt.show()
26
 
27
+ @pytest.mark.skip(reason="disabled")
28
+ @pytest.mark.parametrize("_task_id,_exp",[("cca530fc-4052-43b2-b130-b30968d8aa44", "1K1111111PP11111P11RBBqP1111n111Q1111111p11b11111pp111pp1k11r111")])
29
  def test_tool_chess_board(_task_id,_exp):
30
+ print(f"\nLoading image for task id: {_task_id}")
 
31
  t = ImageLoadTool()
32
  image = t.forward(_task_id)
33
  print(f"Got result: {image}")
34
+ chess_board_model_name = "my_chess_pieces_recognition.pth"
35
+ chess_board_model_dir = "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/saved_models"
36
+ board_tool = ChessBoard(chess_board_model_name, chess_board_model_dir)
37
+ pieces = board_tool.forward(image)
38
+ assert pieces == _exp
39
+
40
+
41
+ @pytest.mark.parametrize("_pieces_list,_exp",
42
+ [
43
+ ("1K1111111PP11111P11RBBqP1111n111Q1111111p11b11111pp111pp1k11r111",
44
+ "1k2r3/1pp3pp/p2b4/Q7/4n3/P2RBBqP/1PP5/1K6 b - - 0 1")
45
+ ])
46
+ def test_tool_fen(_pieces_list,_exp):
47
+ print(f"\nConverting pieces list to FEN: {_pieces_list}")
48
+ t = FENTool()
49
+ fen = t.forward(_pieces_list)
50
+ print(f"Got result: {fen}")
51
+ assert fen == _exp