Spaces:
Sleeping
Sleeping
import os

# MPLCONFIGDIR must be set BEFORE matplotlib is imported: matplotlib picks
# its config/cache directory at import time, so setting it afterwards (as
# the original code did) has no effect.  /tmp is writable on read-only
# container filesystems.
os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'

import base64
import copy
import io

import matplotlib
matplotlib.use('Agg')  # headless backend: render to buffers, no display
import matplotlib.pyplot as plt
import numpy as np
from flask import Flask, render_template, request, jsonify, send_file
from matplotlib.patches import Rectangle
from PIL import Image

app = Flask(__name__)
# Terrain grid definition: each cell holds the cost of stepping ONTO it.
# Observed values are 1, 2, 3, 4 and 9 (higher = more expensive to cross).
# NOTE(review): 9 presumably marks near-impassable terrain — confirm against
# the map image; no value is treated as a hard wall by the search code.
GRID = [[4, 4, 4, 4, 4, 4, 4, 2, 3, 2, 4, 2],
        [4, 4, 4, 4, 9, 9, 3, 2, 2, 4, 4, 4],
        [4, 4, 2, 4, 2, 2, 2, 1, 1, 9, 9, 4],
        [4, 2, 2, 4, 2, 2, 1, 1, 1, 4, 4, 2],
        [1, 1, 2, 1, 1, 1, 1, 9, 1, 1, 1, 1],
        [4, 1, 2, 1, 9, 2, 1, 1, 1, 9, 2, 2],
        [4, 1, 4, 1, 2, 4, 4, 1, 1, 4, 4, 2],
        [1, 1, 1, 1, 2, 4, 2, 2, 1, 2, 3, 2]]
# Grid dimensions: 12 columns (x axis) by 8 rows (y axis).
COLS, ROWS = 12, 8
def create_graph(grid):
    """Build an adjacency map for 4-connected movement on *grid*.

    Returns a dict mapping every (x, y) cell to a list of
    (step_cost, (nx, ny)) pairs, where step_cost is the terrain value of
    the neighbouring cell being entered.  Neighbours are listed in the
    fixed order left, up, right, down (off-grid ones omitted).
    """
    height, width = len(grid), len(grid[0])
    offsets = ((-1, 0), (0, -1), (1, 0), (0, 1))
    return {
        (x, y): [
            (grid[y + dy][x + dx], (x + dx, y + dy))
            for dx, dy in offsets
            if 0 <= x + dx < width and 0 <= y + dy < height
        ]
        for y in range(height)
        for x in range(width)
    }
| GRAPH = create_graph(GRID) | |
def heuristic(a, b):
    """Manhattan (L1) distance between grid cells *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return abs(dx) + abs(dy)
def a_star(graph, start, goal):
    """A* search over *graph* from *start* to *goal*.

    *graph* maps (x, y) nodes to lists of (step_cost, neighbour) pairs.
    Returns the path as a list of nodes from start to goal (inclusive),
    or None when the goal is unreachable.

    Fixes over the original: stale heap entries (nodes pushed again after
    a cheaper path was found) are now skipped instead of re-expanded, and
    path reconstruction tests `is not None` explicitly rather than relying
    on tuple truthiness.
    """
    def h(node):
        # Manhattan distance to the goal — admissible on a 4-connected
        # grid whose minimum step cost is >= 1.
        return abs(node[0] - goal[0]) + abs(node[1] - goal[1])

    open_heap = [(h(start), start)]
    g_score = {start: 0}
    came_from = {start: None}
    while open_heap:
        f, current = heappop(open_heap)
        if f - h(current) > g_score[current]:
            continue  # stale entry: a cheaper route to `current` exists
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for step_cost, neighbor in graph[current]:
            tentative = g_score[current] + step_cost
            if tentative < g_score.get(neighbor, float('inf')):
                g_score[neighbor] = tentative
                came_from[neighbor] = current
                heappush(open_heap, (tentative + h(neighbor), neighbor))
    return None
def dijkstra(graph, start, goal):
    """Dijkstra shortest path from *start* to *goal* over *graph*.

    *graph* maps nodes to lists of (weight, neighbour) pairs.  Returns the
    path as a list of nodes (start..goal) or None when unreachable.

    Performance fix: the original selected the next node with a linear
    scan over all nodes and tested membership in a *list* of visited
    nodes, giving O(V^2 + V*E); this version uses a binary heap and a
    set, O((V+E) log V).  Computed distances are identical.
    """
    distances = {node: float('infinity') for node in graph}
    previous = {node: None for node in graph}
    distances[start] = 0
    heap = [(0, start)]
    settled = set()
    while heap:
        dist, current = heappop(heap)
        if current in settled:
            continue  # stale entry
        settled.add(current)
        if current == goal:
            break  # goal settled: shortest distance is final
        for weight, neighbor in graph[current]:
            candidate = dist + weight
            if candidate < distances[neighbor]:
                distances[neighbor] = candidate
                previous[neighbor] = current
                heappush(heap, (candidate, neighbor))
    # Walk predecessor links backwards from the goal.
    path = []
    current = goal
    while current is not None:
        path.append(current)
        current = previous[current]
    path.reverse()
    # If the goal was never reached, the chain does not lead back to start.
    return path if path[0] == start else None
def bellman_ford(graph, start, goal):
    """Bellman-Ford shortest path from *start* to *goal*.

    *graph* maps nodes to lists of (cost, neighbour) pairs.  Returns the
    path as a list of nodes (start..goal) or None when unreachable.

    Improvement: stop early once a full relaxation pass changes nothing —
    distances have converged, so the remaining passes are no-ops.
    Results are identical to the original fixed V-1 passes.
    """
    distances = {node: float('infinity') for node in graph}
    previous = {node: None for node in graph}
    distances[start] = 0
    for _ in range(len(graph) - 1):
        changed = False
        for node in graph:
            if distances[node] == float('infinity'):
                continue  # unreached nodes cannot relax anything yet
            for cost, neighbor in graph[node]:
                candidate = distances[node] + cost
                if candidate < distances[neighbor]:
                    distances[neighbor] = candidate
                    previous[neighbor] = node
                    changed = True
        if not changed:
            break  # fixed point reached: further passes are no-ops
    # Walk predecessor links backwards from the goal.
    path = []
    current = goal
    while current is not None:
        path.append(current)
        current = previous[current]
    path.reverse()
    return path if path and path[0] == start else None
def q_learning_train(grid, episodes=1000):
    """Train a goal-free Q-table over *grid*.

    Rewards are the negative terrain cost of the cell entered, so the
    learned values favour cheap terrain; moves that would leave the grid
    are penalised with -100 and the agent stays in place.

    Bug fix: the original pre-guarded every move with bounds checks, so
    `new_state` was always valid and the -100 invalid-move penalty branch
    was unreachable.  Moves are now applied unguarded and validated
    afterwards, making the wall penalty actually fire.

    Parameters
    ----------
    grid : list[list[int]]
        Terrain cost grid.
    episodes : int
        Number of training episodes (each capped at 100 steps).

    Returns a (rows, cols, 4) numpy array of Q-values; the action axis is
    0=up, 1=right, 2=down, 3=left.
    """
    rows, cols = len(grid), len(grid[0])
    q_values = np.zeros((rows, cols, 4))
    lr = 0.9       # learning rate
    gamma = 0.9    # discount factor
    epsilon = 0.9  # probability of acting GREEDILY (explores with p = 1-epsilon)
    # (dy, dx) deltas indexed by action: up, right, down, left.
    moves = ((-1, 0), (0, 1), (1, 0), (0, -1))

    for _ in range(episodes):
        # Random start position, state is [row, col].
        state = [np.random.randint(rows), np.random.randint(cols)]
        for _ in range(100):  # cap steps per episode
            old_state = list(state)
            # Epsilon-greedy: note `epsilon` is the greedy probability here
            # (unconventional naming kept from the original design).
            if np.random.random() > epsilon:
                action = np.random.randint(4)
            else:
                action = int(np.argmax(q_values[state[0], state[1]]))
            dy, dx = moves[action]
            new_y, new_x = state[0] + dy, state[1] + dx
            if 0 <= new_y < rows and 0 <= new_x < cols:
                reward = -grid[new_y][new_x]
                state = [new_y, new_x]
            else:
                reward = -100  # bumped the wall: penalise, stay in place
            # Standard temporal-difference Q-update.
            old_q = q_values[old_state[0], old_state[1], action]
            td = reward + gamma * np.max(q_values[state[0], state[1]]) - old_q
            q_values[old_state[0], old_state[1], action] = old_q + lr * td
    return q_values
# Train the Q-table once at import time so request handlers can reuse it.
# NOTE(review): with debug=True Flask's reloader imports this module twice,
# so training runs twice at startup — confirm that is acceptable.
print("Training Q-Learning model...")
Q_VALUES = q_learning_train(GRID)
print("Q-Learning model trained!")
def q_learning_path(q_values, start, goal, max_steps=200):
    """Find a path from *start* to *goal* for the "Q-Learning" UI option.

    The Q-table is trained without a specific goal, so it cannot be rolled
    out toward an arbitrary target; A* over the precomputed module GRAPH
    is used instead (*q_values* is accepted for interface compatibility
    but not consulted).  If A* finds nothing, fall back to a bounded
    greedy walk scored by Manhattan distance plus a small terrain-cost
    tie-breaker.

    Returns a list of (x, y) nodes, or None when no move was possible.

    Fix: the original rebuilt the adjacency map with create_graph(GRID)
    on every call; GRAPH is the identical precomputed structure.
    """
    path = a_star(GRAPH, start, goal)
    if path:
        return path

    # Fallback: greedy goal-directed walk, capped at max_steps moves.
    x, y = start
    path = [start]
    visited = {start}
    for _ in range(max_steps):
        if (x, y) == goal:
            return path
        best_action = None
        best_distance = float('inf')
        # Candidate moves as (action, new_x, new_y); None when off-grid.
        actions = [
            (0, x, y - 1) if y > 0 else None,         # up
            (1, x + 1, y) if x < COLS - 1 else None,  # right
            (2, x, y + 1) if y < ROWS - 1 else None,  # down
            (3, x - 1, y) if x > 0 else None,         # left
        ]
        # First pass: consider unvisited cells only.
        for action_data in actions:
            if action_data is None:
                continue
            action, new_x, new_y = action_data
            if (new_x, new_y) in visited:
                continue
            distance = abs(new_x - goal[0]) + abs(new_y - goal[1])
            cost = GRID[new_y][new_x]
            # Closer cells win; cheap terrain breaks near-ties.
            score = distance + cost * 0.1
            if score < best_distance:
                best_distance = score
                best_action = action
        # Second pass: every neighbour already visited — allow revisiting.
        if best_action is None:
            for action_data in actions:
                if action_data is None:
                    continue
                action, new_x, new_y = action_data
                distance = abs(new_x - goal[0]) + abs(new_y - goal[1])
                cost = GRID[new_y][new_x]
                score = distance + cost * 0.1
                if score < best_distance:
                    best_distance = score
                    best_action = action
        if best_action is None:
            break  # completely stuck (no legal move at all)
        # Apply the chosen move (actions list already guarantees bounds).
        if best_action == 0:
            y -= 1
        elif best_action == 1:
            x += 1
        elif best_action == 2:
            y += 1
        elif best_action == 3:
            x -= 1
        path.append((x, y))
        visited.add((x, y))
        if (x, y) == goal:
            return path
    return path if len(path) > 1 else None
def visualize_path(start_x, start_y, goal_x, goal_y, algorithm):
    """Render the selected algorithm's path on top of the map image.

    Parameters: start/goal grid coordinates (int-like) and an *algorithm*
    string ("A*", "Dijkstra", "Bellman-Ford"; anything else falls through
    to Q-Learning).  Returns a PIL.Image of the rendered figure.
    Raises FileNotFoundError when static/images/map.jpg is missing.
    """
    start = (int(start_x), int(start_y))
    goal = (int(goal_x), int(goal_y))
    # NOTE(review): every branch assigns the same value to `color`, which is
    # never read afterwards (the plot hard-codes '#0066FF') — dead local.
    if algorithm == "A*":
        path = a_star(GRAPH, start, goal)
        color = '#0066FF'  # Blue
        title = "A* Algorithm"
    elif algorithm == "Dijkstra":
        path = dijkstra(GRAPH, start, goal)
        color = '#0066FF'  # Blue
        title = "Dijkstra Algorithm"
    elif algorithm == "Bellman-Ford":
        path = bellman_ford(GRAPH, start, goal)
        color = '#0066FF'  # Blue
        title = "Bellman-Ford Algorithm"
    else:  # Q-Learning
        path = q_learning_path(Q_VALUES, start, goal)
        color = '#0066FF'  # Blue
        title = "Q-Learning Algorithm"
    # Load the background image via an absolute path.  (`os` is already
    # imported at file top; this local import is redundant but harmless.)
    import os
    base_dir = os.path.dirname(os.path.abspath(__file__))
    img_path = os.path.join(base_dir, 'static', 'images', 'map.jpg')
    if not os.path.exists(img_path):
        # Debug output to help locate the asset in the deployed container.
        print(f"Image not found at: {img_path}")
        print(f"Current dir: {os.getcwd()}")
        print(f"Files in current dir: {os.listdir('.')}")
        raise FileNotFoundError(f"Map image not found at {img_path}")
    bg_img = Image.open(img_path)
    bg_width, bg_height = bg_img.size
    # Pixel size of one grid cell.
    # NOTE(review): tile_width/tile_height are computed but never used.
    tile_width = bg_width / COLS
    tile_height = bg_height / ROWS
    fig, ax = plt.subplots(figsize=(14, 8))
    # Draw the background image stretched over the full grid extent.
    ax.imshow(bg_img, extent=[0, COLS, 0, ROWS], aspect='auto')
    # Draw the path, if one was found.
    if path:
        # Path polyline (blue).  y is flipped (ROWS - y - 0.5) because the
        # grid's y axis grows downward while matplotlib's grows upward;
        # +0.5 centres points inside their cells.
        path_x = [x + 0.5 for x, y in path]
        path_y = [ROWS - y - 0.5 for x, y in path]
        ax.plot(path_x, path_y, color='#0066FF', linewidth=4, alpha=0.7, zorder=5)
        # Start (green 'S') and goal (red 'G') markers.
        for i, (x, y) in enumerate(path):
            if (x, y) == start:
                circle = plt.Circle((x + 0.5, ROWS - y - 0.5), 0.4,
                                    color='#00FF00', edgecolor='white',
                                    linewidth=3, zorder=10)
                ax.add_patch(circle)
                ax.text(x + 0.5, ROWS - y - 0.5, 'S',
                        ha='center', va='center',
                        fontsize=14, color='white', weight='bold', zorder=11)
            elif (x, y) == goal:
                circle = plt.Circle((x + 0.5, ROWS - y - 0.5), 0.4,
                                    color='#FF0000', edgecolor='white',
                                    linewidth=3, zorder=10)
                ax.add_patch(circle)
                ax.text(x + 0.5, ROWS - y - 0.5, 'G',
                        ha='center', va='center',
                        fontsize=14, color='white', weight='bold', zorder=11)
        # Total path cost: terrain value of every cell entered (start excluded).
        path_length = sum(GRID[y][x] for x, y in path[1:])
        info_text = f"{title}\nPath: {len(path)} nodes | Cost: {path_length}"
    else:
        info_text = f"{title}\nNo path found!"
    ax.set_xlim(0, COLS)
    ax.set_ylim(0, ROWS)
    ax.set_aspect('equal')
    ax.axis('off')
    # Title/summary box centred above the map.
    ax.text(COLS/2, ROWS + 0.5, info_text,
            ha='center', va='bottom',
            fontsize=14, weight='bold',
            bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
    plt.tight_layout()
    # Render the figure into an in-memory PNG and hand it back as a PIL
    # image; close the figure so matplotlib state does not leak per request.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', dpi=100, bbox_inches='tight')
    buf.seek(0)
    img = Image.open(buf)
    plt.close()
    return img
def index():
    """Serve the main page template.

    NOTE(review): no @app.route decorator is visible here — presumably
    `@app.route('/')` was lost when this file was extracted; confirm
    against the original source, otherwise this view is never registered.
    """
    return render_template('index.html')
def find_path():
    """JSON endpoint: run the requested algorithm and return the rendered map.

    Expects a JSON body with int-like 'start_x', 'start_y', 'goal_x',
    'goal_y' and a string 'algorithm'.  Responds with
    {'image': 'data:image/png;base64,...'} on success, or
    ({'error': ...}, 500) on any failure.

    NOTE(review): no @app.route decorator is visible — presumably
    `@app.route('/find_path', methods=['POST'])` was lost when this file
    was extracted; confirm against the original source.
    """
    try:
        data = request.json
        start_x = int(data['start_x'])
        start_y = int(data['start_y'])
        goal_x = int(data['goal_x'])
        goal_y = int(data['goal_y'])
        algorithm = data['algorithm']
        img = visualize_path(start_x, start_y, goal_x, goal_y, algorithm)
        # Convert the PIL image to a base64 data URI for the frontend.
        buf = io.BytesIO()
        img.save(buf, format='PNG')
        buf.seek(0)
        img_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
        return jsonify({'image': f'data:image/png;base64,{img_base64}'})
    except Exception as e:
        # Boundary handler: log the traceback, surface the message as HTTP 500.
        print(f"Error: {str(e)}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # not safe for production exposure on 0.0.0.0; confirm deployment uses a
    # proper WSGI server instead of this entry point.
    app.run(host="0.0.0.0", port=7860, debug=True)