Spaces:
Sleeping
Sleeping
Upload 7 files
Browse files
langgraph_impl/excel_sum_tool.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.tools import tool
|
| 2 |
+
|
| 3 |
+
@tool
def excel_food_sales_sum(file_path: str) -> str:
    """
    Parse an Excel file and return the total sales of items classified as food.

    Assumes 'Item Type' and 'Sales USD' columns (matched case-insensitively
    after header normalization).

    Args:
        file_path (str): Path to the .xlsx/.xls file.

    Returns:
        str: Total food sales formatted to two decimal places, or an error
            message if parsing fails.
    """
    # Bug fix: the original module used `pd` without ever importing pandas,
    # so every call failed with "Excel parsing failed: name 'pd' is not
    # defined". A function-local import keeps the fix self-contained.
    import pandas as pd

    try:
        df = pd.read_excel(file_path)
        # Normalize headers so 'Item Type', ' item type ', etc. all match.
        df.columns = [col.strip().lower() for col in df.columns]
        # na=False: rows with a missing item type count as non-food instead
        # of raising on NaN inside .str.contains.
        food_mask = df['item type'].str.lower().str.contains("food", na=False)
        total = df.loc[food_mask, 'sales usd'].sum()
        return f"{total:.2f}"
    except Exception as e:
        return f"Excel parsing failed: {str(e)}"
|
langgraph_impl/file_parser.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
def parse_file_and_summarize(file_path: str, query: str = "") -> str:
    """
    Read a CSV or Excel file and optionally answer a simple question about it.

    Args:
        file_path (str): Path to the file (.csv or .xlsx).
        query (str): Optional freeform instruction (e.g. "total food sales").

    Returns:
        str: Summary or result from the file, or a human-readable error
            message (this function never raises).
    """
    try:
        _, ext = os.path.splitext(file_path.lower())
        if ext == ".csv":
            df = pd.read_csv(file_path)
        elif ext in (".xls", ".xlsx"):
            df = pd.read_excel(file_path)
        else:
            return "Unsupported file format. Please upload CSV or Excel."

        if df.empty:
            return "The file is empty or unreadable."

        if not query:
            return (
                f"Loaded file with {df.shape[0]} rows and {df.shape[1]} columns."
                f"\nColumns: {', '.join(df.columns)}"
            )

        # Very basic natural language query handling (expand with LLM if needed)
        if "total" in query.lower() and "food" in query.lower():
            # Bug fix: the original indexed df['category'] before checking it
            # existed, so a missing column surfaced as the opaque
            # "File parsing error: 'category'". Validate both columns first.
            if "category" not in df.columns:
                return "Could not find 'category' column in the file."
            if "sales" not in df.columns:
                return "Could not find 'sales' column in the file."
            food_rows = df[df["category"].str.lower() == "food"]
            total = food_rows["sales"].sum()
            return f"Total food sales: ${total:.2f}"
        return "Query not supported. Please specify a clearer question."

    except Exception as e:
        return f"File parsing error: {str(e)}"
|
langgraph_impl/image_chess_solver.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import chess
|
| 2 |
+
import chess.engine
|
| 3 |
+
import tempfile
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
# Path to your Stockfish binary (update if needed)
|
| 7 |
+
STOCKFISH_PATH = "/usr/bin/stockfish"
|
| 8 |
+
|
| 9 |
+
def analyze_position_from_fen(fen: str, time_limit: float = 1.0) -> str:
    """
    Use Stockfish to analyze the best move from a given FEN string.

    Args:
        fen (str): Forsyth–Edwards Notation of the board.
        time_limit (float): Time in seconds to let Stockfish think.

    Returns:
        str: Best move in standard algebraic notation, or an error message.
    """
    try:
        board = chess.Board(fen)
        # Bug fix: the original called engine.quit() only on the success
        # path, leaking the Stockfish subprocess whenever play() raised.
        # SimpleEngine is a context manager and shuts the engine down even
        # on error.
        with chess.engine.SimpleEngine.popen_uci(STOCKFISH_PATH) as engine:
            result = engine.play(board, chess.engine.Limit(time=time_limit))
        return board.san(result.move)
    except Exception as e:
        return f"Stockfish error: {e}"
|
| 26 |
+
|
| 27 |
+
def solve_chess_image(image_path: str) -> str:
    """
    Placeholder image-to-FEN pipeline.

    Swap the hard-coded FEN below for real OCR/vision extraction once it
    exists; everything downstream already works off a FEN string.

    Args:
        image_path (str): Path to chessboard image.

    Returns:
        str: Best move or error.
    """
    # Development stand-in position (black to move, guaranteed mate).
    sample_fen = "6k1/5ppp/8/8/8/8/5PPP/6K1 b - - 0 1"

    try:
        print(f"Simulating FEN extraction from image: {image_path}")
        # Replace the above with actual OCR image-to-FEN logic.
        return (
            f"Detected FEN: {sample_fen}\n"
            f"Best move for Black: {analyze_position_from_fen(sample_fen)}"
        )
    except Exception as e:
        return f"Image analysis error: {e}"
|
langgraph_impl/vegetable_classifier_tool.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.tools import tool
|
| 2 |
+
|
| 3 |
+
@tool
def vegetable_classifier_2022(question: str) -> str:
    """
    Classifies common grocery items from a 2022 Wikipedia-based classification.
    Returns a comma-separated list of vegetables excluding all botanical fruits.
    """
    # Whitelist of items treated as "vegetables" under the 2022 classification.
    known_vegetables = frozenset({
        "broccoli", "celery", "lettuce", "zucchini", "green beans",
        "sweet potatoes", "corn", "acorns", "peanuts", "rice", "flour",
    })
    # The question is expected to embed a comma-separated item list;
    # normalize each candidate before the membership test.
    candidates = (token.strip().lower() for token in question.split(","))
    matches = sorted(item for item in candidates if item in known_vegetables)
    return ", ".join(matches)
|
langgraph_impl/wikipedia_tool.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import wikipedia
|
| 2 |
+
|
| 3 |
+
# Pin all lookups below to the English-language Wikipedia.
wikipedia.set_lang("en")
|
| 4 |
+
|
| 5 |
+
def wiki_search(query: str) -> str:
    """
    Safe Wikipedia summary tool with disambiguation and fallback protection.

    Returns a 3-sentence summary for the query, falling back to the first
    disambiguation option or the first search hit; all failures are returned
    as readable strings rather than raised.
    """
    def _summarize(title: str) -> str:
        # Single place that controls summary length.
        return wikipedia.summary(title, sentences=3)

    try:
        return _summarize(query)
    except wikipedia.DisambiguationError as e:
        if not e.options:
            return "Disambiguation error: No options available."
        # Fall back to the first disambiguation option.
        try:
            return _summarize(e.options[0])
        except Exception as inner:
            return f"Disambiguation fallback failed: {inner}"
    except wikipedia.PageError:
        hits = wikipedia.search(query)
        if not hits:
            return "No relevant Wikipedia page found."
        try:
            return _summarize(hits[0])
        except Exception as inner:
            return f"Wikipedia fallback summary error: {inner}"
    except Exception as e:
        return f"Wikipedia general error: {e}"
|
langgraph_impl/youtube_tool.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from youtube_transcript_api import YouTubeTranscriptApi
|
| 2 |
+
from youtube_transcript_api._errors import TranscriptsDisabled, NoTranscriptFound
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
def extract_video_id(url: str) -> str:
    """
    Extract the video ID from a YouTube URL.

    Supports watch, short-link (youtu.be), embed, and shorts URL forms.

    Args:
        url (str): The full YouTube video URL.

    Returns:
        str: The extracted 11-character video ID.

    Raises:
        ValueError: If no video ID can be found in the URL.
    """
    # YouTube video IDs are exactly 11 characters from [a-zA-Z0-9_-].
    patterns = [
        r"youtube\.com/watch\?v=([a-zA-Z0-9_-]{11})",
        r"youtu\.be/([a-zA-Z0-9_-]{11})",
        # Generalized beyond the original: embed and shorts links are common.
        r"youtube\.com/embed/([a-zA-Z0-9_-]{11})",
        r"youtube\.com/shorts/([a-zA-Z0-9_-]{11})",
    ]
    for pattern in patterns:
        match = re.search(pattern, url)
        if match:
            return match.group(1)
    raise ValueError("Invalid YouTube URL or unable to extract video ID.")
|
| 22 |
+
|
| 23 |
+
def get_youtube_transcript(url: str) -> str:
    """
    Fetch the transcript text for a given YouTube video.

    Args:
        url (str): The YouTube video URL.

    Returns:
        str: Combined transcript text or an error message.
    """
    try:
        entries = YouTubeTranscriptApi.get_transcript(extract_video_id(url))
        combined = " ".join(entry["text"] for entry in entries)
        # Truncate to 2000 chars to prevent token overflow
        return combined.strip()[:2000]
    except TranscriptsDisabled:
        return "This video has transcripts disabled."
    except NoTranscriptFound:
        return "No transcript was found for this video."
    except Exception as e:
        return f"Transcript error: {str(e)}"
|