# ThePadelito / TWS_fonctions.py
# (Hugging Face Space upload-page residue, kept as a comment so the file parses:
#  obaes — "Upload 6 files" — commit 79b4b10 verified)
import os
import time
from textwrap import dedent
from typing import Dict, List, Optional
from PIL import Image
from google import genai
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.firecrawl import FirecrawlTools
from agno.tools.zep import ZepTools
from openai import OpenAI
import os
from agno.models.huggingface import HuggingFace
from huggingface_hub import InferenceClient
import base64
import io
from agno.models.mistral import MistralChat
from agno.models.google import Gemini
import gradio as gr
from agno.storage.sqlite import SqliteStorage
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.memory.v2.db.sqlite import SqliteMemoryDb
from agno.memory.v2.manager import MemoryManager
from agno.memory.v2.memory import Memory
from agno.agent import Agent
from agno.embedder.cohere import CohereEmbedder
from agno.knowledge.url import UrlKnowledge
from agno.models.anthropic import Claude
from agno.reranker.cohere import CohereReranker
from agno.vectordb.lancedb import LanceDb, SearchType
from agno.agent import Agent
from agno.embedder.cohere import CohereEmbedder
from agno.knowledge.url import UrlKnowledge
from agno.models.anthropic import Claude
from agno.reranker.cohere import CohereReranker
from agno.tools.reasoning import ReasoningTools
from agno.vectordb.lancedb import LanceDb, SearchType
from agno.knowledge.csv_url import CSVUrlKnowledgeBase
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
# Shared agent session id; regenerated by booking_reset() to discard history.
# NOTE: the original `global idd` statement was a no-op at module level
# (assignment at module scope already creates the global) and was removed.
idd = "padel" + str(time.time())
def get_session(request=None):
    """Return the Gradio session hash for the given request.

    Bug fix: the original referenced ``gradio.Request.session_hash``, but the
    module is imported as ``gr`` (so the name ``gradio`` raised NameError),
    and ``session_hash`` is an attribute of a request *instance*, not of the
    class. The request must be passed in by the Gradio event handler.

    Args:
        request: The ``gr.Request`` object Gradio injects into event
            handlers. Defaults to ``None``.

    Returns:
        The session hash string, or ``None`` when no request is provided.
    """
    if request is None:
        return None
    return request.session_hash
def get_session_hash_from_request(request=None):
    """Return a human-readable message containing the session hash.

    Bug fix: the original referenced ``gradio.Request.session_hash``, but the
    module is imported as ``gr`` (NameError at runtime), and ``session_hash``
    belongs to a request instance — it must be passed in by Gradio.

    Args:
        request: The ``gr.Request`` object Gradio injects into event
            handlers. Defaults to ``None``.

    Returns:
        A string of the form ``"Your session hash is: <hash>"``.
    """
    session_hash = None if request is None else request.session_hash
    print(f"Inside bfunction: Session hash is {session_hash}")
    return f"Your session hash is: {session_hash}"
# Database/table name constants.
# NOTE(review): neither name is referenced anywhere in this chunk — presumably
# consumed by code outside this view; confirm before removing.
cellardb = "cellardb"
bottledb = "bottledb"
def pil_image_to_base64(image_pil: Image.Image, format: str = "PNG") -> str:
    """Encode a Pillow image as a base64 data URI.

    Args:
        image_pil: Source Pillow image.
        format: Encoding passed to ``Image.save`` (e.g. "PNG", "JPEG").

    Returns:
        A ``data:image/<fmt>;base64,<payload>`` string suitable for
        embedding directly in HTML.
    """
    # JPEG cannot store an alpha channel; flatten RGBA first to avoid a
    # save-time error.
    if image_pil.mode == 'RGBA' and format.upper() == "JPEG":
        image_pil = image_pil.convert('RGB')
    buffer = io.BytesIO()
    image_pil.save(buffer, format=format)
    payload = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return f"data:image/{format.lower()};base64,{payload}"
def reduce_image_size_by_half(image_pil: Image.Image, factor: int = 2) -> Image.Image:
    """Downscale a Pillow image by an integer factor (default: halve).

    Generalized from the original hard-coded halving: ``factor=2`` preserves
    the old behaviour exactly.

    Args:
        image_pil (Image.Image): The input Pillow Image object.
        factor (int): Divisor applied to both dimensions; must be >= 1.

    Returns:
        Image.Image: A new image of size ``(w // factor, h // factor)``,
        clamped to at least 1 pixel per side so tiny inputs (e.g. a 1x1
        image) do not produce a zero-size image, which ``resize`` rejects.

    Raises:
        TypeError: If *image_pil* is not a Pillow Image.
        ValueError: If *factor* is less than 1.
    """
    if not isinstance(image_pil, Image.Image):
        raise TypeError("Input must be a Pillow Image.Image object.")
    if factor < 1:
        raise ValueError("factor must be >= 1")
    original_width, original_height = image_pil.size
    # Integer division, clamped to 1 px minimum.
    new_width = max(1, original_width // factor)
    new_height = max(1, original_height // factor)
    # LANCZOS is the highest-quality resampling filter for downscaling.
    return image_pil.resize((new_width, new_height), Image.LANCZOS)
def add_booking(details) -> str:
    """Record a padel court booking through the LLM booking agent.

    Args:
        details: Free-text booking request, e.g.
            "book court 1 at 9h wiht jean, john luce jeanne".

    Returns:
        response: The agent's streamed reply, concatenated into one string.
    """
    print("add_booking " + details)
    booking_agent = Agent(
        model=MistralChat(id="magistral-small-2506", api_key=os.getenv("MISTRAL_API_KEY")),
        # Fixed session id so bookings persist across execution cycles.
        session_id=idd,
        storage=SqliteStorage(table_name="padel", db_file="tmp/padel.db"),
        add_history_to_messages=True,
        description="You are a helpful padel booking assistant that always responds in a polite, upbeat and positive manner. each time you add or remove a party, you keep track of the booking listing. a party needs 4 players, there are maximum 4 padel court, play is 1h30, can be booked between 9 AM to 9 PM.",
        num_history_runs=10,
    )
    prompt = "answer consisely 'ok added' once recorded booking and do not ask for confirmation. input : " + details
    # Concatenate the non-empty streamed chunks into the final reply.
    return "".join(
        str(chunk.content)
        for chunk in booking_agent.run(prompt, stream=True)
        if chunk.content
    )
def remove_booking(details) -> str:
    """Remove an existing padel court booking through the LLM booking agent.

    Args:
        details: Free-text removal request, e.g. "remove booking court 1 at 9h".

    Returns:
        response: The agent's streamed reply, concatenated into one string.
    """
    print("remove_booking " + details)
    booking_agent = Agent(
        model=MistralChat(id="magistral-small-2506", api_key=os.getenv("MISTRAL_API_KEY")),
        # Fixed session id so bookings persist across execution cycles.
        session_id=idd,
        storage=SqliteStorage(table_name="padel", db_file="tmp/padel.db"),
        add_history_to_messages=True,
        description="You are a helpful padel booking assistant that always responds in a polite, upbeat and positive manner. each time you add or remove a party, you keep track of the booking listing. a party needs 4 players, there are maximum 4 padel court, play is 1h30, can be booked between 9 AM to 9 PM.",
        num_history_runs=10,
    )
    prompt = "answer only based on the bookings, remove only if the booking exist, answer consisely ok removed if possible and not removed if doesnt exist; " + details
    # Concatenate the non-empty streamed chunks into the final reply.
    return "".join(
        str(chunk.content)
        for chunk in booking_agent.run(prompt, stream=True)
        if chunk.content
    )
def list_booking(details) -> str:
    """List the currently recorded court bookings via the LLM booking agent.

    Args:
        details: Free-text listing request, e.g. "list court booking".

    Returns:
        response: The agent's streamed reply, concatenated into one string.
    """
    print("list_booking " + details)
    booking_agent = Agent(
        model=MistralChat(id="magistral-small-2506", api_key=os.getenv("MISTRAL_API_KEY")),
        # Fixed session id so bookings persist across execution cycles.
        session_id=idd,
        storage=SqliteStorage(table_name="padel", db_file="tmp/padel.db"),
        add_history_to_messages=True,
        description="You are a helpful padel booking assistant that always responds in a polite, upbeat and positive manner. each time you add or remove a party, you keep track of the booking listing. a party needs 4 players, there are maximum 4 padel court, play is 1h30, can be booked between 9 AM to 9 PM.",
        num_history_runs=10,
    )
    prompt = "answer only based on the bookings, report concisely with the list of each booking. " + details
    # Concatenate the non-empty streamed chunks into the final reply.
    return "".join(
        str(chunk.content)
        for chunk in booking_agent.run(prompt, stream=True)
        if chunk.content
    )
def booking_reset() -> str:
    """Reset all bookings by rotating the shared agent session id.

    A fresh session id makes subsequent agent calls start with an empty
    conversation history, effectively discarding every recorded booking.

    Returns:
        response: A short confirmation string.
    """
    global idd
    idd = f"padel{time.time()}"
    return "ok resetted the bookings"
def get_search_padel(details) -> str:
    """Answer a padel question using agentic RAG over the local PDF references.

    Args:
        details: Free-text search query.

    Returns:
        response: The agent's streamed answer (with sources), concatenated
        into one string.
    """
    print("get_search_information ")
    # Hybrid vector+keyword search over the PDFs in ./references, with
    # Cohere embeddings and reranking.
    vector_db = LanceDb(
        uri="tmp/lancedb",
        table_name="agno_docs",
        search_type=SearchType.hybrid,
        embedder=CohereEmbedder(id="embed-v4.0", api_key=os.getenv("CO_API_KEY")),
        reranker=CohereReranker(model="rerank-v3.5", api_key=os.getenv("CO_API_KEY")),
    )
    knowledge_base = PDFKnowledgeBase(
        path="references",
        reader=PDFReader(),
        vector_db=vector_db,
    )
    print("get_search_information 2")
    rag_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY")),
        # Providing `knowledge` enables agentic RAG; search_knowledge lets the
        # agent query it on demand.
        knowledge=knowledge_base,
        tools=[ReasoningTools(add_instructions=True)],
        search_knowledge=True,
        instructions=[
            "Include sources in your response.",
            "Always search your knowledge before answering the question.",
            "Only include the output in your response. No other text.",
        ],
        markdown=True,
    )
    # Load (without rebuilding) the vector index before querying.
    rag_agent.knowledge.load(recreate=False)
    # Concatenate the non-empty streamed chunks into the final reply.
    return "".join(
        str(chunk.content)
        for chunk in rag_agent.run(
            "find information on " + details,
            show_full_reasoning=True,
            stream_intermediate_steps=True,
            stream=True,
        )
        if chunk.content
    )