# Hugging Face Space demo (extraction residue "Spaces: Sleeping" removed).
"""SQL question-answering agent over a SQLite art database, served via Gradio."""

from dotenv import load_dotenv
from langchain import hub
from langchain.chat_models import init_chat_model
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langgraph.prebuilt import create_react_agent
from sqlalchemy import create_engine

import gradio as gr

# Load API keys (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

# Wrap the SQLite database so the LangChain SQL tools can introspect and query it.
DB_PATH = "art.db"
engine = create_engine(f"sqlite:///{DB_PATH}")
db = SQLDatabase(engine)

# Active model; commented alternatives kept for quick switching during development.
# llm = init_chat_model("llama3-8b-8192", model_provider="groq")
# llm = init_chat_model("gpt-4o-mini", model_provider="openai")
llm = init_chat_model("meta-llama/llama-4-scout-17b-16e-instruct", model_provider="groq")

toolkit = SQLDatabaseToolkit(db=db, llm=llm)

# Pull the community SQL-agent system prompt and bind our dialect / row limit.
prompt_template = hub.pull("langchain-ai/sql-agent-system-prompt")
system_message = prompt_template.format(dialect="SQLite", top_k=5)

# ReAct agent that can plan, run the SQL tools, and answer in natural language.
agent_executor = create_react_agent(
    llm, toolkit.get_tools(), prompt=system_message
)
def interact_with_agent(message, history):
    """Gradio chat callback: replay the conversation to the SQL agent.

    Args:
        message: The user's newest message (str).
        history: Chat history in Gradio "messages" format — a list of
            ``{"role": ..., "content": ...}`` dicts.

    Returns:
        The agent's final answer text (str).
    """
    # Seed the conversation with a system instruction, then replay the
    # prior user/assistant turns in order; other roles are dropped.
    messages = [("system", "You are a helpful assistant that excel in data exploration.")]
    messages.extend(
        (turn["role"], turn["content"])
        for turn in history
        if turn["role"] in ("user", "assistant")
    )
    messages.append(("user", message))
    # NOTE(review): stream_mode is a streaming option; for a blocking
    # .invoke() it has no visible effect here — kept for parity. Confirm
    # against the installed langgraph version.
    response = agent_executor.invoke({"messages": messages}, stream_mode="values")
    # The agent's final answer is the last message in the returned state.
    return response["messages"][-1].content
# Chat UI using Gradio's "messages" history format; the example seeds a
# first question the user can click.
demo = gr.ChatInterface(
    interact_with_agent,
    type="messages",
    examples=["What is the title of the oldest art in the database?"],
    # save_history=True  # enable to persist chats across page reloads
)

if __name__ == "__main__":
    demo.launch()