# --- Export residue from the hosting page (not code; kept as comments) -------
# Spaces:
# Sleeping
# Sleeping
# File size: 1,847 Bytes
# 81a68bd c5ba41a 81a68bd c5ba41a 81a68bd 6280fdb c5ba41a
# (line-number gutter removed)
# --- Agent setup -------------------------------------------------------------
# All imports grouped here (PEP 8: imports at the top, not scattered between
# statements as they originally were).
from dotenv import load_dotenv
from sqlalchemy import create_engine

from langchain import hub
from langchain.chat_models import init_chat_model
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langgraph.prebuilt import create_react_agent

# Load API credentials (e.g. the Groq key) from a local .env file.
load_dotenv()

# Local SQLite database the agent will explore.
db_path = "art.db"
engine = create_engine(f'sqlite:///{db_path}')
db = SQLDatabase(engine)

# Chat model served by Groq. Previously tried alternatives, kept for reference:
#   init_chat_model("llama3-8b-8192", model_provider="groq")
#   init_chat_model("gpt-4o-mini", model_provider="openai")
llm = init_chat_model("meta-llama/llama-4-scout-17b-16e-instruct", model_provider="groq")

# Toolkit bundling the SQL tools the agent can call against `db`.
toolkit = SQLDatabaseToolkit(db=db, llm=llm)

# Pull the community SQL-agent system prompt and fill in our dialect and the
# default result-row limit.
prompt_template = hub.pull("langchain-ai/sql-agent-system-prompt")
system_message = prompt_template.format(dialect="SQLite", top_k=5)

# ReAct-style agent wired to the LLM and the SQL toolkit's tools.
agent_executor = create_react_agent(
    llm, toolkit.get_tools(), prompt=system_message
)
import gradio as gr
def interact_with_agent(message, history):
    """Bridge the Gradio chat callback to the LangGraph SQL agent.

    Converts the Gradio ``history`` (a list of ``{'role', 'content'}`` dicts,
    per ``type="messages"``) plus the new user ``message`` into the
    ``(role, content)`` tuple list the agent expects, invokes the agent, and
    returns the text of its final reply.
    """
    conversation = [
        ("system", "You are a helpful assistant that excel in data exploration.")
    ]
    # Carry over prior user/assistant turns; any other roles are dropped.
    conversation.extend(
        (turn['role'], turn['content'])
        for turn in history
        if turn['role'] in ("user", "assistant")
    )
    conversation.append(("user", message))
    result = agent_executor.invoke({"messages": conversation}, stream_mode="values")
    # The agent returns the full message list; the last entry is the answer.
    return result["messages"][-1].content
# Gradio chat UI wired to the agent callback.
demo = gr.ChatInterface(
    interact_with_agent,
    # 'messages' delivers history as {'role', 'content'} dicts — the shape
    # interact_with_agent reads.
    type="messages",
    examples=["What is the title of the oldest art in the database?",],
    #save_history=True
)

# Launch the web server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()