import getpass
import os

# Resolve the Groq API key from the environment, falling back to an
# interactive prompt. Never hard-code secrets in source control: the
# previous revision embedded a literal "gsk_..." key here, which is a
# credential leak -- that key should be revoked. (This is also why
# `getpass` is imported: it reads the key without echoing it.)
if "GROQ_API_KEY" not in os.environ:
    os.environ["GROQ_API_KEY"] = getpass.getpass("Enter your Groq API key: ")
|
| | from langchain_groq import ChatGroq
|
| | from langchain_openai import ChatOpenAI
|
| | from langchain_google_genai import ChatGoogleGenerativeAI
|
| | from langchain_anthropic import ChatAnthropic
|
| | from langchain.schema import AIMessage, HumanMessage, SystemMessage
|
| | import gradio as gr
|
| |
|
# System prompt prepended to every conversation sent to the model.
system_message = "You are a helpful assistant"

# Groq-hosted Llama 3.2 3B preview, configured for deterministic output
# (temperature 0) with token streaming enabled so the UI can render the
# reply incrementally. max_tokens/timeout are left at provider defaults.
_LLM_SETTINGS = {
    "model": "llama-3.2-3b-preview",
    "temperature": 0,
    "max_tokens": None,
    "timeout": None,
    "max_retries": 3,
    "streaming": True,
}

llm = ChatGroq(**_LLM_SETTINGS)
|
| |
|
def stream_response(message, history):
    """Stream the LLM's reply to *message*, yielding the text accumulated so far.

    Generator callback for ``gr.ChatInterface``: it rebuilds the chat
    transcript as LangChain messages (system prompt first), appends the
    new user message, and streams the model's answer chunk by chunk so
    the UI updates live.

    Args:
        message: The user's latest input string (may be empty).
        history: Prior turns from Gradio. Either a list of
            ``(user, assistant)`` pairs (legacy "tuples" format) or a
            list of ``{"role": ..., "content": ...}`` dicts ("messages"
            format); both are accepted.

    Yields:
        str: The assistant reply accumulated up to the latest chunk.
    """
    conversation = [SystemMessage(content=system_message)]

    for turn in history:
        if isinstance(turn, dict):
            # Gradio "messages" format: one dict per utterance.
            role = turn.get("role")
            content = turn.get("content", "")
            if role == "user":
                conversation.append(HumanMessage(content=content))
            elif role == "assistant":
                conversation.append(AIMessage(content=content))
        else:
            # Gradio "tuples" format: a (user, assistant) pair per turn.
            human, ai = turn
            conversation.append(HumanMessage(content=human))
            conversation.append(AIMessage(content=ai))

    # Skip empty submissions. Gradio sends "" (not None) for an empty
    # textbox, so the original `is not None` check let through empty
    # HumanMessages. (Also drops the debug print that leaked every user
    # message and the full history to stdout.)
    if message:
        conversation.append(HumanMessage(content=message))

    partial_message = ""
    for chunk in llm.stream(conversation):
        partial_message += chunk.content
        yield partial_message
|
| |
|
| |
|
# Chat UI wired to the streaming LLM callback. The custom textbox is
# rendered without its container frame, auto-scrolls, and takes most of
# the row's width (scale=7).
_input_box = gr.Textbox(
    placeholder="Send to the LLM...",
    container=False,
    autoscroll=True,
    scale=7,
)

demo_interface = gr.ChatInterface(
    stream_response,
    textbox=_input_box,
)

# share=True publishes a temporary public URL; debug=True keeps the
# process attached and prints server-side errors to the console.
demo_interface.launch(share=True, debug=True)