import gradio as gr
from langchain_huggingface.llms import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
# --- Model and chain wiring ------------------------------------------------
# Wrap a local HuggingFace text-generation pipeline (gpt2) so LangChain can
# drive it. Generation is capped at 10 new tokens per call.
_gen_kwargs = {"max_new_tokens": 10}
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs=_gen_kwargs,
)

# Prompt that nudges the model toward step-by-step reasoning.
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)

# LCEL pipe: fill the prompt template, then feed it to the model.
chain = prompt | hf
# Define a function for Gradio interface
def respond(question, history):
    """Gradio chat callback: answer *question* via the LLM chain.

    *history* is supplied by gr.ChatInterface but intentionally unused;
    each question is answered independently.
    """
    answer = chain.invoke({"question": question})
    return answer
# --- UI --------------------------------------------------------------------
# Hook the responder into a chat UI and start the local web server.
chat_interface = gr.ChatInterface(
    fn=respond,
    title="Q&A Chatbot",
    description="Ask any question and get an answer!",
)
chat_interface.launch()