"""Gradio Q&A chatbot backed by a local GPT-2 text-generation pipeline via LangChain."""

import gradio as gr
from langchain_core.prompts import PromptTemplate
from langchain_huggingface.llms import HuggingFacePipeline

# Load the HuggingFace model as a local pipeline.
# max_new_tokens is deliberately tiny (10) — GPT-2 answers are short stubs.
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 10},
)

# Create a prompt template (chain-of-thought style answer prefix).
template = """Question: {question} Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)

# Combine prompt with HuggingFace model using the LCEL pipe operator.
chain = prompt | hf


def respond(question, history):
    """Return the chain's generated answer for *question*.

    Parameters
    ----------
    question : str
        The user's current message, injected into the prompt template.
    history : list
        Conversation history supplied by gr.ChatInterface; intentionally
        unused — every turn is answered independently of prior turns.
    """
    return chain.invoke({"question": question})


# Create the Gradio chat UI around respond().
chat_interface = gr.ChatInterface(
    fn=respond,
    title="Q&A Chatbot",
    description="Ask any question and get an answer!",
)

# Guard the launch so importing this module (e.g. for tests) does not
# start the web server; running the file directly behaves as before.
if __name__ == "__main__":
    chat_interface.launch()