|
|
import gradio as gr |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from llama_index.llms.huggingface import HuggingFaceLLM |
|
|
|
|
|
# Local HuggingFace model served through LlamaIndex.
# flan-t5-base is a small seq2seq model; context_window matches its
# 512-token encoder limit.
llm = HuggingFaceLLM(
    model_name="google/flan-t5-base",
    tokenizer_name="google/flan-t5-base",
    context_window=512,
    max_new_tokens=256,
    # Greedy decoding. With do_sample=False, transformers ignores any
    # temperature value (and warns about it), so temperature is omitted.
    generate_kwargs={
        "do_sample": False,
    },
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from langchain_core.prompts import PromptTemplate |
|
|
from langchain.chains import LLMChain |
|
|
from langchain.llms.base import LLM |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LlamaIndexLLMAdapter(LLM):
    """Expose the module-level LlamaIndex ``llm`` as a LangChain LLM.

    LangChain chains invoke :meth:`_call`; the prompt is delegated to the
    LlamaIndex HuggingFace wrapper and its raw completion text returned.
    """

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses for serialization/telemetry.
        return "llamaindex-huggingface"

    def _call(self, prompt: str, stop=None) -> str:
        # NOTE(review): `stop` sequences are accepted but not forwarded —
        # this delegation does not pass them to llm.complete().
        completion = llm.complete(prompt)
        return completion.text
|
|
|
|
|
|
|
|
# Single shared adapter instance used by the chain below.
langchain_llm = LlamaIndexLLMAdapter()
|
|
|
|
|
|
|
|
# Prompt contract: a single {question} placeholder, filled in by the chain.
_PROMPT_TEMPLATE = """
You are a helpful AI assistant.
Answer clearly and concisely.

Question: {question}
Answer:
"""

# from_template infers input_variables=["question"] from the placeholder.
prompt = PromptTemplate.from_template(_PROMPT_TEMPLATE)
|
|
|
|
|
|
|
|
# Prompt -> LLM pipeline used by the chat() callback.
# NOTE(review): LLMChain is deprecated in newer LangChain releases in favour
# of LCEL (prompt | llm); kept because callers rely on the Chain API here.
chain = LLMChain(prompt=prompt, llm=langchain_llm)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def chat(user_input):
    """Gradio callback: answer a single user question via the chain.

    Args:
        user_input: Raw text from the textbox; may be None or whitespace.

    Returns:
        The model's answer, or a short prompt-for-input message when the
        input is empty.
    """
    # Guard against None as well as blank/whitespace-only input
    # (the original .strip() call would raise AttributeError on None).
    if not user_input or not user_input.strip():
        return "Please enter a message."
    # .invoke() replaces the deprecated Chain.run(); LLMChain returns a
    # dict whose "text" key (the default output_key) holds the answer.
    result = chain.invoke({"question": user_input})
    return result["text"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Minimal Gradio UI wired to the chat() callback.
demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Ask something..."),
    outputs="text",
    title="LangChain + LlamaIndex Chatbot",
    description="Integrated chatbot (No RAG, No Vector DB)",
)

# Only start the web server when executed as a script — importing this
# module (e.g. for tests) should not launch anything.
if __name__ == "__main__":
    demo.launch()
|
|
|