Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Load Pretrained LLaMA 2 via Hugging Face
# --------------------------
# NOTE(review): this file shows no import lines — AutoTokenizer,
# AutoModelForCausalLM, pipeline (transformers), HuggingFacePipeline,
# HuggingFaceEmbeddings, Chroma, RetrievalQA (langchain), gr (gradio),
# os, WhisperProcessor, etc. must be imported at the top of the file.
llama_model_id = "meta-llama/Llama-2-7b-chat-hf"  # Example: or your custom model ID

# Read the access token from the environment instead of hard-coding it in
# source (never commit real tokens). The placeholder default preserves the
# original behavior when HF_TOKEN is unset.
token = os.environ.get("HF_TOKEN", "<your_HuggingFace_access_token>")

tokenizer = AutoTokenizer.from_pretrained(llama_model_id, token=token)
model = AutoModelForCausalLM.from_pretrained(llama_model_id, token=token)

# Text generation pipeline shared by the chat handler below.
text_pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,  # cap on generated length per call
    temperature=0.7,     # sampling temperature
    top_p=0.9,           # nucleus sampling
)

# LangChain LLM wrapper so the same pipeline can drive RetrievalQA.
llm = HuggingFacePipeline(pipeline=text_pipe)
|
| 20 |
+
|
| 21 |
+
# --------------------------
# Load or Create Chroma Vector DB
# --------------------------
# Sentence-transformer embeddings used both when indexing PDFs and when
# embedding queries at retrieval time.
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Directory where Chroma persists its index between runs.
persist_dir = "./chroma_db"
os.makedirs(persist_dir, exist_ok=True)
# Re-opens an existing index at persist_dir, or starts an empty one.
vector_db = Chroma(persist_directory=persist_dir, embedding_function=embedding_model)
retriever = vector_db.as_retriever()
# Retrieval-augmented QA chain: fetch relevant chunks, answer with the LLM.
# return_source_documents=True keeps supporting chunks in the result dict
# (only "result" is read by handle_pdf_query below).
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, return_source_documents=True)
|
| 30 |
+
|
| 31 |
+
# --------------------------
# Whisper for Audio to Text
# --------------------------
# whisper-small checkpoint: the processor handles feature extraction and
# token decoding; the model performs the actual speech recognition.
whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
|
| 36 |
+
|
| 37 |
+
def transcribe_audio(audio):
    """Transcribe microphone audio to text with Whisper.

    Accepts either the ``(sample_rate, np.ndarray)`` tuple that
    ``gr.Audio(type="numpy")`` actually delivers, or a datasets-style
    ``{"array": ..., "sampling_rate": ...}`` dict (the original behavior).

    Returns the decoded transcript string, or "" when no audio was given.
    """
    if audio is None:
        # Nothing recorded / cleared input.
        return ""
    if isinstance(audio, dict):
        waveform, sampling_rate = audio["array"], audio["sampling_rate"]
    else:
        # Gradio numpy mode hands back (sample_rate, data).
        sampling_rate, waveform = audio
    # Keep the processed features in their own name instead of shadowing
    # the parameter, as the original did.
    inputs = whisper_processor(waveform, sampling_rate=sampling_rate, return_tensors="pt")
    token_ids = whisper_model.generate(**inputs)
    return whisper_processor.batch_decode(token_ids, skip_special_tokens=True)[0]
|
| 41 |
+
|
| 42 |
+
# --------------------------
# Chatbot and PDF Query Functions
# --------------------------
def handle_user_input(text):
    """Run a user query through the LLaMA 2 chat template and return the reply.

    Wraps *text* in the [INST] instruction format expected by the chat
    checkpoint, generates with the shared pipeline, and strips the echoed
    prompt so only the model's answer is returned.
    """
    prompt = f"<s>[INST] {text} [/INST]"
    generated = text_pipe(prompt)[0]["generated_text"]
    # Everything after the final [/INST] marker is the model's answer.
    _, _, answer = generated.rpartition("[/INST]")
    return answer.strip()
|
| 49 |
+
|
| 50 |
+
def handle_pdf_query(text):
    """Answer *text* from the Chroma-backed PDF knowledge base.

    Delegates to the module-level RetrievalQA chain and returns only the
    generated answer (the chain's "result" field).
    """
    qa_output = qa_chain(text)
    return qa_output["result"]
|
| 53 |
+
|
| 54 |
+
# --------------------------
# Upload & Index New PDF
# --------------------------
def upload_and_index_pdf(pdf_file):
    """Split an uploaded PDF into chunks and add them to the Chroma index.

    Args:
        pdf_file: the value from ``gr.File`` — a tempfile-like object with a
            ``.name`` attribute (Gradio 3) or a plain filepath string
            (newer Gradio versions). ``None`` when nothing was uploaded.

    Returns:
        A human-readable status message.
    """
    if pdf_file is None:
        return "No file uploaded."
    # gr.File hands back a str path on newer Gradio versions; older ones
    # give an object with .name — support both.
    path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
    pages = PyPDFLoader(path).load()
    # 500-char chunks with 50-char overlap so answers can span boundaries.
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    docs = splitter.split_documents(pages)
    vector_db.add_documents(docs)
    vector_db.persist()  # flush the new embeddings to ./chroma_db on disk
    return f"Uploaded and indexed: {os.path.basename(path)}"
|
| 67 |
+
|
| 68 |
+
# --------------------------
# Gradio UI
# --------------------------
# NOTE(review): the emoji in the labels below appear mojibake'd ("π€",
# "π¬", ...) — likely an encoding issue in this copy; confirm the intended
# characters against the original file before editing them.
with gr.Blocks(title="GenAI Customer Support") as demo:
    gr.Markdown("## π€ LLaMA 2 Customer Support Chatbot")

    # Tab 1: speech-to-text via Whisper.
    with gr.Tab("π Ask with Voice"):
        # NOTE(review): `source=` was renamed to `sources=[...]` in Gradio 4;
        # this call requires Gradio 3.x — confirm the pinned version.
        audio_input = gr.Audio(source="microphone", type="numpy")
        audio_output = gr.Textbox(label="Transcribed Text")
        audio_button = gr.Button("Transcribe")
        audio_button.click(fn=transcribe_audio, inputs=audio_input, outputs=audio_output)

    # Tab 2: direct chat with the LLaMA 2 pipeline.
    with gr.Tab("π¬ Chat with Bot"):
        user_input = gr.Textbox(placeholder="Ask about booking, trains, etc.", label="Your Query")
        bot_response = gr.Textbox(label="LLaMA 2 Response")
        chat_button = gr.Button("Get Response")
        chat_button.click(fn=handle_user_input, inputs=user_input, outputs=bot_response)

    # Tab 3: retrieval-augmented QA over the indexed PDFs.
    with gr.Tab("π Ask from PDF Knowledge Base"):
        pdf_input = gr.Textbox(placeholder="Ask about services, policies, etc.", label="Ask PDF")
        pdf_response = gr.Textbox(label="Knowledge Base Answer")
        pdf_button = gr.Button("Get Answer")
        pdf_button.click(fn=handle_pdf_query, inputs=pdf_input, outputs=pdf_response)

    # Tab 4: upload a new PDF into the Chroma index.
    with gr.Tab("π Upload PDF to Index"):
        pdf_file = gr.File(label="Upload PDF")
        upload_result = gr.Textbox(label="Status")
        upload_button = gr.Button("Upload & Index")
        upload_button.click(fn=upload_and_index_pdf, inputs=pdf_file, outputs=upload_result)
|
| 97 |
+
|
| 98 |
+
# --------------------------
# Launch
# --------------------------
demo.launch()  # start the Gradio server (blocking call)
|