Upload 5 files
Files changed:
- README.md +5 -8
- app.py +46 -60
- gitattributes +35 -0
- rag_utils.py +32 -0
- requirements.txt +6 -1
README.md
CHANGED
@@ -1,13 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Cinco Return Chatbot
+emoji: 🤖
+colorFrom: blue
+colorTo: green
 sdk: gradio
-sdk_version:
+sdk_version: "3.50.2"
 app_file: app.py
 pinned: false
-short_description: 'checking different version of return policy '
 ---
-
-An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
app.py
CHANGED
@@ -1,64 +1,50 @@
 import gradio as gr
-from …
-… (old lines 3-47 not captured in this view)
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
+
+# Load a lightweight, CPU-friendly model
+model_id = "google/flan-t5-base"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
+
+# Pipeline setup
+chatbot = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+
+# Format the user's question as a single instruction prompt
+def format_prompt(user_input):
+    base_prompt = (
+        "You are Cinco, a helpful assistant that answers customer questions ONLY about product returns, refunds, and exchanges.\n"
+        "Respond concisely, clearly, and don't repeat the question. If the question is not about returns, politely say so.\n\n"
+        f"Customer: {user_input}\n"
+        "Cinco Assistant:"
+    )
+    return base_prompt
+
+# Chatbot logic
+def chat_fn(user_input, history):
+    history = history or []
+    prompt = format_prompt(user_input)
+    response = chatbot(prompt, max_length=256, do_sample=False, clean_up_tokenization_spaces=True)[0]["generated_text"]
+
+    # Extract only the latest assistant response
+    if "Cinco Assistant:" in response:
+        assistant_reply = response.split("Cinco Assistant:")[-1].strip()
+    else:
+        assistant_reply = response.strip()
+
+    history.append((user_input, assistant_reply))
+    # Clear the input box, refresh the chat window, and persist the history state
+    return "", history, history
+
+# Build Gradio UI
+with gr.Blocks(title="Cinco Returns Chatbot") as demo:
+    gr.Markdown("## 🧾 Cinco Returns Chatbot\nAsk anything about returns, refunds, or exchanges.")
+    chatbot_ui = gr.Chatbot(label="Cinco Assistant", show_label=True)
+    with gr.Row():
+        user_input = gr.Textbox(placeholder="Example: Can I return a used item without a receipt?", scale=6)
+        submit_btn = gr.Button("Send", scale=1)
+
+    state = gr.State([])
+
+    # State is listed in outputs so the conversation history persists across turns
+    submit_btn.click(fn=chat_fn, inputs=[user_input, state], outputs=[user_input, chatbot_ui, state])
+    user_input.submit(fn=chat_fn, inputs=[user_input, state], outputs=[user_input, chatbot_ui, state])
 
 if __name__ == "__main__":
     demo.launch()
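For reference, a minimal standalone sketch of the prompt format the new app.py uses, handy for checking the model's behavior outside Gradio. It assumes transformers is installed and the google/flan-t5-base weights can be fetched from the Hub:

from transformers import pipeline

# Same model and prompt shape as app.py, driven directly from a script.
chatbot = pipeline("text2text-generation", model="google/flan-t5-base")

prompt = (
    "You are Cinco, a helpful assistant that answers customer questions "
    "ONLY about product returns, refunds, and exchanges.\n"
    "Respond concisely, clearly, and don't repeat the question. "
    "If the question is not about returns, politely say so.\n\n"
    "Customer: Can I return a used item without a receipt?\n"
    "Cinco Assistant:"
)
print(chatbot(prompt, max_length=256, do_sample=False)[0]["generated_text"])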
gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
rag_utils.py
ADDED
@@ -0,0 +1,32 @@
+import json
+from sentence_transformers import SentenceTransformer
+import faiss
+import numpy as np
+
+class RAGEngine:
+    def __init__(self, json_path):
+        self.embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+        with open(json_path, 'r') as f:
+            self.data = json.load(f)
+        self.texts = []
+        self.build_corpus()
+        self.build_index()
+
+    def build_corpus(self):
+        # Combine multiple fields for better context
+        self.texts = [
+            f"Product: {item['product_name']}\nCategory: {item['category']}\nPolicy: {item['return_policy']}\nReason: {item['return_reason']}"
+            for item in self.data
+        ]
+
+    def build_index(self):
+        embeddings = self.embedder.encode(self.texts, convert_to_numpy=True)
+        dim = embeddings.shape[1]
+        self.index = faiss.IndexFlatL2(dim)
+        self.index.add(embeddings)
+
+    def retrieve(self, query, top_k=3):
+        query_emb = self.embedder.encode([query], convert_to_numpy=True)
+        distances, indices = self.index.search(query_emb, top_k)
+        results = [self.texts[idx] for idx in indices[0] if idx != -1]
+        return results
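app.py never imports RAGEngine, so the index above is unused by the Space as committed. Below is a minimal sketch of how it could feed retrieved policy records into the prompt; the data file name returns_data.json and its fields are assumptions inferred from build_corpus(), not files in this commit:

from rag_utils import RAGEngine

# Hypothetical wiring: no JSON data file ships with this commit,
# so the path below is an assumed example.
rag = RAGEngine("returns_data.json")

def format_prompt_with_context(user_input):
    # Prepend the three most similar policy records to the prompt.
    context = "\n\n".join(rag.retrieve(user_input, top_k=3))
    return (
        "You are Cinco, a helpful assistant that answers customer questions "
        "ONLY about product returns, refunds, and exchanges.\n"
        f"Relevant policy records:\n{context}\n\n"
        f"Customer: {user_input}\n"
        "Cinco Assistant:"
    )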
requirements.txt
CHANGED
@@ -1 +1,6 @@
-…
+transformers==4.40.0
+gradio==4.44.1
+torch
+faiss-cpu
+sentence-transformers
+accelerate>=0.26.0
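To run the Space locally, install the pinned dependencies with pip install -r requirements.txt and start the app with python app.py. Note that faiss-cpu and sentence-transformers are needed only by rag_utils.py, which app.py does not yet use.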