mali08890 committed (verified)
Commit 165a79a · 1 Parent(s): b9abea7

Upload 5 files

Files changed (5)
  1. README.md +5 -8
  2. app.py +46 -60
  3. gitattributes +35 -0
  4. rag_utils.py +32 -0
  5. requirements.txt +6 -1
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
-title: Testingreturnpolicyspace
-emoji: 💬
-colorFrom: yellow
-colorTo: purple
+title: Cinco Return Chatbot
+emoji: 🤖
+colorFrom: blue
+colorTo: green
 sdk: gradio
-sdk_version: 5.0.1
+sdk_version: "3.50.2"
 app_file: app.py
 pinned: false
-short_description: 'checking different version of return policy '
 ---
-
-An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
app.py CHANGED
@@ -1,64 +1,52 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
+
+# Load a lightweight, CPU-friendly model
+model_id = "google/flan-t5-base"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
+
+# Pipeline setup
+chatbot = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+
+# Format the prompt for a single chat-like turn
+def format_prompt(user_input):
+    base_prompt = (
+        "You are Cinco, a helpful assistant that answers customer questions ONLY about product returns, refunds, and exchanges.\n"
+        "Respond concisely, clearly, and don't repeat the question. If the question is not about returns, politely say so.\n\n"
+        f"Customer: {user_input}\n"
+        f"Cinco Assistant:"
+    )
+    return base_prompt
+
+# Chatbot logic: generate a reply and append it to the running history
+def chat_fn(user_input, history):
+    history = history or []
+    prompt = format_prompt(user_input)
+    response = chatbot(prompt, max_length=256, do_sample=False, clean_up_tokenization_spaces=True)[0]["generated_text"]
+
+    # Extract only the latest assistant response
+    if "Cinco Assistant:" in response:
+        assistant_reply = response.split("Cinco Assistant:")[-1].strip()
+    else:
+        assistant_reply = response.strip()
+
+    history.append((user_input, assistant_reply))
+    # Clear the textbox, refresh the chat window, and persist the history in state
+    return "", history, history
+
+# Build Gradio UI
+with gr.Blocks(title="Cinco Returns Chatbot") as demo:
+    gr.Markdown("## 🧾 Cinco Returns Chatbot\nAsk anything about returns, refunds, or exchanges.")
+    chatbot_ui = gr.Chatbot(label="Cinco Assistant", show_label=True)
+    with gr.Row():
+        user_input = gr.Textbox(placeholder="Example: Can I return a used item without a receipt?", scale=6)
+        submit_btn = gr.Button("Send", scale=1)
+
+    state = gr.State([])
+
+    submit_btn.click(fn=chat_fn, inputs=[user_input, state], outputs=[user_input, chatbot_ui, state])
+    user_input.submit(fn=chat_fn, inputs=[user_input, state], outputs=[user_input, chatbot_ui, state])
 
 if __name__ == "__main__":
     demo.launch()
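
Note that rag_utils.py (added further down in this commit) defines a RAGEngine, but app.py never imports it. A minimal sketch of how the retriever could feed policy context into the prompt, assuming a hypothetical products.json shaped the way RAGEngine expects:

from rag_utils import RAGEngine

# Hypothetical data file; this commit does not ship one.
rag = RAGEngine("products.json")

def format_prompt_with_context(user_input):
    # Prepend the top-matching policy records to the same instruction block used in format_prompt.
    context = "\n\n".join(rag.retrieve(user_input, top_k=3))
    return (
        "You are Cinco, a helpful assistant that answers customer questions ONLY about product returns, refunds, and exchanges.\n"
        "Use the following policy excerpts when they are relevant.\n\n"
        f"{context}\n\n"
        f"Customer: {user_input}\n"
        "Cinco Assistant:"
    )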
gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
rag_utils.py ADDED
@@ -0,0 +1,32 @@
+import json
+from sentence_transformers import SentenceTransformer
+import faiss
+import numpy as np
+
+class RAGEngine:
+    def __init__(self, json_path):
+        self.embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+        with open(json_path, 'r') as f:
+            self.data = json.load(f)
+        self.texts = []
+        self.build_corpus()
+        self.build_index()
+
+    def build_corpus(self):
+        # Combine multiple fields for better context
+        self.texts = [
+            f"Product: {item['product_name']}\nCategory: {item['category']}\nPolicy: {item['return_policy']}\nReason: {item['return_reason']}"
+            for item in self.data
+        ]
+
+    def build_index(self):
+        embeddings = self.embedder.encode(self.texts, convert_to_numpy=True)
+        dim = embeddings.shape[1]
+        self.index = faiss.IndexFlatL2(dim)
+        self.index.add(embeddings)
+
+    def retrieve(self, query, top_k=3):
+        query_emb = self.embedder.encode([query], convert_to_numpy=True)
+        distances, indices = self.index.search(query_emb, top_k)
+        results = [self.texts[idx] for idx in indices[0] if idx != -1]
+        return results
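
build_corpus assumes each record in the JSON file carries product_name, category, return_policy, and return_reason keys. A hypothetical example of that input format and a round trip through the engine (no data file is part of this commit):

import json

from rag_utils import RAGEngine

# Hypothetical records shaped the way build_corpus expects.
sample_data = [
    {
        "product_name": "Wireless Mouse",
        "category": "Electronics",
        "return_policy": "Returnable within 30 days with the original receipt.",
        "return_reason": "Defective on arrival",
    },
]

with open("products.json", "w") as f:
    json.dump(sample_data, f, indent=2)

engine = RAGEngine("products.json")
print(engine.retrieve("Can I return a defective mouse?", top_k=1))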
requirements.txt CHANGED
@@ -1 +1,6 @@
-huggingface_hub==0.25.2
+transformers==4.40.0
+gradio==4.44.1
+torch
+faiss-cpu
+sentence-transformers
+accelerate>=0.26.0