tudeplom commited on
Commit
f6cc1d3
·
verified ·
1 Parent(s): 8dd10c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -41
app.py CHANGED
@@ -1,45 +1,17 @@
1
- from flask import Flask, request, jsonify
2
- import torch
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- from flask_cors import CORS
5
 
6
- app = Flask(__name__)
7
- CORS(app)
8
 
9
- # Load model TinyLlama tối ưu cho CPU
10
- MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
11
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
12
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32, device_map="cpu")
 
13
 
14
- # Hàm xử lý chat không lưu lịch sử
15
- def get_answer(user_text):
16
- prompt = f"User: {user_text}\nAssistant:"
17
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
18
 
19
- with torch.no_grad(): # Giảm tải tính toán
20
- output = model.generate(input_ids, max_length=150, do_sample=True, temperature=0.7)
21
-
22
- response = tokenizer.decode(output[0], skip_special_tokens=True)
23
- return response.replace(prompt, "").strip()
24
-
25
- @app.route("/")
26
- def home():
27
- return "TinyLlama Chatbot API đang chạy!"
28
-
29
- @app.route("/chat", methods=["POST"])
30
- def chat():
31
- try:
32
- data = request.json
33
- user_message = data.get("message", "").strip()
34
-
35
- if not user_message:
36
- return jsonify({"error": "Tin nhắn không được để trống!"}), 400
37
-
38
- bot_reply = get_answer(user_message)
39
- return jsonify({"FROM": "AI Assistant", "MESSAGE": bot_reply})
40
-
41
- except Exception as e:
42
- return jsonify({"error": str(e)}), 500
43
-
44
- if __name__ == "__main__":
45
- app.run(host="0.0.0.0", port=7860, debug=True)
 
1
+ import gradio as gr
2
+ from transformers import pipeline
 
 
3
 
4
+ # Load chatbot pipeline từ Hugging Face
5
+ chatbot = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
6
 
7
+ # Hàm xử lý chat
8
+ def chat_with_ai(user_message):
9
+ prompt = f"User: {user_message}\nAssistant:"
10
+ response = chatbot(prompt, max_length=200, do_sample=True, temperature=0.7)
11
+ return response[0]["generated_text"].split("Assistant:")[-1].strip()
12
 
13
+ # Giao diện Gradio
14
+ iface = gr.Interface(fn=chat_with_ai, inputs="text", outputs="text", title="TinyLlama Chatbot")
 
 
15
 
16
+ # Chạy ứng dụng
17
+ iface.launch()