Update app.py
Browse files
app.py
CHANGED
|
@@ -2,34 +2,37 @@ import gradio as gr
|
|
| 2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 3 |
from huggingface_hub import login
|
| 4 |
import torch
|
| 5 |
-
import os
|
| 6 |
|
| 7 |
-
# ุงูุญุตูู ุนูู ุงูุชููู ู
ู Secrets
|
| 8 |
hf_token = os.getenv("HF_TOKEN")
|
| 9 |
|
| 10 |
-
# ุชุณุฌูู ุงูุฏุฎูู ุฅูู Hugging Face
|
| 11 |
if hf_token:
|
| 12 |
login(token=hf_token)
|
| 13 |
else:
|
| 14 |
raise ValueError("Hugging Face token is missing. Please check your secrets.")
|
| 15 |
|
| 16 |
-
# ุงุณุชุฎุฏุงู
ูู
ูุฐุฌ TinyLlama ุงูุฃุฎู
|
| 17 |
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
| 18 |
|
| 19 |
-
# ุชุญุฏูุฏ ุงูุฌูุงุฒ ุงูู
ูุงุณุจ (GPU ุฃู CPU)
|
| 20 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 21 |
|
| 22 |
-
# ุชุญู
ูู ุงููู
ูุฐุฌ ูุงูู
ุญูู
|
| 23 |
model = AutoModelForCausalLM.from_pretrained(
|
| 24 |
model_name,
|
| 25 |
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
|
| 26 |
device_map="auto",
|
| 27 |
-
token=hf_token,
|
| 28 |
trust_remote_code=True
|
| 29 |
)
|
|
|
|
| 30 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 31 |
|
| 32 |
-
#
|
|
|
|
|
|
|
|
|
|
| 33 |
custom_responses = {
|
| 34 |
"ู
ุง ูู ูุฏููุ": "ูุฏูู ูู ุชูุฏูู
ุงูู
ุณุงุนุฏุฉ ูุงูุฅุฌุงุจุฉ ุนูู ุฃุณุฆูุชู ุจุฃูุถู ุทุฑููุฉ ู
ู
ููุฉ.",
|
| 35 |
"who created you?": "I was created by George.",
|
|
@@ -42,8 +45,11 @@ custom_responses = {
|
|
| 42 |
"ไฝ ็็ผ็จ่
ๆฏ่ฐ๏ผ": "ๆ็็ผ็จ่
ๆฏ Georgeใ",
|
| 43 |
}
|
| 44 |
|
| 45 |
-
# ุฏุงูุฉ ุงูุฏุฑุฏุดุฉ
|
| 46 |
def chatbot(user_input):
|
|
|
|
|
|
|
|
|
|
| 47 |
user_input = user_input.lower()
|
| 48 |
|
| 49 |
# ุงูุชุญูู ู
ู ุงูุฑุฏูุฏ ุงูู
ุฎุตุตุฉ ุฃููุงู
|
|
@@ -52,14 +58,15 @@ def chatbot(user_input):
|
|
| 52 |
return answer
|
| 53 |
|
| 54 |
# ู
ุนุงูุฌุฉ ุงูุฅุฏุฎุงู ุจุงุณุชุฎุฏุงู
ุงููู
ูุฐุฌ
|
| 55 |
-
inputs = tokenizer(user_input, return_tensors="pt").to(device)
|
| 56 |
|
| 57 |
-
with torch.no_grad(): # ุช
|
| 58 |
-
output = model.generate(**inputs, max_length=
|
| 59 |
|
| 60 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
|
| 61 |
return response
|
| 62 |
|
| 63 |
-
# ุชุดุบูู ุงููุงุฌูุฉ ู
ุน Gradio
|
| 64 |
-
iface = gr.Interface(fn=chatbot, inputs="text", outputs="text")
|
| 65 |
iface.launch(share=True)
|
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login
import torch
import os

# 🔹 Read the Hugging Face access token from the environment (Space secrets).
hf_token = os.getenv("HF_TOKEN")

# 🔹 Authenticate with the Hugging Face Hub; fail fast when no token is configured.
if not hf_token:
    raise ValueError("Hugging Face token is missing. Please check your secrets.")
login(token=hf_token)
| 15 |
|
# 🔹 Use the lightweight TinyLlama chat model.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# 🔹 Pick the best available device (GPU when present, otherwise CPU).
device = "cuda" if torch.cuda.is_available() else "cpu"

# 🔹 Load the model: fp16 on GPU halves memory, fp32 is required on CPU.
#    Reuse `device` instead of calling torch.cuda.is_available() a second time.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    device_map="auto",
    token=hf_token,  # authenticate explicitly, consistent with login() above
    trust_remote_code=True,
)

# 🔹 Load the matching tokenizer with the same credentials as the model —
#    without `token=` this call fails for gated repositories.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)

# 🔹 TinyLlama ships without a pad token; reuse EOS so padded batches work.
#    Guarded so we never clobber a pad token the tokenizer already defines.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
| 34 |
+
|
| 35 |
+
# ๐น ุฑุฏูุฏ ู
ุฎุตุตุฉ
|
| 36 |
custom_responses = {
|
| 37 |
"ู
ุง ูู ูุฏููุ": "ูุฏูู ูู ุชูุฏูู
ุงูู
ุณุงุนุฏุฉ ูุงูุฅุฌุงุจุฉ ุนูู ุฃุณุฆูุชู ุจุฃูุถู ุทุฑููุฉ ู
ู
ููุฉ.",
|
| 38 |
"who created you?": "I was created by George.",
|
|
|
|
| 45 |
"ไฝ ็็ผ็จ่
ๆฏ่ฐ๏ผ": "ๆ็็ผ็จ่
ๆฏ Georgeใ",
|
| 46 |
}
|
| 47 |
|
# 🔹 Chat handler: serve a canned answer when one matches, else generate with the model.
def chatbot(user_input):
    """Return a reply for *user_input*.

    Checks the hand-written ``custom_responses`` table first; when no entry
    matches, generates a reply with the TinyLlama model.
    """
    if not user_input.strip():  # reject empty / whitespace-only input
        return "Please enter a message."

    user_input = user_input.lower()

    # Check the custom responses before invoking the model.
    # NOTE(review): the original lookup lines were elided in the diff; this
    # substring scan is reconstructed from the surviving `return answer` —
    # confirm it matches the original matching rule.
    for question, answer in custom_responses.items():
        if question in user_input:
            return answer

    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True).to(device)

    with torch.no_grad():  # inference only — no gradients needed
        # max_new_tokens bounds the *reply* length; the original max_length=150
        # also counted the prompt, so long inputs could truncate the reply to nothing.
        output = model.generate(**inputs, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)

    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
| 69 |
|
# 🔹 Build and launch the Gradio web UI for the chatbot.
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Octagon 2.0 Chatbot",
)
iface.launch(share=True)  # share=True publishes a temporary public URL