Update app.py
app.py CHANGED
@@ -1,30 +1,47 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
 import os
-import subprocess
+# import subprocess  # not used in the current code, so it can be removed
 import torch
 from huggingface_hub import login
 
-
-
+# Get the access token from an environment variable
+token = os.environ.get("HF_TOKEN")  # conventionally set under the name "HF_TOKEN"
+if token:
+    login(token)
+else:
+    print("The HF_TOKEN environment variable is not set. Model downloads may fail.")
 
 
-# ---------- STEP 1: Fine-tuned model
-
+# ---------- STEP 1: Fine-tuned model info ----------
+repo_id = "DMID23/MachineToolAgent"  # model repository ID
 
 
-# ---------- STEP 2:
-
-#
+# ---------- STEP 2: Quantization config and model loading ----------
+
+# 8-bit quantization config (usable even in a CPU environment)
+# The load_in_8bit=True option alone would create and apply a BitsAndBytesConfig object automatically.
+# On CPU the quantization that occurs is mainly float32 -> int8.
+quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+
 model = AutoModelForCausalLM.from_pretrained(
-
-
+    repo_id,
+    quantization_config=quantization_config,  # apply the quantization config
+    torch_dtype=torch.float32,  # even with 8-bit loading, compute may run in float32 or mixed precision,
+                                # but actual memory use stays at the 8-bit level
+    device_map="auto"  # place each layer on the best available device (CPU/GPU);
+                       # with CPU only, the whole model loads on the CPU
 )
-
+print("Model loaded successfully.")
+
+# If the DMID23/MachineToolAgent repo ships its own tokenizer, loading it via repo_id works here.
+tokenizer = AutoTokenizer.from_pretrained(repo_id)
 
+# When creating the pipe, device=-1 (CPU) is set explicitly
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
 
 # ---------- STEP 3: Gradio function definitions ----------
+# (no changes in this part)
 def generate_response(prompt, max_length=256, temperature=0.7):
     # limit max_length to keep generation fast
     outputs = pipe(
@@ -39,6 +56,7 @@ def generate_response(prompt, max_length=256, temperature=0.7):
     return outputs[0]["generated_text"]
 
 # ---------- STEP 4: Gradio UI ----------
+# (no changes in this part)
 with gr.Blocks() as demo:
     gr.Markdown("# 🚀 Fine-tuned Mistral-7B (CPU Optimized)")
 
@@ -59,4 +77,4 @@ with gr.Blocks() as demo:
     )
 
 # ---------- STEP 5: Launch ----------
-demo.launch()
+demo.launch()
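The hunk gap hides the body of the pipe(...) call (new lines 48 to 55). For reference, a minimal sketch of what a transformers text-generation pipeline call of this shape typically looks like, reusing the pipe object defined above; the exact keyword choices are illustrative, not the Space's actual code:

# Hypothetical sketch of the elided call; assumes the `pipe` created above.
def generate_response(prompt, max_length=256, temperature=0.7):
    # limit max_length to keep generation fast on CPU
    outputs = pipe(
        prompt,
        max_length=max_length,      # upper bound on prompt + generated tokens
        temperature=temperature,    # sampling temperature
        do_sample=True,             # sampling must be enabled for temperature to matter
        num_return_sequences=1,
    )
    return outputs[0]["generated_text"]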
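The Gradio UI body (new lines 63 to 77) is also elided by the diff. A minimal gr.Blocks sketch of how such a UI is usually wired to generate_response; every widget name and range here is an assumption, not the Space's real layout:

# Hypothetical reconstruction of the elided STEP 4 block; widget names are illustrative.
with gr.Blocks() as demo:
    gr.Markdown("# 🚀 Fine-tuned Mistral-7B (CPU Optimized)")
    prompt = gr.Textbox(label="Prompt", lines=4)
    max_length = gr.Slider(32, 1024, value=256, step=32, label="max_length")
    temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="temperature")
    output = gr.Textbox(label="Response", lines=8)
    generate_btn = gr.Button("Generate")
    generate_btn.click(generate_response,
                       inputs=[prompt, max_length, temperature],
                       outputs=output)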
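One caveat on STEP 2: bitsandbytes 8-bit loading has generally required a CUDA device, so on a CPU-only Space the BitsAndBytesConfig path may fail at load time depending on the installed bitsandbytes version. A plain float32 load is the usual fallback; a sketch under that assumption, reusing the repo_id defined above:

# Fallback sketch if 8-bit loading is unavailable: full-precision CPU load.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    repo_id,                    # same repository ID as in the diff
    torch_dtype=torch.float32,  # full precision: roughly 4 bytes of RAM per parameter
    low_cpu_mem_usage=True,     # load weights incrementally to cap peak memory
)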