Update app.py
app.py CHANGED
@@ -3,13 +3,14 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import os
 import shutil
+import spaces
 
 model_name = "minoD/JURAN"
 
 # Load the model
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
-    device_map="
+    device_map="cpu",
     torch_dtype=torch.float16
 
 )
@@ -32,6 +33,7 @@ def generate_prompt(F):
 
 # Define the text generation function
 def generate2(F=None, maxTokens=256):
+    model.to("cuda")
     # Inference
     prompt = generate_prompt(F)
     input_ids = tokenizer(prompt,
@@ -81,4 +83,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-
+    iface.launch()
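Taken together, the hunks read like the usual ZeroGPU migration on Hugging Face Spaces: load the model on CPU at import time so the Space can start without holding a GPU, then move the weights to CUDA only inside the request handler. A minimal sketch of that pattern follows. The @spaces.GPU decorator is an assumption: `import spaces` strongly suggests it, but the shown hunks never display it, and the sketch simplifies the app's generate2(F, maxTokens)/generate_prompt pair, which is only partially visible.

import torch
import spaces  # ZeroGPU helper package available on Hugging Face Spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "minoD/JURAN"

tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load on CPU so startup does not require a GPU allocation.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="cpu",
    torch_dtype=torch.float16,
)

@spaces.GPU  # assumption: requests a GPU only for the duration of this call
def generate2(prompt, max_tokens=256):
    # Move the weights onto the GPU allocated for this request,
    # and keep the input tensors on the same device.
    model.to("cuda")
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=max_tokens)
    return tokenizer.decode(output[0], skip_special_tokens=True)

The design choice this enables: a ZeroGPU Space only pays for GPU time while a decorated function is executing, so anything that can run on CPU (loading, tokenization setup) stays outside the handler.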
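The last hunk completes the Gradio wiring: iface.launch() becomes the body of the __main__ guard (the line it replaces was lost in extraction). A hedged sketch of that wiring; the real gr.Interface arguments sit in lines the diff elides, so the components below are placeholders, not the app's actual configuration.

import gradio as gr

iface = gr.Interface(
    fn=generate2,                        # the handler sketched above
    inputs=gr.Textbox(label="Input"),    # placeholder component
    outputs=gr.Textbox(label="Output"),  # placeholder component
)

if __name__ == "__main__":
    iface.launch()  # the call this commit adds under the main guard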