Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,13 +1,20 @@
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 3 |
|
| 4 |
-
#
|
|
|
|
|
|
|
|
|
|
| 5 |
model_id = "CohereForAI/c4ai-command-r-plus-08-2024"
|
| 6 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 7 |
-
model = AutoModelForCausalLM.from_pretrained(model_id)
|
| 8 |
|
| 9 |
def translate_code(english_code):
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
| 11 |
messages = [{"role": "user", "content": english_code}]
|
| 12 |
input_ids = tokenizer.apply_chat_template(
|
| 13 |
messages,
|
|
@@ -16,7 +23,7 @@ def translate_code(english_code):
|
|
| 16 |
return_tensors="pt"
|
| 17 |
)
|
| 18 |
|
| 19 |
-
#
|
| 20 |
gen_tokens = model.generate(
|
| 21 |
input_ids,
|
| 22 |
max_new_tokens=200,
|
|
@@ -24,19 +31,19 @@ def translate_code(english_code):
|
|
| 24 |
temperature=0.3,
|
| 25 |
)
|
| 26 |
|
| 27 |
-
# 생성된
|
| 28 |
gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
|
| 29 |
|
| 30 |
-
# 번역된 텍스트 반환
|
| 31 |
return gen_text
|
| 32 |
|
| 33 |
# Gradio 인터페이스 설정
|
| 34 |
iface = gr.Interface(
|
| 35 |
fn=translate_code,
|
| 36 |
-
inputs=gr.inputs.Textbox(lines=10,
|
| 37 |
-
outputs=gr.outputs.Textbox(
|
| 38 |
title="코드 번역기",
|
| 39 |
-
description="영어로 작성된 코드를 한국어로 번역
|
| 40 |
)
|
| 41 |
|
| 42 |
if __name__ == "__main__":
|
|
|
|
| 1 |
+
import os
|
| 2 |
import gradio as gr
|
| 3 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 4 |
|
| 5 |
+
# 환경 변수에서 Hugging Face 토큰을 가져옵니다.
|
| 6 |
+
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
|
| 7 |
+
|
| 8 |
+
# 모델과 토크나이저를 로드합니다.
|
| 9 |
model_id = "CohereForAI/c4ai-command-r-plus-08-2024"
|
| 10 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
|
| 11 |
+
model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
|
| 12 |
|
| 13 |
def translate_code(english_code):
|
| 14 |
+
"""
|
| 15 |
+
영어 코드를 한국어로 번역하는 함수입니다.
|
| 16 |
+
"""
|
| 17 |
+
# 메시지 포맷팅
|
| 18 |
messages = [{"role": "user", "content": english_code}]
|
| 19 |
input_ids = tokenizer.apply_chat_template(
|
| 20 |
messages,
|
|
|
|
| 23 |
return_tensors="pt"
|
| 24 |
)
|
| 25 |
|
| 26 |
+
# 텍스트 생성
|
| 27 |
gen_tokens = model.generate(
|
| 28 |
input_ids,
|
| 29 |
max_new_tokens=200,
|
|
|
|
| 31 |
temperature=0.3,
|
| 32 |
)
|
| 33 |
|
| 34 |
+
# 생성된 텍스트 디코딩
|
| 35 |
gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
|
| 36 |
|
| 37 |
+
# 번역된 한국어 텍스트 반환
|
| 38 |
return gen_text
|
| 39 |
|
| 40 |
# Gradio 인터페이스 설정
|
| 41 |
iface = gr.Interface(
|
| 42 |
fn=translate_code,
|
| 43 |
+
inputs=gr.inputs.Textbox(lines=10, placeholder="영어 코드를 입력하세요..."),
|
| 44 |
+
outputs=gr.outputs.Textbox(),
|
| 45 |
title="코드 번역기",
|
| 46 |
+
description="영어로 작성된 코드를 한국어로 번역해드립니다."
|
| 47 |
)
|
| 48 |
|
| 49 |
if __name__ == "__main__":
|