Kims12 commited on
Commit
4eb05a1
·
verified ·
1 Parent(s): 8110c29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -10
app.py CHANGED
@@ -1,13 +1,20 @@
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
- # λͺ¨λΈ λ‘œλ“œ
 
 
 
5
  model_id = "CohereForAI/c4ai-command-r-plus-08-2024"
6
- tokenizer = AutoTokenizer.from_pretrained(model_id)
7
- model = AutoModelForCausalLM.from_pretrained(model_id)
8
 
9
  def translate_code(english_code):
10
- # μ‚¬μš©μž λ©”μ‹œμ§€ ν¬λ§·νŒ…
 
 
 
11
  messages = [{"role": "user", "content": english_code}]
12
  input_ids = tokenizer.apply_chat_template(
13
  messages,
@@ -16,7 +23,7 @@ def translate_code(english_code):
16
  return_tensors="pt"
17
  )
18
 
19
- # λͺ¨λΈμ„ μ‚¬μš©ν•˜μ—¬ λ²ˆμ—­ 생성
20
  gen_tokens = model.generate(
21
  input_ids,
22
  max_new_tokens=200,
@@ -24,19 +31,19 @@ def translate_code(english_code):
24
  temperature=0.3,
25
  )
26
 
27
- # μƒμ„±λœ 토큰을 ν…μŠ€νŠΈλ‘œ λ””μ½”λ”©
28
  gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
29
 
30
- # λ²ˆμ—­λœ ν…μŠ€νŠΈ λ°˜ν™˜
31
  return gen_text
32
 
33
  # Gradio μΈν„°νŽ˜μ΄μŠ€ μ„€μ •
34
  iface = gr.Interface(
35
  fn=translate_code,
36
- inputs=gr.inputs.Textbox(lines=10, label="μ˜μ–΄ μ½”λ“œ μž…λ ₯"),
37
- outputs=gr.outputs.Textbox(label="ν•œκ΅­μ–΄ λ²ˆμ—­"),
38
  title="μ½”λ“œ λ²ˆμ—­κΈ°",
39
- description="μ˜μ–΄λ‘œ μž‘μ„±λœ μ½”λ“œλ₯Ό ν•œκ΅­μ–΄λ‘œ λ²ˆμ—­ν•©λ‹ˆλ‹€."
40
  )
41
 
42
  if __name__ == "__main__":
 
1
import os

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Read the Hugging Face access token from the environment.
# May be None if unset; from_pretrained then falls back to cached
# credentials or anonymous access.
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Load the tokenizer and model once at import time.
# NOTE: `use_auth_token` is deprecated in recent transformers releases
# (removed in newer versions); `token` is the supported keyword.
model_id = "CohereForAI/c4ai-command-r-plus-08-2024"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HUGGINGFACE_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, token=HUGGINGFACE_TOKEN)
12
 
13
  def translate_code(english_code):
14
+ """
15
+ μ˜μ–΄ μ½”λ“œλ₯Ό ν•œκ΅­μ–΄λ‘œ λ²ˆμ—­ν•˜λŠ” ν•¨μˆ˜μž…λ‹ˆλ‹€.
16
+ """
17
+ # λ©”μ‹œμ§€ ν¬λ§·νŒ…
18
  messages = [{"role": "user", "content": english_code}]
19
  input_ids = tokenizer.apply_chat_template(
20
  messages,
 
23
  return_tensors="pt"
24
  )
25
 
26
+ # ν…μŠ€νŠΈ 생성
27
  gen_tokens = model.generate(
28
  input_ids,
29
  max_new_tokens=200,
 
31
  temperature=0.3,
32
  )
33
 
34
+ # μƒμ„±λœ ν…μŠ€νŠΈ λ””μ½”λ”©
35
  gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
36
 
37
+ # λ²ˆμ—­λœ ν•œκ΅­μ–΄ ν…μŠ€νŠΈ λ°˜ν™˜
38
  return gen_text
39
 
40
  # Gradio μΈν„°νŽ˜μ΄μŠ€ μ„€μ •
41
  iface = gr.Interface(
42
  fn=translate_code,
43
+ inputs=gr.inputs.Textbox(lines=10, placeholder="μ˜μ–΄ μ½”λ“œλ₯Ό μž…λ ₯ν•˜μ„Έμš”..."),
44
+ outputs=gr.outputs.Textbox(),
45
  title="μ½”λ“œ λ²ˆμ—­κΈ°",
46
+ description="μ˜μ–΄λ‘œ μž‘μ„±λœ μ½”λ“œλ₯Ό ν•œκ΅­μ–΄λ‘œ λ²ˆμ—­ν•΄λ“œλ¦½λ‹ˆλ‹€."
47
  )
48
 
49
  if __name__ == "__main__":