Andro0s commited on
Commit
b8cb739
verified
1 Parent(s): a8a8c34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -17
app.py CHANGED
@@ -1,18 +1,11 @@
1
- # ===============================
2
- # AmorCoder AI - Space de Producción
3
- # ===============================
4
  import gradio as gr
5
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
  from peft import PeftModel
7
  import torch
8
 
9
- # -------------------------------
10
- # 1️⃣ Cargar modelo base y LoRA
11
- # -------------------------------
12
  MODEL_NAME = "codellama/CodeLlama-7b-hf"
13
- LORA_PATH = "./lora_codellama" # carpeta con tus pesos LoRA
14
 
15
- print("Cargando modelo...")
16
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
17
  model = AutoModelForCausalLM.from_pretrained(
18
  MODEL_NAME,
@@ -20,10 +13,8 @@ model = AutoModelForCausalLM.from_pretrained(
20
  torch_dtype=torch.float16
21
  )
22
 
23
- print("Aplicando pesos LoRA...")
24
  model = PeftModel.from_pretrained(model, LORA_PATH)
25
 
26
- # Crear pipeline de generaci贸n
27
  codegen = pipeline(
28
  "text-generation",
29
  model=model,
@@ -34,20 +25,13 @@ codegen = pipeline(
34
  top_p=0.95
35
  )
36
 
37
- # -------------------------------
38
- # 2️⃣ Función de generación de código
39
- # -------------------------------
40
  def generar_codigo(instruccion):
41
  prompt = f"# Instrucci贸n:\n{instruccion}\n\n# C贸digo:\n"
42
  salida = codegen(prompt)[0]["generated_text"]
43
- # Extraer solo el código generado después de la instrucción
44
  if "# C贸digo:" in salida:
45
  return salida.split("# C贸digo:")[1].strip()
46
  return salida.strip()
47
 
48
- # -------------------------------
49
- # 3️⃣ Interfaz Gradio
50
- # -------------------------------
51
  iface = gr.Interface(
52
  fn=generar_codigo,
53
  inputs=gr.Textbox(lines=4, placeholder="Escribe tu instrucci贸n aqu铆..."),
 
 
 
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  from peft import PeftModel
4
  import torch
5
 
 
 
 
6
  MODEL_NAME = "codellama/CodeLlama-7b-hf"
7
+ LORA_PATH = "lora_codellama"
8
 
 
9
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
10
  model = AutoModelForCausalLM.from_pretrained(
11
  MODEL_NAME,
 
13
  torch_dtype=torch.float16
14
  )
15
 
 
16
  model = PeftModel.from_pretrained(model, LORA_PATH)
17
 
 
18
  codegen = pipeline(
19
  "text-generation",
20
  model=model,
 
25
  top_p=0.95
26
  )
27
 
 
 
 
28
def generar_codigo(instruccion):
    """Generate code from a natural-language instruction via the LoRA pipeline.

    Args:
        instruccion: Free-form instruction text entered by the user.

    Returns:
        The model's generated code with the prompt scaffolding stripped and
        surrounding whitespace removed; the raw output if the marker is absent.
    """
    prompt = f"# Instrucci贸n:\n{instruccion}\n\n# C贸digo:\n"
    # The text-generation pipeline returns a list of dicts; take the first
    # candidate's full text (which, by default, echoes the prompt).
    salida = codegen(prompt)[0]["generated_text"]
    # Split at most once: a bare split()[1] splits on EVERY occurrence, so if
    # the model itself emits the marker again, everything after that second
    # marker would be silently dropped. maxsplit=1 keeps the whole tail.
    if "# C贸digo:" in salida:
        return salida.split("# C贸digo:", 1)[1].strip()
    # Fallback: pipeline configured not to echo the prompt — return as-is.
    return salida.strip()
34
 
 
 
 
35
  iface = gr.Interface(
36
  fn=generar_codigo,
37
  inputs=gr.Textbox(lines=4, placeholder="Escribe tu instrucci贸n aqu铆..."),