fernand124 committed on
Commit 16f35b6 (verified)
1 Parent(s): f04d836

Update app.py

Files changed (1)
  1. app.py +67 -50
app.py CHANGED
@@ -1,64 +1,81 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# Correct model name on Hugging Face
+model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"
+
+print("Loading model... This can take a few minutes the first time.")
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+# Load the model with optimizations for limited resources
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype=torch.float16,  # use float16 to save memory
+    device_map="auto",
+    trust_remote_code=True,
+    low_cpu_mem_usage=True
+)
+
+print("Model loaded successfully!")
+
+def generate_response(prompt, max_length=200, temperature=0.7):
+    # Format the prompt with DeepSeek Coder's instruction template
+    formatted_prompt = f"### Instruction:\n{prompt}\n\n### Response:\n"
+
+    # Move the input ids to the same device as the model
+    inputs = tokenizer.encode(formatted_prompt, return_tensors="pt").to(model.device)
+
+    with torch.no_grad():
+        outputs = model.generate(
+            inputs,
+            max_length=len(inputs[0]) + max_length,
+            temperature=temperature,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id,
+            eos_token_id=tokenizer.eos_token_id,
+            repetition_penalty=1.1
+        )
+
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    # Keep only the generated answer, dropping the echoed prompt
+    response = response.split("### Response:\n")[-1].strip()
+    return response
+
+# Gradio interface
+interface = gr.Interface(
+    fn=generate_response,
+    inputs=[
+        gr.Textbox(
+            label="Prompt",
+            placeholder="Type your programming question or code...",
+            lines=3
+        ),
         gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
+            minimum=50,
+            maximum=500,
+            value=200,
+            label="Maximum response length"
         ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=2.0,
+            value=0.7,
+            step=0.1,
+            label="Temperature (creativity)"
+        )
     ],
+    outputs=gr.Textbox(label="DeepSeek Coder response", lines=10),
+    title="🚀 DeepSeek Coder 1.3B",
+    description="DeepSeek coding model running on Hugging Face Spaces. Perfect for help with code, explanations, and debugging.",
+    examples=[
+        ["Write a Python function to compute Fibonacci"],
+        ["How can I build a REST API with FastAPI?"],
+        ["Explain what this code does: for i in range(10): print(i**2)"],
+        ["Create a JavaScript function to validate emails"]
+    ]
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    interface.launch()
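One side effect of this change: the old `InferenceClient` version streamed tokens as they arrived, while the new `generate_response` returns the whole answer at once. If streaming is wanted back, transformers' `TextIteratorStreamer` is one way to get it. A minimal sketch, assuming the `model` and `tokenizer` loaded in app.py above; the `generate_stream` name is hypothetical, not part of this commit:

# Sketch: streaming generation with TextIteratorStreamer.
# Assumes `model` and `tokenizer` are the objects loaded in app.py above.
from threading import Thread

from transformers import TextIteratorStreamer

def generate_stream(prompt, max_new_tokens=200, temperature=0.7):
    formatted = f"### Instruction:\n{prompt}\n\n### Response:\n"
    inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # generate() blocks, so run it in a background thread and read the streamer
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    text = ""
    for chunk in streamer:
        text += chunk
        yield text  # each yield updates the output progressively

A generator like this can be passed to gr.Interface in place of generate_response; Gradio treats each yielded string as a progressive update to the output textbox.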
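Once deployed, the Space can also be called from other code through the endpoint this gr.Interface exposes, using the `gradio_client` package. A sketch, assuming the Space is public; the Space id below is a placeholder, not taken from this commit:

# Sketch: calling the deployed Space remotely with gradio_client.
from gradio_client import Client

# Placeholder Space id; substitute the real "user/space-name".
client = Client("fernand124/deepseek-coder")

# Positional arguments follow the Interface inputs:
# prompt, maximum response length, temperature.
result = client.predict(
    "Write a Python function to compute Fibonacci",
    200,
    0.7,
    api_name="/predict",
)
print(result)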