Files changed (1)
  1. app.py +33 -64
app.py CHANGED
@@ -1,70 +1,39 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
+ from gpt4all import GPT4All
+
+ # Load the local model (requires a .gguf file in the "models/" folder)
+ MODEL_PATH = "models/mistral-7b-openorca.Q4_0.gguf"
+
+ def responder(pregunta, modo):
+     # Pick the prompt template for the selected mode
+     if modo == "Didáctico":
+         prompt = f"Explica de manera sencilla para un estudiante: {pregunta}"
+     elif modo == "Paso a paso":
+         prompt = f"Resuelve paso a paso: {pregunta}"
+     elif modo == "Examen":
+         prompt = f"Responde de forma breve, como si fuera un examen: {pregunta}"
+     else:
+         prompt = f"Responde con referencias de científicos famosos: {pregunta}"
+
+     try:
+         model = GPT4All(MODEL_PATH)
+         with model.chat_session() as session:
+             respuesta = session.generate(prompt, max_tokens=256)
+         return respuesta
+     except Exception as e:
+         return f"⚠️ Error: {str(e)}\nAsegúrate de poner el archivo .gguf en la carpeta 'models/'."
+
+ # Gradio interface
+ demo = gr.Interface(
+     fn=responder,
+     inputs=[
+         gr.Textbox(label="Tu pregunta de Química o Física"),
+         gr.Radio(["Didáctico", "Paso a paso", "Examen", "Referencias"], label="Modo")
      ],
+     outputs=gr.Textbox(label="Respuesta IA"),
+     title="ExploraLab — Tutor IA de Química y Física",
+     description="Tu asistente académico gratuito para aprender ciencias."
  )

- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
  if __name__ == "__main__":
      demo.launch()
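
Note on the new code: `GPT4All(MODEL_PATH)` runs inside `responder`, so the multi-gigabyte .gguf file is re-loaded on every request, and depending on the gpt4all version a bare string may be resolved as a model *name* to look up (and possibly download) rather than as a relative file path. Below is a minimal load-once sketch, assuming gpt4all's documented `model_name`/`model_path`/`allow_download` keyword arguments and the same `models/` layout; it is illustrative, not part of this PR:

from gpt4all import GPT4All

# Load the model once at import time; allow_download=False makes a missing
# .gguf file fail fast instead of triggering a download attempt.
MODEL = GPT4All(
    model_name="mistral-7b-openorca.Q4_0.gguf",
    model_path="models",  # directory that holds the .gguf file
    allow_download=False,
)

def responder(pregunta, modo):
    # Same mode-to-prompt mapping as the PR, shortened here to one mode.
    prompt = f"Resuelve paso a paso: {pregunta}" if modo == "Paso a paso" else pregunta
    # chat_session() scopes the conversation state to this request.
    with MODEL.chat_session():
        return MODEL.generate(prompt, max_tokens=256)

Loading at import time also means a bad model path surfaces once at startup (e.g., in the Space logs) instead of as a per-request error message.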