Javier-Jimenez99 commited on
Commit
a50f13f
·
1 Parent(s): e78424a

Actualizar la conexión SSE a través de variables de entorno y ajustar el puerto del servidor Gradio

Browse files
Files changed (1) hide show
  1. app.py +7 -3
app.py CHANGED
@@ -8,12 +8,16 @@ from langchain_openai import ChatOpenAI
8
  from langchain_ollama.chat_models import ChatOllama
9
  import gradio as gr
10
  import re
 
 
 
 
11
 
12
  async def initialize_agent():
13
  #connection = SSEConnection(url="https://javier-jimenez99-owlbear-mcp-server.hf.space/gradio_api/mcp/sse", transport="sse")
14
- connection = SSEConnection(url="http://localhost:7861/gradio_api/mcp/sse", transport="sse")
15
  tools = await load_mcp_tools(session=None,connection=connection)
16
- llm = ChatOllama(model="qwen3:8b")
17
  agent = create_react_agent(llm, tools)
18
  return agent
19
 
@@ -55,4 +59,4 @@ demo = gr.Interface(
55
  outputs="text",
56
  )
57
 
58
- demo.launch(server_port=7860)
 
8
  from langchain_ollama.chat_models import ChatOllama
9
  import gradio as gr
10
  import re
11
+ from dotenv import load_dotenv
12
+ import os
13
+
14
+ load_dotenv()
15
 
16
async def initialize_agent():
    """Build a ReAct agent backed by MCP tools served over SSE.

    Reads configuration from the environment (expected to be populated by
    ``load_dotenv()`` at module import):
      - ``MCP_SERVER_URL``: SSE endpoint of the MCP server (required).
      - ``OLLAMA_MODEL``: Ollama chat model name (defaults to ``qwen3:8b``).

    Returns:
        The agent produced by ``create_react_agent``.

    Raises:
        ValueError: if ``MCP_SERVER_URL`` is not set — fail fast with a clear
            message instead of passing ``url=None`` to ``SSEConnection``.
    """
    # Example hosted endpoint (kept for reference):
    # https://javier-jimenez99-owlbear-mcp-server.hf.space/gradio_api/mcp/sse
    url = os.getenv("MCP_SERVER_URL")
    if not url:
        raise ValueError("MCP_SERVER_URL environment variable is not set")
    connection = SSEConnection(url=url, transport="sse")
    tools = await load_mcp_tools(session=None, connection=connection)
    llm = ChatOllama(model=os.getenv("OLLAMA_MODEL", "qwen3:8b"))
    agent = create_react_agent(llm, tools)
    return agent
23
 
 
59
  outputs="text",
60
  )
61
 
62
# os.getenv returns a *string* when GRADIO_PORT is set in the environment,
# but Gradio's server_port must be an int — cast explicitly. int() also
# accepts the integer default 7860 used when the variable is absent.
demo.launch(server_port=int(os.getenv("GRADIO_PORT", 7860)))