Spaces:
Sleeping
Sleeping
Fix OpenAI proxy issues for HuggingFace compatibility
Browse files- streamlit_app.py +15 -25
streamlit_app.py
CHANGED
|
@@ -15,6 +15,12 @@ from langchain_core.messages import AIMessage, HumanMessage
|
|
| 15 |
import requests
|
| 16 |
import json
|
| 17 |
from langchain_core.output_parsers import StrOutputParser
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
# Global variable to store ArXiv sources
|
| 20 |
ARXIV_SOURCES = []
|
|
@@ -95,39 +101,23 @@ def load_document_chunks():
|
|
| 95 |
@st.cache_resource
|
| 96 |
def get_chat_model():
|
| 97 |
"""Get the chat model for initial RAG."""
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
# DEBUG END
|
| 103 |
-
return ChatOpenAI(api_key=api_key, model="gpt-4.1-mini")
|
| 104 |
|
| 105 |
@st.cache_resource
|
| 106 |
def get_agent_model():
|
| 107 |
"""Get the more powerful model for agent and evaluation."""
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
# DEBUG END
|
| 113 |
-
return ChatOpenAI(api_key=api_key, model="gpt-4.1")
|
| 114 |
|
| 115 |
@st.cache_resource
|
| 116 |
def get_embedding_model():
|
| 117 |
"""Get the embedding model."""
|
| 118 |
-
|
| 119 |
-
from langchain_openai import OpenAIEmbeddings
|
| 120 |
-
|
| 121 |
-
# DEBUG START - HF Compatibility fix
|
| 122 |
-
api_key = os.environ.get("OPENAI_API_KEY", "")
|
| 123 |
-
print(f"DEBUG: Initializing embeddings with API key: {api_key[:4]}...")
|
| 124 |
-
# DEBUG END
|
| 125 |
-
|
| 126 |
-
# Most minimal parameters possible
|
| 127 |
-
return OpenAIEmbeddings(
|
| 128 |
-
model="text-embedding-3-small",
|
| 129 |
-
api_key=api_key
|
| 130 |
-
)
|
| 131 |
|
| 132 |
@st.cache_resource
|
| 133 |
def setup_qdrant_client():
|
|
|
|
| 15 |
import requests
import json
from langchain_core.output_parsers import StrOutputParser
from openai import OpenAI

# Remove proxy settings entirely rather than setting them to "".
# An empty-but-present HTTP(S)_PROXY variable is still *set* as far as
# httpx / the OpenAI client are concerned and can fail URL validation on
# Hugging Face Spaces — popping the variables removes the ambiguity.
# Lowercase variants are included because httpx reads those as well.
for _proxy_var in ("OPENAI_PROXY", "HTTPS_PROXY", "HTTP_PROXY",
                   "https_proxy", "http_proxy"):
    os.environ.pop(_proxy_var, None)

# Global accumulator for ArXiv source metadata collected during retrieval.
ARXIV_SOURCES = []
|
|
|
|
| 101 |
@st.cache_resource
def get_chat_model():
    """Get the chat model for initial RAG."""
    # Credentials are picked up from the OPENAI_API_KEY environment
    # variable by the client itself; `model` (not the deprecated
    # `model_name`) selects the chat model.
    chat_llm = ChatOpenAI(model="gpt-4.1-mini", temperature=0)
    return chat_llm
|
|
|
|
|
|
|
| 108 |
|
| 109 |
@st.cache_resource
def get_agent_model():
    """Get the more powerful model for agent and evaluation."""
    # API key comes from the OPENAI_API_KEY environment variable;
    # `model` (not the deprecated `model_name`) selects the chat model.
    agent_llm = ChatOpenAI(model="gpt-4.1", temperature=0)
    return agent_llm
|
|
|
|
|
|
|
| 116 |
|
| 117 |
@st.cache_resource
def get_embedding_model():
    """Get the embedding model."""
    # The client reads OPENAI_API_KEY from the environment on its own.
    embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
    return embeddings
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
@st.cache_resource
|
| 123 |
def setup_qdrant_client():
|