Spaces:
Sleeping
Sleeping
Юра Цепліцький
committed on
Commit
·
816e0e9
1
Parent(s):
03ed0d1
Switch to openai
Browse files
- app.py +2 -1
- data/{2105.07464v6.pdf → paper.pdf} +0 -0
- main.py +3 -3
- uploaded_files/paper.pdf +0 -0
- utils/constant.py +0 -2
- utils/retriever.py +1 -1
- utils/settings.py +9 -3
app.py
CHANGED
|
@@ -11,7 +11,8 @@ model_config = ConfigDict(protected_namespaces=())
|
|
| 11 |
setting_keys = gr.Interface(
|
| 12 |
fn=set_keys,
|
| 13 |
inputs=[
|
| 14 |
-
gr.Textbox(label="Enter your CO_API_KEY"),
|
|
|
|
| 15 |
gr.Textbox(label="Enter your LLAMA_CLOUD_API_KEY"),
|
| 16 |
],
|
| 17 |
outputs=gr.Textbox(label="Status")
|
|
|
|
| 11 |
setting_keys = gr.Interface(
|
| 12 |
fn=set_keys,
|
| 13 |
inputs=[
|
| 14 |
+
#gr.Textbox(label="Enter your CO_API_KEY"),
|
| 15 |
+
gr.Textbox(label="Enter your OPENAI_API_KEY"),
|
| 16 |
gr.Textbox(label="Enter your LLAMA_CLOUD_API_KEY"),
|
| 17 |
],
|
| 18 |
outputs=gr.Textbox(label="Status")
|
data/{2105.07464v6.pdf → paper.pdf}
RENAMED
|
File without changes
|
main.py
CHANGED
|
@@ -2,12 +2,12 @@ from utils.retriever import get_query_engine
|
|
| 2 |
from utils.index import create_index
|
| 3 |
from utils.constant import INDEX_PATH, DATA_PATH
|
| 4 |
import os
|
| 5 |
-
from pathlib import Path
|
| 6 |
import shutil
|
| 7 |
|
| 8 |
-
def set_keys(co_api_key: str, llama_cloud_api_key: str) -> str:
|
| 9 |
try:
|
| 10 |
-
os.environ["CO_API_KEY"] = co_api_key
|
|
|
|
| 11 |
os.environ["LLAMA_CLOUD_API_KEY"] = llama_cloud_api_key
|
| 12 |
return "Keys are set successfully"
|
| 13 |
|
|
|
|
| 2 |
from utils.index import create_index
|
| 3 |
from utils.constant import INDEX_PATH, DATA_PATH
|
| 4 |
import os
|
|
|
|
| 5 |
import shutil
|
| 6 |
|
| 7 |
+
def set_keys(openai_api_key: str, llama_cloud_api_key: str) -> str:
|
| 8 |
try:
|
| 9 |
+
#os.environ["CO_API_KEY"] = co_api_key
|
| 10 |
+
os.environ["OPENAI_API_KEY"] = openai_api_key
|
| 11 |
os.environ["LLAMA_CLOUD_API_KEY"] = llama_cloud_api_key
|
| 12 |
return "Keys are set successfully"
|
| 13 |
|
uploaded_files/paper.pdf
DELETED
|
Binary file (775 kB)
|
|
|
utils/constant.py
CHANGED
|
@@ -3,5 +3,3 @@ INDEX_PATH = "./index"
|
|
| 3 |
|
| 4 |
TOP_K_RETRIEVAL = 10
|
| 5 |
TOP_N_RERANKER = 3
|
| 6 |
-
|
| 7 |
-
EMBEDDING_MODEL = "sentence-transformers/all-mpnet-base-v2"
|
|
|
|
| 3 |
|
| 4 |
TOP_K_RETRIEVAL = 10
|
| 5 |
TOP_N_RERANKER = 3
|
|
|
|
|
|
utils/retriever.py
CHANGED
|
@@ -30,7 +30,7 @@ class QueryEngineManager:
|
|
| 30 |
configure_settings()
|
| 31 |
self.index = load_index(path=INDEX_PATH)
|
| 32 |
self.nodes = list(self.index.docstore.docs.values())
|
| 33 |
-
self.reranker = LLMRerank(top_n=
|
| 34 |
|
| 35 |
def get_engine(self, bm25: bool = False, semantic: bool = False):
|
| 36 |
if bm25:
|
|
|
|
| 30 |
configure_settings()
|
| 31 |
self.index = load_index(path=INDEX_PATH)
|
| 32 |
self.nodes = list(self.index.docstore.docs.values())
|
| 33 |
+
self.reranker = LLMRerank(top_n=TOP_N_RERANKER)
|
| 34 |
|
| 35 |
def get_engine(self, bm25: bool = False, semantic: bool = False):
|
| 36 |
if bm25:
|
utils/settings.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
from llama_index.core import Settings
|
| 2 |
-
from llama_index.llms.cohere import Cohere
|
|
|
|
| 3 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
| 4 |
from llama_index.core.node_parser import SemanticSplitterNodeParser
|
| 5 |
|
|
@@ -15,9 +16,14 @@ def load_llm():
|
|
| 15 |
- Do not include information from external sources not provided by the user.
|
| 16 |
'''
|
| 17 |
|
| 18 |
-
llm = Cohere(
|
| 19 |
-
system_prompt=system_prompt,
|
| 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
)
|
| 22 |
|
| 23 |
return llm
|
|
|
|
| 1 |
from llama_index.core import Settings
|
| 2 |
+
#from llama_index.llms.cohere import Cohere
|
| 3 |
+
from llama_index.llms.openai import OpenAI
|
| 4 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
| 5 |
from llama_index.core.node_parser import SemanticSplitterNodeParser
|
| 6 |
|
|
|
|
| 16 |
- Do not include information from external sources not provided by the user.
|
| 17 |
'''
|
| 18 |
|
| 19 |
+
# llm = Cohere(
|
| 20 |
+
# system_prompt=system_prompt,
|
| 21 |
|
| 22 |
+
# )
|
| 23 |
+
|
| 24 |
+
llm = OpenAI(
|
| 25 |
+
model = "gpt-4o-mini",
|
| 26 |
+
system_prompt=system_prompt
|
| 27 |
)
|
| 28 |
|
| 29 |
return llm
|