Update app.py
app.py CHANGED
@@ -5,69 +5,33 @@ import streamlit as st
 from huggingface_hub import HfApi, login
 from dotenv import load_dotenv
 
-from download_repo import download_gitlab_repo_to_hfspace
-from process_repo import extract_repo_files
-from chunking import chunk_pythoncode_and_add_metadata, chunk_text_and_add_metadata
-from vectorstore import setup_vectorstore
 from llm import get_groq_llm
 from vectorstore import get_chroma_vectorstore
 from embeddings import get_SFR_Code_embedding_model
 from kadi_apy_bot import KadiAPYBot
-from repo_versions import store_message_from_json
 
 # Load environment variables from .env file
 load_dotenv()
 
-
-
-with open("config.json", "r") as file:
-    config = json.load(file)
+vectorstore_path = "data/vectorstore"
 
 GROQ_API_KEY = os.environ["GROQ_API_KEY"]
 HF_TOKEN = os.environ["HF_Token"]
 
-
-
-CHUNK_SIZE = config["chunking"]["chunk_size"]
-CHUNK_OVERLAP = config["chunking"]["chunk_overlap"]
-
-EMBEDDING_MODEL_NAME = config["embedding_model"]["name"]
-EMBEDDING_MODEL_VERSION = config["embedding_model"]["version"]
-
-LLM_MODEL_NAME = config["llm_model"]["name"]
-LLM_MODEL_TEMPERATURE = config["llm_model"]["temperature"]
-
-GITLAB_API_URL = config["gitlab"]["api_url"]
-GITLAB_PROJECT_ID = config["gitlab"]["project id"]
-GITLAB_PROJECT_VERSION = config["gitlab"]["project version"]
-
-DATA_DIR = config["data_dir"]
-HF_SPACE_NAME = config["hf_space_name"]
+with open("config.json", "r") as file:
+    config = json.load(file)
 
 login(HF_TOKEN)
 hf_api = HfApi()
 
+# Access the values
+LLM_MODEL_NAME = config["llm_model_name"]
+LLM_MODEL_TEMPERATURE = float(config["llm_model_temperature"])
 
 def initialize():
     global kadiAPY_bot
 
-
-
-    # download_gitlab_repo_to_hfspace(GITLAB_API_URL, GITLAB_PROJECT_ID, GITLAB_PROJECT_VERSION, DATA_DIR, hf_api, HF_SPACE_NAME)
-
-    # code_texts, code_references = extract_repo_files(DATA_DIR, ['kadi_apy'], [])
-    # doc_texts, doc_references = extract_repo_files(DATA_DIR, ['docs'], [])
-
-    # print("Length of code_texts: ", len(code_texts))
-    # print("Length of doc_files: ", len(doc_texts))
-
-    # code_chunks = chunk_pythoncode_and_add_metadata(code_texts, code_references)
-    # doc_chunks = chunk_text_and_add_metadata(doc_texts, doc_references, CHUNK_SIZE, CHUNK_OVERLAP)
-
-    # print(f"Total number of code_chunks: {len(code_chunks)}")
-    # print(f"Total number of doc_chunks: {len(doc_chunks)}")
-
-    vectorstore = get_chroma_vectorstore(get_SFR_Code_embedding_model(), "data/vectorstore")
+    vectorstore = get_chroma_vectorstore(get_SFR_Code_embedding_model(), vectorstore_path)
     llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)
 
     kadiAPY_bot = KadiAPYBot(llm, vectorstore)
@@ -75,7 +39,6 @@ def initialize():
 initialize()
 
 
-
 def bot_kadi(history):
     user_query = history[-1][0]
     response = kadiAPY_bot.process_query(user_query)