|
|
import os

import requests

from .Ingesting_phase import Doc_Vectorizer
from .LLM_Inference import get_response
|
|
|
|
|
|
|
|
# Module-level vectorizer shared by every function below; judging from
# reset_database it holds the document stores (vectorized_docs,
# original_docs) and the embedding matrix (vectors).
dv=Doc_Vectorizer()
|
|
|
|
|
|
|
|
def reset_database():
    """Wipe the shared vectorizer's state back to empty.

    Clears both document stores in place and drops the embedding
    matrix so the module behaves as if nothing was ever ingested.
    """
    # Clear in place (not reassign) so any external aliases of these
    # containers observe the reset too.
    for store in (dv.vectorized_docs, dv.original_docs):
        store.clear()
    dv.vectors = None
|
|
|
|
|
|
|
|
def initialize(file_name): |
|
|
file_type= file_name.split(".")[-1] |
|
|
return dv.process_and_add_documents(file_path=file_name, file_type=file_type) |
|
|
|
|
|
|
|
|
def chat(user_query, is_debug= False):
    """Answer *user_query* using retrieved document context.

    Looks up the best-matching documents in the shared vectorizer,
    joins the top originals into a context string, and hands both
    query and context to the LLM.

    Parameters
    ----------
    user_query : str
        The question to answer.
    is_debug : bool
        When True, print the retrieved context before generating.

    Returns
    -------
    The response produced by ``get_response`` (project-defined).
    """
    # Only the original (human-readable) matches feed the prompt; the
    # processed variants are unused here.
    original_best_match, _processed = dv.find_best_matches(user_query)
    context = "\n\n".join(original_best_match[0])
    if is_debug:
        print(f"Context: {context}")
    return get_response(user_query, context)
|
|
|
|
|
|
|
|
|