Felipe Silva committed
Commit: ee5fb3c · Parent(s): 7b4d8d6

add decorator

Files changed: rag_utils.py (+4, -0)
rag_utils.py
CHANGED
@@ -9,6 +9,7 @@ from langchain.llms import HuggingFacePipeline
 from langchain.chat_models import ChatOpenAI
 from langchain.chains import RetrievalQA
 
+import spaces
 import config
 import torch
 print(torch.cuda.is_available())
@@ -23,6 +24,7 @@ _embedding_instance = None
 _model_instance = None
 _tokenizer = None
 
+@spaces.GPU
 def get_embedding_model():
     global _embedding_instance
     if _embedding_instance is None:
@@ -33,6 +35,7 @@ def get_embedding_model():
 
 # model_name = "Qwen/Qwen2.5-7B-Instruct-GPTQ-Int8" #"Qwen/Qwen2.5-7B-Instruct-AWQ" #"Qwen/Qwen2.5-7B-Instruct"
 
+@spaces.GPU
 def get_model():
     global _model_instance
     if _model_instance is None:
@@ -49,6 +52,7 @@ def get_model():
 
 # _model_instance.to(device)
 
+@spaces.GPU
 def get_tokenizer():
     global _tokenizer
     if _tokenizer is None: