# llm_server/wrapper.py
from langchain_community.llms.ctransformers import CTransformers
import os

# Model configuration, overridable through environment variables.
MODEL_TYPE = os.getenv("MODEL_TYPE", "mistral")
MODEL_BIN_PATH = os.getenv("MODEL_BIN_PATH", "model/mistral-7b-instruct-v0.1.Q3_K_S.gguf")
MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", 600))
TEMPERATURE = float(os.getenv("TEMPERATURE", 0.01))
CONTEXT_LENGTH = int(os.getenv("CONTEXT_LENGTH", 6000))

class LLMWrapper:
    """Thin wrapper around a local GGUF model loaded through CTransformers."""

    def __init__(self):
        self.llm = CTransformers(
            model=MODEL_BIN_PATH,
            model_type=MODEL_TYPE,
            config={
                "max_new_tokens": MAX_NEW_TOKENS,
                "temperature": TEMPERATURE,
                "context_length": CONTEXT_LENGTH,
            },
        )

    def generate_text(self, prompt):
        # invoke() is the current LangChain entry point; calling the LLM
        # directly (self.llm(prompt)) is deprecated but equivalent.
        return self.llm.invoke(prompt)
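

# Minimal usage sketch, assuming the GGUF file exists at MODEL_BIN_PATH and the
# ctransformers package is installed; the prompt below is purely illustrative.
if __name__ == "__main__":
    wrapper = LLMWrapper()
    print(wrapper.generate_text("Summarize what a quantized GGUF model is in one sentence."))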