Create app.py
app.py
ADDED
@@ -0,0 +1,88 @@
from llama_cpp import Llama
import streamlit as st
from langchain.llms.base import LLM
from llama_index import LLMPredictor, LangchainEmbedding, ServiceContext, PromptHelper
from typing import Optional, List, Mapping, Any
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

MODEL_NAME = 'TheBloke/MelloGPT-AWQ'
# Local path to the model weights loaded by llama.cpp. This name was used but
# never defined in the original file; point it at the downloaded model file.
MODEL_PATH = MODEL_NAME

# Number of threads to use
NUM_THREADS = 8

# Prompt helper settings
max_input_size = 2048       # maximum input size
num_output = 256            # number of output tokens
chunk_overlap_ratio = 0.8   # maximum chunk overlap ratio

try:
    prompt_helper = PromptHelper(max_input_size, num_output, chunk_overlap_ratio)
except Exception:
    # Retry with a smaller overlap ratio if the first attempt is rejected
    chunk_overlap_ratio = 0.2
    prompt_helper = PromptHelper(max_input_size, num_output, chunk_overlap_ratio)

embed_model = LangchainEmbedding(HuggingFaceEmbeddings())


class CustomLLM(LLM):
    """LangChain-compatible wrapper around a local llama.cpp model."""

    model_name: str = MODEL_NAME

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        p = f"Human: {prompt} Assistant: "
        prompt_length = len(p)
        # Note: this reloads the model weights on every call
        llm = Llama(model_path=MODEL_PATH, n_threads=NUM_THREADS)
        output = llm(p, max_tokens=512, stop=["Human:"], echo=True)['choices'][0]['text']
        # echo=True returns the prompt followed by the completion, so slice the
        # string to keep only the newly generated tokens
        response = output[prompt_length:]
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.session_state.messages.append({"role": "assistant", "content": response})
        return response

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"name_of_model": self.model_name}

    @property
    def _llm_type(self) -> str:
        return "custom"


# define our LLM
llm_predictor = LLMPredictor(llm=CustomLLM())
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model)


def clear_convo():
    """Reset the chat history."""
    st.session_state['messages'] = []


def init():
    """Configure the page and initialise the chat history."""
    st.set_page_config(page_title='Local LLama', page_icon=':robot_face:')
    st.sidebar.title('Local LLama')
    if 'messages' not in st.session_state:
        st.session_state['messages'] = []


if __name__ == '__main__':
    init()

    @st.cache_resource
    def get_llm():
        llm = CustomLLM()
        return llm

    clear_button = st.sidebar.button("Clear Conversation", key="clear")
    if clear_button:
        clear_convo()

    user_input = st.chat_input("Say something")

    if user_input:
        llm = get_llm()
        llm._call(prompt=user_input)

    # Render the conversation history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
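Note: the service_context built above is not used anywhere else in this file. If the intent is to feed it into a llama_index document index, a minimal sketch might look like the following, assuming the legacy pre-0.10 llama_index API that the imports above come from; the './data' folder and the query text are hypothetical placeholders, not part of this commit.

from llama_index import SimpleDirectoryReader, VectorStoreIndex

# Hypothetical follow-up: load local documents and query them with the
# CustomLLM + embeddings wired into service_context above
documents = SimpleDirectoryReader('./data').load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
print(query_engine.query("Summarize the documents."))

To run the chat app itself, `streamlit run app.py` should be enough once llama-cpp-python, streamlit, langchain, llama-index and sentence-transformers are installed; exact package versions are not pinned in this commit.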