Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,30 +1,29 @@
 import os
 import gradio as gr
-from fastai.vision.all import *
-import skimage
 import copy
+import time
+import llama_cpp
 from llama_cpp import Llama
-from huggingface_hub import hf_hub_download
+from huggingface_hub import hf_hub_download
 
-# Load the FastAI vision model
-learn = load_learner('export.pkl')
-labels = learn.dls.vocab
 
-# Load the Llama language model
 llm = Llama(
     model_path=hf_hub_download(
         repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7B-Chat-GGML"),
         filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.ggmlv3.q5_0.bin"),
     ),
     n_ctx=2048,
-    n_gpu_layers=50,
-)
+    n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
+)
 
 history = []
+
 system_message = """
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.
+You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
 """
 
+
 def generate_text(message, history):
     temp = ""
     input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
@@ -37,13 +36,13 @@ def generate_text(message, history):
         input_prompt,
         temperature=0.15,
         top_p=0.1,
-        top_k=40,
+        top_k=40,
         repeat_penalty=1.1,
         max_tokens=1024,
         stop=[
-            "<|prompter|>",
-            "<|endoftext|>",
-            "<|endoftext|> \n",
+            "<|prompter|>",
+            "<|endoftext|>",
+            "<|endoftext|> \n",
             "ASSISTANT:",
             "USER:",
             "SYSTEM:",
@@ -53,29 +52,20 @@ def generate_text(message, history):
     for out in output:
         stream = copy.deepcopy(out)
         temp += stream["choices"][0]["text"]
+        yield temp
 
-    history
-    history.append(("ASSISTANT:", temp))
-
-    return temp
+    history = ["init", input_prompt]
 
-# Define the predict function for the FastAI model
-def predict_with_llama_and_generate_text(img):
-    img = PILImage.create(img)
-    pred, pred_idx, probs = learn.predict(img)
-    detected_object = labels[pred_idx]
-
-    response = f"The system has detected {detected_object}. Do you want to know about {detected_object}?"
-
-    llama_response = generate_text(response, history)
-
-    return llama_response
 
-
-
-
-
-
-
-
-
+demo = gr.ChatInterface(
+    generate_text,
+    title="llama-cpp-python on GPU",
+    description="Running LLM with https://github.com/abetlen/llama-cpp-python",
+    examples=["tell me everything about llamas"],
+    cache_examples=True,
+    retry_btn=None,
+    undo_btn="Delete Previous",
+    clear_btn="Clear",
+)
+demo.queue(concurrency_count=1, max_size=5)
+demo.launch()
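Net effect of the commit: the FastAI image-classification path (`from fastai.vision.all import *`, `load_learner('export.pkl')`, and the `predict_with_llama_and_generate_text` wrapper) is removed, and the Space becomes a plain llama-cpp-python chat demo. `generate_text` is now a generator: the new `yield temp` inside the token loop hands `gr.ChatInterface` the full response accumulated so far (not a delta), so the chat bubble is repainted as tokens arrive. The `llm(...)` call itself sits in the region the diff elides, but the `for out in output:` loop implies it is invoked with `stream=True`. Below is a minimal, self-contained sketch of that streaming pattern, assuming llama-cpp-python and huggingface_hub are installed; the prompt string is an illustrative placeholder, not the Space's exact prompt.

import os
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Same download logic as app.py: fetch the GGML weights from the Hub,
# with REPO_ID / MODEL_FILE as optional environment overrides.
model_path = hf_hub_download(
    repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7B-Chat-GGML"),
    filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.ggmlv3.q5_0.bin"),
)
llm = Llama(model_path=model_path, n_ctx=2048)

prompt = "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nHi! [/INST]"
temp = ""
# stream=True turns the call into an iterator of partial completions;
# each chunk carries one new piece of text in choices[0]["text"].
for out in llm(prompt, max_tokens=64, stream=True):
    temp += out["choices"][0]["text"]
    print(temp, flush=True)  # app.py yields this running total to Gradio

One caveat for anyone reusing this file: `queue(concurrency_count=...)` was removed in Gradio 4, and the `retry_btn`/`undo_btn`/`clear_btn` arguments to `gr.ChatInterface` were dropped in later releases as well, so the demo block as committed assumes an older Gradio pin.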