Update app.py

app.py CHANGED
@@ -6,29 +6,16 @@ import gradio as gr
 import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import sqlite3
+
+
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-2 13B Chat
-
-This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
-
-"""
-
-LICENSE = """
-<p/>
-
----
-As a derivative work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+# Llama-3 7B MRC \
 """
 
 if not torch.cuda.is_available():
@@ -42,6 +29,26 @@ if torch.cuda.is_available():
 tokenizer.use_default_system_prompt = False
 
 
+_TABLE = """
+CREATE TABLE IF NOT EXISTS Item(
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    question TEXT NOT NULL,
+    answer TEXT NOT NULL,
+    timestamp TIMESTAMP DEFAULT (DATETIME('now', 'localtime'))
+);
+"""
+
+INSERT = """
+INSERT INTO Item(question, answer) VALUES(?, ?);
+"""
+
+
+connect = sqlite3.connect(db_path, check_same_thread=False)
+cursor = self.connect.cursor()
+cursor.execute(_TABLE)
+connect.commit()
+
+
 @spaces.GPU
 def generate(
     message: str,
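As committed, this module-level setup cannot run: `db_path` is never defined, and `cursor = self.connect.cursor()` uses `self` outside of any class, so the Space raises a `NameError` at import time. A minimal corrected sketch of the same setup, assuming a hypothetical `qa_log.db` filename (the commit never says where the database should live):

```python
import sqlite3

# Assumed path; the commit references `db_path` without defining it.
db_path = "qa_log.db"

_TABLE = """
CREATE TABLE IF NOT EXISTS Item(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    question TEXT NOT NULL,
    answer TEXT NOT NULL,
    timestamp TIMESTAMP DEFAULT (DATETIME('now', 'localtime'))
);
"""

INSERT = "INSERT INTO Item(question, answer) VALUES(?, ?);"

# check_same_thread=False is needed because Gradio may run the generate
# function in a worker thread other than the one that opened the connection.
connect = sqlite3.connect(db_path, check_same_thread=False)
cursor = connect.cursor()  # module-level name, no stray `self.`
cursor.execute(_TABLE)
connect.commit()
```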
@@ -86,7 +93,11 @@ def generate(
         outputs.append(text)
         yield "".join(outputs)
 
+    self.cursor.execute(INSERT, (message, "".join(outputs)))
+    self.connect.commit()
+
 
+
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
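The same `self.` mistake recurs here: `generate` is a plain function, so `self.cursor` and `self.connect` are undefined. A hedged sketch of the logging step, building on the corrected setup above; the helper name `log_interaction` and the lock are assumptions, added because Gradio can serve concurrent requests from different threads:

```python
import threading

_db_lock = threading.Lock()  # assumption: serialize writes across requests

def log_interaction(question: str, answer: str) -> None:
    # Call once the streamer is exhausted, i.e. after the final yield.
    with _db_lock:
        cursor.execute(INSERT, (question, answer))
        connect.commit()

# Inside generate(), after the streaming loop:
#     log_interaction(message, "".join(outputs))
```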
@@ -103,7 +114,7 @@ chat_interface = gr.ChatInterface(
             minimum=0.1,
             maximum=4.0,
             step=0.1,
-            value=0.6,
+            value=0.1,
         ),
         gr.Slider(
             label="Top-p (nucleus sampling)",
@@ -124,7 +135,7 @@ chat_interface = gr.ChatInterface(
             minimum=1.0,
             maximum=2.0,
             step=0.05,
-            value=1.2,
+            value=1.15,
         ),
     ],
     stop_btn=None,
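For completeness, a quick way to inspect what the new `Item` table accumulates, assuming the same hypothetical `qa_log.db` path as in the sketches above:

```python
import sqlite3

con = sqlite3.connect("qa_log.db")
query = "SELECT question, answer, timestamp FROM Item ORDER BY id DESC LIMIT 5"
for question, answer, ts in con.execute(query):
    print(f"[{ts}] Q: {question[:60]!r} -> A: {answer[:60]!r}")
con.close()
```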