BMike10 committed
Commit · c674665
Parent(s): 415081a

Changes:
- .gitignore       +1 -0
- app.py           +6 -8
- requirements.txt +1 -1
.gitignore ADDED
@@ -0,0 +1 @@
+itaca_mistral7b_qlora_4bit-unsloth.Q4_K_M.gguf
app.py CHANGED
@@ -1,19 +1,17 @@
 import random
 import gradio as gr
-from
+from ctransformers import AutoModelForCausalLM

+local_path = r"itaca_mistral7b_qlora_4bit-unsloth.Q4_K_M.gguf"

-# model="michelebasilico/itaca-mistral-7b-v2-4bit")
-model = pipeline("text-generation",
-                 model="michelebasilico/itaca-mistral-7b-v2-16bit")
+model = AutoModelForCausalLM.from_pretrained(local_path, model_file="itaca_mistral7b_qlora_4bit-unsloth.Q4_K_M.gguf", model_type="mistral", local_files_only=True
+                                             # , gpu_layers=50
+                                             )


 def predict(message, history):
     outputs = model(message)

     return outputs
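For context, below is a minimal sketch of how the updated app.py could be wired end to end. Only the model loading and predict() appear in the diff; the gr.ChatInterface call and the max_new_tokens / temperature values are assumptions on my part, not part of the commit.

# Sketch only, assuming the rest of app.py hooks predict() into a Gradio chat UI.
import gradio as gr
from ctransformers import AutoModelForCausalLM

GGUF_FILE = "itaca_mistral7b_qlora_4bit-unsloth.Q4_K_M.gguf"

# Load the 4-bit GGUF checkpoint from the local working directory.
# ctransformers runs on CPU by default (gpu_layers=0); the commented-out
# gpu_layers=50 in the commit would offload layers to a GPU when available.
model = AutoModelForCausalLM.from_pretrained(
    GGUF_FILE,
    model_file=GGUF_FILE,
    model_type="mistral",
    local_files_only=True,
)


def predict(message, history):
    # A ctransformers model is callable and returns the generated text as a
    # string, which is what gr.ChatInterface expects from its fn.
    return model(message, max_new_tokens=256, temperature=0.7)


if __name__ == "__main__":
    gr.ChatInterface(predict).launch()

Leaving gpu_layers commented out presumably keeps the Space on the CPU-only default, which matches running a Q4_K_M GGUF on free hardware.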
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-
+ctransformers
 torch
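Since requirements.txt now pulls in ctransformers and the GGUF file is git-ignored rather than committed, a small pre-flight check like the sketch below (not part of the commit; the file name is the one referenced by app.py) can confirm both are in place before the app tries to load the model.

# Optional pre-flight check: verify the ctransformers dependency is importable
# and the git-ignored GGUF file sits next to app.py.
import importlib.util
from pathlib import Path

GGUF_FILE = "itaca_mistral7b_qlora_4bit-unsloth.Q4_K_M.gguf"

if importlib.util.find_spec("ctransformers") is None:
    raise SystemExit("ctransformers is missing; install it from requirements.txt")
if not Path(GGUF_FILE).exists():
    raise SystemExit(f"{GGUF_FILE} not found; place the model file next to app.py")
print("ctransformers and the local GGUF file are available")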