Kajlid
committed on
Commit
·
9a07ae6
1
Parent(s):
00ed5b0
chore(requirements): remove llama_cpp requirement from requirements.txt
Browse files- chat_ui/app.py +1 -3
- chat_ui/requirements.txt +0 -1
- requirements.txt +0 -1
chat_ui/app.py
CHANGED
|
@@ -1,13 +1,11 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from huggingface_hub import InferenceClient
|
| 3 |
import subprocess
|
| 4 |
from huggingface_hub import hf_hub_download
|
| 5 |
|
| 6 |
subprocess.run("pip install -V llama_cpp_python==0.3.1", shell=True)
|
| 7 |
-
# Download GGUF model into HF Space storage
|
| 8 |
-
|
| 9 |
from llama_cpp import Llama
|
| 10 |
|
|
|
|
| 11 |
model_path = hf_hub_download(
|
| 12 |
repo_id="ft-lora/llama3.2-3b-gguf-q4km",
|
| 13 |
filename="llama3.2-3b-instruct-finetuned.gguf"
|
|
|
|
| 1 |
import gradio as gr
|
|
|
|
| 2 |
import subprocess
|
| 3 |
from huggingface_hub import hf_hub_download
|
| 4 |
|
| 5 |
subprocess.run("pip install -V llama_cpp_python==0.3.1", shell=True)
|
|
|
|
|
|
|
| 6 |
from llama_cpp import Llama
|
| 7 |
|
| 8 |
+
# Download GGUF model into HF Space storage
|
| 9 |
model_path = hf_hub_download(
|
| 10 |
repo_id="ft-lora/llama3.2-3b-gguf-q4km",
|
| 11 |
filename="llama3.2-3b-instruct-finetuned.gguf"
|
chat_ui/requirements.txt
CHANGED
|
@@ -1,2 +1 @@
|
|
| 1 |
-
llama-cpp-python==0.3.1
|
| 2 |
gradio
|
|
|
|
|
|
|
| 1 |
gradio
|
requirements.txt
CHANGED
|
@@ -1,3 +1,2 @@
|
|
| 1 |
-
llama-cpp-python==0.3.1
|
| 2 |
gradio
|
| 3 |
huggingface_hub
|
|
|
|
|
|
|
| 1 |
gradio
|
| 2 |
huggingface_hub
|