Filip committed on
Commit 16a33a8 · 1 Parent(s): d5c72cc
update app.py
app.py
CHANGED
@@ -3,7 +3,6 @@ from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
 def load_model():
-    # Download the model from HuggingFace
     repo_id = "forestav/gguf_lora_model"
     model_file = "unsloth.F16.gguf"
 
@@ -12,7 +11,8 @@ def load_model():
         filename=model_file
     )
 
-
+    print(f"Loading model from: {local_path}")
+
     model = Llama(
         model_path=local_path,
         n_ctx=2048,
@@ -22,7 +22,6 @@ def load_model():
     return model
 
 def generate_response(message, history):
-    # Generate response
     response = model.create_chat_completion(
         messages=[
             {"role": "user", "content": message}
@@ -35,9 +34,11 @@ def generate_response(message, history):
     return response['choices'][0]['message']['content']
 
 # Load model globally
+print("Starting model loading...")
 model = load_model()
+print("Model loaded successfully!")
 
-# Create Gradio interface
+# Create Gradio interface
 demo = gr.ChatInterface(
     fn=generate_response,
     title="Your GGUF Model Chat",
@@ -45,5 +46,9 @@ demo = gr.ChatInterface(
     examples=["Continue the fibonacci sequence: 1, 1, 2, 3, 5, 8,"]
 )
 
-
-
+# Add proper Gradio launch configuration for Spaces
+demo.launch(
+    server_name="0.0.0.0",   # Necessary for Spaces
+    server_port=7860,        # Standard port for Spaces
+    share=False              # Don't need share link in Spaces
+)
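For context, a sketch of how the full app.py would read after this commit, assembled from the hunks above. The diff hides some surrounding lines, so the gradio import, the hf_hub_download() call, and the extra arguments passed to Llama() and create_chat_completion() below are assumptions, not part of the change itself.

# Sketch (not part of the commit): app.py as it would read after this change.
# Lines marked "assumed" are hidden by the diff context and are guesses.
import gradio as gr                      # assumed import, needed for gr.ChatInterface
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

def load_model():
    repo_id = "forestav/gguf_lora_model"
    model_file = "unsloth.F16.gguf"

    # Assumed call: fetch the GGUF file from the Hub and return its local path
    local_path = hf_hub_download(
        repo_id=repo_id,
        filename=model_file
    )

    print(f"Loading model from: {local_path}")

    model = Llama(
        model_path=local_path,
        n_ctx=2048,
        n_threads=2                      # assumed; the real value sits in the hidden context lines
    )
    return model

def generate_response(message, history):
    response = model.create_chat_completion(
        messages=[
            {"role": "user", "content": message}
        ],
        max_tokens=256                   # assumed; the real generation settings are hidden
    )
    return response['choices'][0]['message']['content']

# Load model globally
print("Starting model loading...")
model = load_model()
print("Model loaded successfully!")

# Create Gradio interface
demo = gr.ChatInterface(
    fn=generate_response,
    title="Your GGUF Model Chat",
    examples=["Continue the fibonacci sequence: 1, 1, 2, 3, 5, 8,"]
)

# Add proper Gradio launch configuration for Spaces
demo.launch(
    server_name="0.0.0.0",   # Necessary for Spaces
    server_port=7860,        # Standard port for Spaces
    share=False              # Don't need share link in Spaces
)

Binding to 0.0.0.0 on port 7860 is the standard setup for a Gradio app running inside a Hugging Face Space, which is what the new launch block configures.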