Commit cd4a687
Parent(s): 84b6705
update
app.py CHANGED
@@ -1,5 +1,6 @@
 from fastapi import FastAPI
 from fastapi.responses import StreamingResponse
+from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 import asyncio
 from fastapi.middleware.cors import CORSMiddleware
@@ -14,13 +15,14 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-#
-
-
-
-
-
-
+# Download the GGUF file
+model_id = "muhammadnoman76/cortex_q4"
+gguf_filename = "unsloth.Q4_K_M.gguf"  # Replace with the correct filename
+model_path = hf_hub_download(
+    repo_id=model_id,
+    filename=gguf_filename,
+    local_dir=".",
+    local_dir_use_symlinks=False
 )
 
 alpaca_prompt = """
@@ -49,6 +51,14 @@ Important notes:
 ### Response:
 """
 
+# Load model from local file in the copied folder
+llm = Llama(
+    model_path=r'./unsloth.Q4_K_M.gguf',
+    n_ctx=2048,
+    n_batch=512,
+    verbose=False
+)
+
 async def stream_llm_response(task_description: str):
     prompt = alpaca_prompt.format(task_description)
     stream = llm(
@@ -67,4 +77,4 @@ async def stream_response(task: str = "make an agent which send mail by searchin
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=
+    uvicorn.run(app, host="0.0.0.0", port=8000)