Waheeb2001 committed on
Commit
df2f11e
·
verified ·
1 Parent(s): 8b98824

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +21 -15
main.py CHANGED
@@ -2,8 +2,14 @@ from ctransformers import AutoModelForCausalLM
2
  from fastapi import FastAPI, Form
3
  from pydantic import BaseModel
4
  import logging
 
 
5
  logging.basicConfig(level=logging.INFO)
6
 
 
 
 
 
7
  try:
8
  llm = AutoModelForCausalLM.from_pretrained(
9
  "zephyr-7b-beta.Q4_K_S.gguf",
@@ -15,26 +21,26 @@ try:
15
  except Exception as e:
16
  logging.error(f"Model failed to load: {e}")
17
  raise e
18
- #Model loading
19
- llm = AutoModelForCausalLM.from_pretrained("zephyr-7b-beta.Q4_K_S.gguf",
20
- model_type='mistral',
21
- max_new_tokens = 1096,
22
- threads = 3,
23
- )
24
-
25
 
26
- #Pydantic object
27
- class validation(BaseModel):
28
  prompt: str
29
-
30
- #Fast API
31
- app = FastAPI()
32
 
33
- #Zephyr completion
 
 
 
 
 
 
 
 
 
34
  @app.post("/llm_on_cpu")
35
- async def stream(item: validation):
36
  system_prompt = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'
37
  E_INST = "</s>"
38
  user, assistant = "<|user|>", "<|assistant|>"
39
  prompt = f"{system_prompt}{E_INST}\n{user}\n{item.prompt.strip()}{E_INST}\n{assistant}\n"
40
- return llm(prompt)
 
 
2
  from fastapi import FastAPI, Form
3
  from pydantic import BaseModel
4
  import logging
5
+
6
+ # Set up logging
7
  logging.basicConfig(level=logging.INFO)
8
 
9
+ # Initialize FastAPI app
10
+ app = FastAPI()
11
+
12
+ # Load the GGUF model once
13
  try:
14
  llm = AutoModelForCausalLM.from_pretrained(
15
  "zephyr-7b-beta.Q4_K_S.gguf",
 
21
  except Exception as e:
22
  logging.error(f"Model failed to load: {e}")
23
  raise e
 
 
 
 
 
 
 
24
 
25
# Pydantic schema validating the request body of the LLM endpoint.
class ValidationModel(BaseModel):
    """Request payload for ``/llm_on_cpu``."""

    # Free-form instruction text supplied by the caller.
    prompt: str
 
 
 
28
 
29
# Root endpoint used for health checks and as a minimal landing page.
@app.get("/")
def read_root():
    """Report service status and advertise the available endpoints."""
    info = {
        "status": "running",
        "message": "Zephyr LLM API is active",
        "endpoints": ["/llm_on_cpu (POST)"],
    }
    return info
37
+
38
# LLM inference endpoint
@app.post("/llm_on_cpu")
def stream(item: ValidationModel):
    """Generate a completion for ``item.prompt`` with the preloaded Zephyr model.

    Declared as a plain ``def`` (not ``async def``) on purpose: the
    ctransformers call below is CPU-bound and blocking, so FastAPI will run
    this handler in its threadpool instead of stalling the asyncio event loop
    for every other request.

    Returns:
        dict: ``{"response": <generated text>}``.
    """
    system_prompt = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'
    E_INST = "</s>"  # sequence-terminator token in the zephyr chat template
    user, assistant = "<|user|>", "<|assistant|>"
    # Wrap the stripped user text in the zephyr chat-template role markers.
    prompt = f"{system_prompt}{E_INST}\n{user}\n{item.prompt.strip()}{E_INST}\n{assistant}\n"
    response = llm(prompt)
    return {"response": response}