kripeshAlt committed on
Commit 155b5cb · verified · 1 Parent(s): a77020b

Update app.py

Files changed (1): app.py +20 -6
app.py CHANGED
@@ -1,14 +1,28 @@
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-import gradio as gr
+from fastapi import FastAPI
+from pydantic import BaseModel
+import uvicorn
 
+# Load model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-llm-1.8b-chat")
 model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-llm-1.8b-chat")
 
-def chat(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt")
+# Initialize FastAPI app
+app = FastAPI()
+
+# Define input schema
+class RequestBody(BaseModel):
+    prompt: str
+
+# Define the model inference function
+@app.post("/predict")
+async def predict(request: RequestBody):
+    inputs = tokenizer(request.prompt, return_tensors="pt")
     outputs = model.generate(**inputs, max_new_tokens=100)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return {"response": result}
 
-demo = gr.Interface(fn=chat, inputs="text", outputs="text")
-demo.launch()
+# For testing locally (not needed for Hugging Face)
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8000)
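
For reference, a minimal client-side sketch for exercising the new /predict endpoint. It assumes the app has been started locally with "python app.py" (so uvicorn is listening on port 8000, as in the commit); the prompt string is only an example.

# Minimal client sketch (assumption: server running locally on port 8000,
# started via "python app.py" as in the __main__ block above).
import requests

payload = {"prompt": "What is the capital of France?"}  # matches the RequestBody schema
resp = requests.post("http://localhost:8000/predict", json=payload)
resp.raise_for_status()
print(resp.json()["response"])  # decoded text returned by the endpoint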