TabarcraftOfficiel committed
Commit 768960d · verified · 1 Parent(s): 05dbb35

Update app.py

Files changed (1)
  1. app.py +13 -7
app.py CHANGED
@@ -1,14 +1,20 @@
-import gradio as gr
+from fastapi import FastAPI
+from pydantic import BaseModel
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
+app = FastAPI()
+
+# Load the model
 model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-llm-7b-chat", device_map="auto", torch_dtype=torch.float16)
 tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-llm-7b-chat")
 
-def chat(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+class Prompt(BaseModel):
+    prompt: str
 
-demo = gr.Interface(fn=chat, inputs="text", outputs="text")
-demo.launch()
+@app.post("/predict")
+async def predict(prompt: Prompt):
+    inputs = tokenizer(prompt.prompt, return_tensors="pt").to(model.device)
+    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return {"response": response}