# app.py — minimal FastAPI service that loads the Qwen/Qwen3-4B model at startup
# and exposes a readiness endpoint. (From Hugging Face Space "load_model",
# commit 7fa2d88, verified.)
from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
# Application instance used by the route decorators below.
app = FastAPI()

# Load the tokenizer and model once at module import time so every request is
# served from the already-loaded weights. NOTE(review): this blocks server
# startup while from_pretrained resolves the checkpoint (presumably downloading
# it on first run) — confirm the deployment tolerates the startup delay and
# memory footprint of a 4B-parameter model.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B")
@app.get("/")
def read_root():
    """Readiness probe: report that the module-level model has been loaded.

    Returns:
        dict: a single-key payload confirming the service is up.
    """
    payload = {"message": "Model is ready"}
    return payload