from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM

# FastAPI application instance; the routes defined below register on it.
app = FastAPI()

# Eagerly load the Qwen3-4B tokenizer and model at import time, so the
# process only begins serving once the weights are resident in memory.
# NOTE(review): this downloads/loads a multi-GB checkpoint on first run —
# confirm the resulting startup latency and memory footprint are acceptable
# for the deployment target.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B")

@app.get("/")
def read_root():
    return {"message": "Model is ready"}