File size: 2,531 Bytes
933f161
 
 
 
169c067
933f161
 
 
 
169c067
 
 
 
 
 
 
 
 
 
 
933f161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169c067
 
 
 
933f161
 
 
 
 
 
169c067
 
933f161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
from transformers import StoppingCriteria, StoppingCriteriaList
import os
import torch
import uvicorn

class StopOnStrings(StoppingCriteria):
    """Stopping criterion that ends generation when the sequence's tail
    matches the token encoding of any configured stop string."""

    def __init__(self, tokenizer, stop_strings):
        self.tokenizer = tokenizer
        # Tokenize each stop string once up front so __call__ only does
        # cheap id-list comparisons per generation step.
        self.stop_ids = [
            tokenizer.encode(text, add_special_tokens=False)
            for text in stop_strings
        ]

    def __call__(self, input_ids, scores, **kwargs):
        # Stop as soon as the end of the (single) sequence equals any
        # pre-encoded stop pattern.
        tail = input_ids[0]
        return any(
            tail[-len(ids):].tolist() == ids
            for ids in self.stop_ids
        )

# Authenticate with the Hugging Face Hub only when a token is actually
# configured: calling login(None) falls back to an interactive prompt,
# which hangs/fails in a headless server, and public models load fine
# without authentication.
_hf_token = os.getenv("HF_TOKEN")
if _hf_token:
    login(_hf_token)

app = FastAPI(
    title="VexaAI Model-Platform: Microsoft Phi-1.5",
    description="Self-hosted AI-Model Microsoft Phi-1.5, powered by VexaAI.",
    version="0.9"
)

model_name = "microsoft/phi-1_5"

# Load tokenizer and model once at startup; device_map="auto" places the
# weights on whatever accelerator is available (CPU otherwise).
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.float32
)
model.eval()  # inference-only: disable dropout etc.

class GenerateRequest(BaseModel):
    """Request body for POST /generate."""
    prompt: str  # text to complete
    max_new_tokens: int = 512  # cap on newly generated tokens
    temperature: float = 0.7  # sampling temperature (sampling is always on downstream)

@app.post("/generate")
async def generate_text(request: GenerateRequest):
    """Generate a completion for ``request.prompt`` with the loaded model.

    Returns ``{"generated_text": ...}`` containing only the newly generated
    text (the prompt is stripped off). Generation halts early when the
    output ends with one of the stop markers. Any failure surfaces as a
    single HTTP 500 boundary error.
    """
    try:
        inputs = tokenizer(request.prompt, return_tensors="pt").to(model.device)

        with torch.no_grad():
            stopping = StoppingCriteriaList([
                StopOnStrings(tokenizer, ["\n\n", "###", "END"])
            ])

            outputs = model.generate(
                **inputs,
                max_new_tokens=request.max_new_tokens,
                temperature=request.temperature,
                do_sample=True,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id,  # phi has no pad token; reuse EOS
                stopping_criteria=stopping
            )

        # Strip the prompt at the token level rather than by decoded-string
        # length: decoding can normalize whitespace/special tokens, so a
        # character-count slice may cut the completion at the wrong offset.
        prompt_len = inputs["input_ids"].shape[1]
        generated_text = tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()

        return {"generated_text": generated_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"VexaAI Model-Platform: HTTP/S error: {str(e)}")

@app.get("/")
async def root():
    """Landing endpoint that points callers at the generation route."""
    payload = {"message": "To start generating text, use /generate."}
    return payload

if __name__ == "__main__":
    # Run the API server directly; binds all interfaces on port 7860
    # (presumably chosen for a Hugging Face Spaces deployment — confirm).
    uvicorn.run(app, host="0.0.0.0", port=7860)