from fastapi import FastAPI, HTTPException
import os
import google.generativeai as genai
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from dotenv import load_dotenv
# Load GENAI_API_KEY from the environment (e.g. a local .env file).
load_dotenv()
key = os.getenv("GENAI_API_KEY")
app = FastAPI(
    title="TAT-EzQuest AI service",
    description="TAT-EzQuest AI Back-end Service",
    version="0.0.1",
    docs_url="/docs",
    redoc_url="/redoc",
)
# Allow cross-origin requests from any front-end.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class Prompt(BaseModel):
    # Request body: the user prompt to forward to the model.
    prom: str
@app.post("/post")
async def create_item(prompt: Prompt):
try:
genai.configure(api_key=key)
generation_config = {
"temperature": 1,
"top_p": 0.95,
"top_k": 64,
"max_output_tokens": 8192,
"response_mime_type": "text/plain",
}
model = genai.GenerativeModel(
model_name="gemini-1.5-flash",
generation_config=generation_config,
)
chat_session = model.start_chat(history=[])
response = chat_session.send_message(prompt.prom).text
return {"answer": response}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7860)
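
A minimal client sketch for the /post endpoint above, assuming the service is running locally on the configured port 7860 and the `requests` package is installed:

# Send a prompt to the /post endpoint and print the model's answer.
# Assumes the service above is running on http://localhost:7860.
import requests

resp = requests.post(
    "http://localhost:7860/post",
    json={"prom": "Hello, what can you do?"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["answer"])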