File size: 1,741 Bytes
d16ecdc
2a8521a
d16ecdc
 
32992a1
d16ecdc
2a8521a
d16ecdc
 
2a8521a
 
32992a1
2a8521a
32992a1
d16ecdc
2a8521a
d16ecdc
 
 
2a8521a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d16ecdc
2a8521a
 
 
 
 
 
 
 
 
 
 
32992a1
d16ecdc
2a8521a
32992a1
 
2a8521a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
from fastapi import FastAPI
import asyncio
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
import os

# Initialize FastAPI application; route handlers below attach to this instance.
app = FastAPI()

# Load OpenAI API key from the environment (on Hugging Face Spaces this is
# populated from the repository secrets).
# NOTE(review): if the variable is unset this is None and the model call will
# fail at request time with an auth error — confirm the secret is configured.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # Ensure you set this in Hugging Face secrets

# Shared chat-model client, created once at import time so every request
# reuses the same instance rather than re-authenticating per call.
llm = ChatOpenAI(model_name="gpt-4", openai_api_key=OPENAI_API_KEY)

# Define a request model for structured API input
class QueryRequest(BaseModel):
    """Request body schema for the /query endpoint."""

    # Free-form text prompt forwarded verbatim to the language model.
    prompt: str

# Root endpoint for basic API status check
@app.get("/")
async def home():
    """Health-check endpoint confirming the service is running."""
    payload = {"message": "FastAPI is running! Use /query for AI responses."}
    return payload

# Asynchronous endpoint to simulate AI response delay
@app.get("/async-query")
async def async_query():
    """Demo endpoint: waits two seconds, then returns a canned payload.

    Returns:
        dict: JSON body produced after the artificial delay.
    """
    delay_seconds = 2
    # Non-blocking pause that mimics a slow AI backend without tying up
    # the event loop.
    await asyncio.sleep(delay_seconds)
    return {"response": "Asynchronous AI response generated!"}

# Asynchronous OpenAI Query Endpoint
@app.post("/query")
async def query_llm(request: QueryRequest):
    """
    Generate a GPT-4 completion for the submitted prompt.

    Parameters:
        request (QueryRequest): JSON body with a single ``prompt`` string.

    Returns:
        dict: ``{"response": <model output text>}``.
    """
    # Use the chat model's native async API instead of pushing the
    # deprecated synchronous ``predict`` onto a worker thread: ``predict``
    # is deprecated in LangChain 0.1+, and ``ainvoke`` keeps the request
    # fully non-blocking without consuming a thread per call.
    result = await llm.ainvoke(request.prompt)
    # ``ainvoke`` returns an AIMessage; callers expect plain text, matching
    # the string previously returned by ``predict``.
    return {"response": result.content}

# Run the FastAPI server on Hugging Face Spaces (Port 7860)
# Entry point when executed directly (Hugging Face Spaces expects port 7860).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app=app, host="0.0.0.0", port=7860)