Update app.py
app.py CHANGED:

```diff
@@ -1,25 +1,24 @@
 import os
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
 from openai import AsyncOpenAI
 
 app = FastAPI()
 
-async def generate_ai_response(prompt: str):
+async def generate_ai_response(prompt: str, model: str = "openai/gpt-4.1-mini"):
     # Configuration for unofficial GitHub AI endpoint
     token = os.getenv("GITHUB_TOKEN")
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")
 
     endpoint = "https://models.github.ai/inference"
-    model = "openai/gpt-4.1-mini"  # Unofficial model name
 
     client = AsyncOpenAI(base_url=endpoint, api_key=token)
 
     try:
         stream = await client.chat.completions.create(
             messages=[
-                {"role": "system", "content": "You are a helpful assistant"},
+                {"role": "system", "content": "You are a helpful assistant named Orion and made by Abdullah Ali"},
                 {"role": "user", "content": prompt}
             ],
             model=model,
@@ -37,14 +36,14 @@ async def generate_ai_response(prompt: str):
         raise HTTPException(status_code=500, detail="AI generation failed")
 
 @app.post("/generate")
-async def generate_response(prompt: str):
+async def generate_response(prompt: str = Query(...), model: str = Query("openai/gpt-4.1-mini")):
     if not prompt:
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
 
     return StreamingResponse(
-        generate_ai_response(prompt),
+        generate_ai_response(prompt, model),
         media_type="text/event-stream"
     )
 
 def get_app():
-    return app
+    return app
```
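The diff elides the middle of `generate_ai_response` (old lines 26-36, the streaming section inside the `try` block). A minimal sketch of how a streaming chat completion is typically consumed with `AsyncOpenAI` follows; it is an assumption about the omitted code, not the commit's actual contents:

```python
# Hypothetical sketch of the elided streaming section; not the file's
# actual code. Assumes openai-python v1 semantics: passing stream=True
# returns an async iterator of chunks whose deltas carry the text.
from openai import AsyncOpenAI

async def stream_chat(client: AsyncOpenAI, model: str, messages: list):
    stream = await client.chat.completions.create(
        messages=messages,
        model=model,
        stream=True,  # request incremental chunks instead of one full response
    )
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # deltas can be None, e.g. on the final chunk
            yield delta
```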
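After this change, both `prompt` and `model` are query parameters, so callers can pick a model per request. A minimal client sketch, assuming the app is served locally (the host, port, and `httpx` dependency are assumptions, not part of this commit):

```python
# Minimal client sketch for the updated /generate endpoint. Assumes the
# app is served locally, e.g. `uvicorn app:app --port 7860`; the host,
# port, and httpx dependency are assumptions, not part of this commit.
import httpx

def stream_generate(prompt: str, model: str = "openai/gpt-4.1-mini") -> None:
    params = {"prompt": prompt, "model": model}
    with httpx.stream("POST", "http://localhost:7860/generate",
                      params=params, timeout=60.0) as response:
        response.raise_for_status()
        for chunk in response.iter_text():  # read the event stream incrementally
            print(chunk, end="", flush=True)

if __name__ == "__main__":
    stream_generate("Who are you?")
```

Note that `Query(...)` already makes `prompt` required, so FastAPI rejects requests that omit it; the explicit `if not prompt` check then covers the empty-string case (`?prompt=`).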