File size: 2,459 Bytes
4bdbba8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
import random
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from gradio import Blocks, ChatInterface, mount_gradio_app
import uvicorn

from api.endpoints import router as api_router
from api.openrouter import OpenRouterClient

# Initialize FastAPI
app = FastAPI(title="OpenRouter AI Hub", description="Access multiple AI models via OpenRouter")

# Mount API endpoints
app.include_router(api_router, prefix="/api/v1")

# Serve static files
app.mount("/static", StaticFiles(directory="static"), name="static")

# Initialize templates
templates = Jinja2Templates(directory="templates")

# Initialize OpenRouter client
openrouter_client = OpenRouterClient(
    api_key=os.getenv("OPENROUTER_API_KEY", "")
)

# Web interface with Gradio
def chat_with_model(message, history):
    # Select a model based on message or randomly
    if "code" in message.lower():
        model = "openai/gpt-3.5-turbo"  # Better for coding
    elif "creative" in message.lower():
        model = "anthropic/claude-2"  # Better for creative writing
    else:
        models = [
            "openai/gpt-3.5-turbo",
            "anthropic/claude-instant-v1",
            "google/palm-2-chat-bison",
            "meta-llama/llama-2-13b-chat",
            "mistralai/mistral-7b-instruct"
        ]
        model = random.choice(models)
    
    # Get response from OpenRouter
    response = openrouter_client.chat_completion(
        model=model,
        messages=[{"role": "user", "content": message}]
    )
    
    return response["choices"][0]["message"]["content"]

# Create Gradio interface
with Blocks() as demo:
    ChatInterface(
        chat_with_model,
        title="OpenRouter AI Chat",
        description="Chat with multiple AI models powered by OpenRouter",
        examples=[
            "Explain quantum computing in simple terms",
            "Write a Python script to calculate Fibonacci sequence",
            "Tell me a creative story about a robot learning to love"
        ],
        css=".gradio-container {max-width: 800px; margin: auto;}"
    )

# Mount Gradio app
app = mount_gradio_app(app, demo, path="/")

# Root endpoint
@app.get("/", response_class=HTMLResponse)
async def read_root():
    return templates.TemplateResponse("index.html", {"request": {}})

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)