"""
AJ API - OpenAI-Compatible API Endpoint
Use this for your developer portal and Vercel chatbot
"""
import time
import uuid
from typing import List, Optional

import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import InferenceClient
from pydantic import BaseModel
| app = FastAPI(title="AJ API", description="OpenAI-compatible API by AJ STUDIOZ") | |
| # Enable CORS for all origins | |
| app.add_middleware( | |
| CORSMiddleware, | |
| allow_origins=["*"], | |
| allow_credentials=True, | |
| allow_methods=["*"], | |
| allow_headers=["*"], | |
| ) | |
| # Initialize client | |
| client = InferenceClient() | |
| # Brand configuration | |
| BRAND_NAME = "AJ" | |
| COMPANY = "AJ STUDIOZ" | |
| LOCATION = "Tamil Nadu, India" | |
| # OpenAI-compatible request models | |
| class Message(BaseModel): | |
| role: str | |
| content: str | |
| class ChatCompletionRequest(BaseModel): | |
| model: str = "aj-v1" | |
| messages: List[Message] | |
| temperature: Optional[float] = 0.7 | |
| max_tokens: Optional[int] = 500 | |
| stream: Optional[bool] = False | |
| class ChatCompletionResponse(BaseModel): | |
| id: str | |
| object: str = "chat.completion" | |
| created: int | |
| model: str | |
| choices: List[dict] | |
| usage: dict | |
| def read_root(): | |
| return { | |
| "name": BRAND_NAME, | |
| "company": COMPANY, | |
| "location": LOCATION, | |
| "version": "v1.0", | |
| "endpoints": { | |
| "chat": "/v1/chat/completions", | |
| "models": "/v1/models" | |
| } | |
| } | |
| def list_models(): | |
| """List available models - OpenAI compatible""" | |
| return { | |
| "object": "list", | |
| "data": [ | |
| { | |
| "id": "aj-v1", | |
| "object": "model", | |
| "created": 1699000000, | |
| "owned_by": COMPANY | |
| } | |
| ] | |
| } | |
| async def chat_completions(request: ChatCompletionRequest): | |
| """ | |
| OpenAI-compatible chat completions endpoint | |
| Use this in your Vercel chatbot and developer portal | |
| """ | |
| try: | |
| # Add system prompt with AJ branding | |
| system_prompt = { | |
| "role": "system", | |
| "content": f"""You are {BRAND_NAME}, an AI assistant created by {COMPANY} in {LOCATION}. | |
| When asked about your identity, always say "I am {BRAND_NAME}, developed by {COMPANY} in {LOCATION}." | |
| You are helpful, accurate, and behave like ChatGPT/Claude.""" | |
| } | |
| # Prepare messages | |
| messages = [system_prompt] + [{"role": m.role, "content": m.content} for m in request.messages] | |
| # Call Hugging Face | |
| response = client.chat_completion( | |
| messages=messages, | |
| model="meta-llama/Llama-3.2-3B-Instruct", | |
| max_tokens=request.max_tokens, | |
| temperature=request.temperature, | |
| stream=False | |
| ) | |
| # Return OpenAI-compatible response | |
| return { | |
| "id": "chatcmpl-" + str(hash(str(messages)))[-10:], | |
| "object": "chat.completion", | |
| "created": 1699000000, | |
| "model": request.model, | |
| "choices": [ | |
| { | |
| "index": 0, | |
| "message": { | |
| "role": "assistant", | |
| "content": response.choices[0].message.content | |
| }, | |
| "finish_reason": "stop" | |
| } | |
| ], | |
| "usage": { | |
| "prompt_tokens": 0, | |
| "completion_tokens": 0, | |
| "total_tokens": 0 | |
| } | |
| } | |
| except Exception as e: | |
| raise HTTPException(status_code=500, detail=str(e)) | |
| if __name__ == "__main__": | |
| uvicorn.run(app, host="0.0.0.0", port=7860) | |