Juna190825 committed on
Commit
b0ce8a9
·
verified ·
1 Parent(s): 2cc5c01

Update api/endpoints.py

Browse files
Files changed (1) hide show
  1. api/endpoints.py +21 -56
api/endpoints.py CHANGED
@@ -1,13 +1,14 @@
1
- from fastapi import APIRouter, HTTPException, Query
2
  from typing import Optional
3
  import random
 
4
 
5
  from .openrouter import OpenRouterClient
6
  from .utils import get_openrouter_client
7
 
8
  router = APIRouter()
 
9
 
10
- # Available free models
11
  FREE_MODELS = [
12
  "openai/gpt-3.5-turbo",
13
  "anthropic/claude-instant-v1",
@@ -20,26 +21,30 @@ FREE_MODELS = [
20
  async def chat_with_model(
21
  message: str,
22
  model: Optional[str] = None,
23
- temperature: float = 0.7,
24
- max_tokens: int = 1024
25
  ):
26
  """
27
  Chat with a specified model or let the system choose one
28
-
29
- Parameters:
30
- - message: The user's input message
31
- - model: Optional model identifier (default: random)
32
- - temperature: Creativity parameter (0-2)
33
- - max_tokens: Maximum length of response
34
  """
 
 
 
 
 
 
35
  client = get_openrouter_client()
36
 
37
  if not model:
38
  model = random.choice(FREE_MODELS)
39
  elif model not in FREE_MODELS:
40
- raise HTTPException(status_code=400, detail="Model not available in free tier")
 
 
 
41
 
42
  try:
 
43
  response = client.chat_completion(
44
  model=model,
45
  messages=[{"role": "user", "content": message}],
@@ -52,48 +57,8 @@ async def chat_with_model(
52
  "usage": response.get("usage", {})
53
  }
54
  except Exception as e:
55
- raise HTTPException(status_code=500, detail=str(e))
56
-
57
- @router.get("/models")
58
- async def list_available_models():
59
- """
60
- List all available free models
61
- """
62
- return {
63
- "models": FREE_MODELS,
64
- "count": len(FREE_MODELS),
65
- "description": "These models are available in the free tier"
66
- }
67
-
68
- @router.post("/smart-chat")
69
- async def smart_chat_with_model(message: str):
70
- """
71
- Smart chat endpoint that selects the best model based on the input
72
-
73
- Parameters:
74
- - message: The user's input message
75
- """
76
- client = get_openrouter_client()
77
-
78
- # Model selection logic
79
- if any(word in message.lower() for word in ["code", "program", "python", "javascript"]):
80
- model = "openai/gpt-3.5-turbo"
81
- elif any(word in message.lower() for word in ["story", "creative", "write", "poem"]):
82
- model = "anthropic/claude-instant-v1"
83
- elif any(word in message.lower() for word in ["fact", "science", "history"]):
84
- model = "google/palm-2-chat-bison"
85
- else:
86
- model = random.choice(FREE_MODELS)
87
-
88
- try:
89
- response = client.chat_completion(
90
- model=model,
91
- messages=[{"role": "user", "content": message}]
92
- )
93
- return {
94
- "model": model,
95
- "response": response["choices"][0]["message"]["content"],
96
- "reason": f"Selected {model} based on input content"
97
- }
98
- except Exception as e:
99
- raise HTTPException(status_code=500, detail=str(e))
 
1
+ from fastapi import APIRouter, HTTPException, Query, status
2
  from typing import Optional
3
  import random
4
+ import logging
5
 
6
  from .openrouter import OpenRouterClient
7
  from .utils import get_openrouter_client
8
 
9
  router = APIRouter()
10
+ logger = logging.getLogger(__name__)
11
 
 
12
  FREE_MODELS = [
13
  "openai/gpt-3.5-turbo",
14
  "anthropic/claude-instant-v1",
 
21
  async def chat_with_model(
22
  message: str,
23
  model: Optional[str] = None,
24
+ temperature: float = Query(0.7, ge=0, le=2),
25
+ max_tokens: int = Query(1024, ge=1, le=4096)
26
  ):
27
  """
28
  Chat with a specified model or let the system choose one
 
 
 
 
 
 
29
  """
30
+ if not message.strip():
31
+ raise HTTPException(
32
+ status_code=status.HTTP_400_BAD_REQUEST,
33
+ detail="Message cannot be empty"
34
+ )
35
+
36
  client = get_openrouter_client()
37
 
38
  if not model:
39
  model = random.choice(FREE_MODELS)
40
  elif model not in FREE_MODELS:
41
+ raise HTTPException(
42
+ status_code=status.HTTP_400_BAD_REQUEST,
43
+ detail=f"Model not available in free tier. Available models: {', '.join(FREE_MODELS)}"
44
+ )
45
 
46
  try:
47
+ logger.info(f"Processing chat request with model: {model}")
48
  response = client.chat_completion(
49
  model=model,
50
  messages=[{"role": "user", "content": message}],
 
57
  "usage": response.get("usage", {})
58
  }
59
  except Exception as e:
60
+ logger.error(f"Error processing chat request: {str(e)}")
61
+ raise HTTPException(
62
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
63
+ detail="Error processing your request"
64
+ )