Aasher commited on
Commit
a9af6e5
·
0 Parent(s):

Feat: Build a student homework assistant API

Browse files
Files changed (14) hide show
  1. .gitignore +11 -0
  2. .python-version +1 -0
  3. README.md +0 -0
  4. api/chat.py +59 -0
  5. main.py +38 -0
  6. models/chat.py +32 -0
  7. prompts.py +8 -0
  8. pyproject.toml +14 -0
  9. services/chat_service.py +137 -0
  10. static/app.js +239 -0
  11. static/index.html +47 -0
  12. static/styles.css +66 -0
  13. utils.py +66 -0
  14. uv.lock +0 -0
.gitignore ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
11
+ .env
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11
README.md ADDED
File without changes
api/chat.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, HTTPException, Depends
2
+ from fastapi.responses import StreamingResponse
3
+ import json
4
+ from typing import AsyncGenerator
5
+
6
+ from models.chat import ChatRequest, ChatResponse, StreamResponse
7
+ from services.chat_service import ChatService
8
+
9
chat_router = APIRouter(prefix="/chat", tags=["chat"])


def get_chat_service() -> ChatService:
    """FastAPI dependency that supplies a fresh ChatService per request."""
    return ChatService()
13
+
14
@chat_router.post("/", response_model=ChatResponse)
async def chat_completion(
    request: ChatRequest,
    chat_service: ChatService = Depends(get_chat_service)
):
    """Handle a non-streaming chat completion request.

    Delegates to ChatService.process_chat; any unexpected failure is
    converted into an HTTP 500 whose detail carries the error text.
    """
    try:
        return await chat_service.process_chat(request)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
27
+
28
@chat_router.post("/stream")
async def chat_stream(
    request: ChatRequest,
    chat_service: ChatService = Depends(get_chat_service)
):
    """Stream a chat completion as Server-Sent Events.

    Each event is a JSON-serialized StreamResponse; a final event with
    ``finished=True`` terminates the stream.

    Error handling lives INSIDE the generator: once StreamingResponse
    has started sending, headers are already out, so a failure mid-stream
    cannot become an HTTP 500 — it is reported in-band as a chunk with
    ``error`` set. (The previous try/except around the generator
    definition could never catch these errors, because the generator
    body only runs while the response is being sent.)
    """
    async def generate_stream() -> AsyncGenerator[str, None]:
        try:
            async for chunk in chat_service.stream_chat(request):
                yield f"data: {json.dumps(chunk.model_dump())}\n\n"
            # Send final message
            final_chunk = StreamResponse(finished=True)
            yield f"data: {json.dumps(final_chunk.model_dump())}\n\n"
        except Exception as e:
            error_chunk = StreamResponse(error=str(e), finished=True)
            yield f"data: {json.dumps(error_chunk.model_dump())}\n\n"

    return StreamingResponse(
        generate_stream(),
        # Proper SSE media type so EventSource/fetch consumers parse events.
        media_type="text/event-stream",
        headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
    )
main.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from fastapi.staticfiles import StaticFiles
3
+ from fastapi.responses import FileResponse
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+ from dotenv import load_dotenv
6
+
7
+ from api.chat import chat_router
8
+
9
# Load environment variables (GOOGLE_API_KEY, MODEL, ...) before any
# service code reads them.
load_dotenv()

app = FastAPI(
    title="Chat Assistant API",
    description="FastAPI backend for chat assistant with LiteLLM and Gemini",
    version="1.0.0",
)

# CORS middleware for frontend integration.
# NOTE(review): wildcard origins together with allow_credentials=True is
# rejected by browsers for credentialed requests — configure explicit
# origins for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure this properly for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# All chat endpoints are served under /api/chat.
app.include_router(chat_router, prefix="/api")

# Mount static files for the frontend (app.js, styles.css, index.html).
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/")
async def root():
    """Serve the single-page frontend."""
    return FileResponse("static/index.html")


@app.get("/health")
async def health_check():
    """Liveness probe endpoint."""
    return {"status": "healthy"}
models/chat.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel, Field
from typing import List, Dict, Literal, Optional, Union

class MessageContent(BaseModel):
    """One part of a multimodal message: either text or an image URL."""
    type: str = Field(..., description="Content type: 'text' or 'image_url'")
    text: Optional[str] = Field(None, description="Text content")
    image_url: Optional[Dict[str, str]] = Field(None, description="Image URL object with 'url' key")

class ChatMessage(BaseModel):
    """A single message in the conversation history."""
    role: str = Field(..., description="Role of the message sender (system, user, assistant)")
    content: Union[str, List[MessageContent]] = Field(..., description="Message content - string for simple text or list for mixed content")

class ChatSettings(BaseModel):
    """Sampling parameters forwarded to the model call."""
    temperature: float = Field(1.0, ge=0.0, le=2.0)
    top_p: float = Field(0.8, ge=0.0, le=1.0)
    reasoning_effort: Literal["low", "medium", "high"] = "low"

class ChatRequest(BaseModel):
    """Request payload for both /chat and /chat/stream endpoints."""
    messages: List[ChatMessage] = Field(..., description="List of messages in the conversation")
    settings: Optional[ChatSettings] = Field(default_factory=ChatSettings)
    command: Optional[Literal["search", "url_context"]] = None

class ChatResponse(BaseModel):
    """Non-streaming completion result; `error` is set instead of raising."""
    content: str
    reasoning_content: Optional[str] = None
    error: Optional[str] = None

class StreamResponse(BaseModel):
    """One streamed chunk; `finished=True` marks the final event."""
    content: Optional[str] = None
    reasoning_content: Optional[str] = None
    finished: bool = False
    error: Optional[str] = None
prompts.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# prompts.py
# Replace this with your actual system prompt

# System prompt prepended as the first message of every conversation by
# ChatService._prepare_messages; client-supplied system messages are dropped.
SYSTEM_PROMPT = """You are a helpful AI assistant. You can analyze text and images, answer questions, and help with various tasks.

When provided with images, analyze them carefully and describe what you see. When asked to search for information, use the available tools to find current information.

Be helpful, accurate, and concise in your responses."""
pyproject.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "student-homework-assistant"
3
+ version = "0.1.0"
4
+ description = "Student homework assistant API built with FastAPI and LiteLLM (Gemini)"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ "chainlit>=2.7.2",
9
+ "fastapi>=0.116.1",
10
+ "litellm>=1.76.2",
11
+ "pillow>=11.3.0",
12
+ "pydantic-settings>=2.10.1",
13
+ "uvicorn>=0.35.0",
14
+ ]
services/chat_service.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from typing import List, Dict, Any, Optional, AsyncGenerator
from litellm import acompletion

from models.chat import ChatRequest, ChatResponse, ChatSettings, StreamResponse
from prompts import SYSTEM_PROMPT


class ChatService:
    """Thin wrapper around litellm's Gemini completion API.

    Builds the message list (system prompt first), maps the optional
    request command to Gemini tool declarations, and exposes both
    one-shot and streaming completion calls.
    """

    def __init__(self):
        # MODEL env var selects the Gemini model; defaults to 2.5-flash.
        self.model = f"gemini/{os.getenv('MODEL', 'gemini-2.5-flash')}"
        self.api_key = os.getenv("GOOGLE_API_KEY")

        if not self.api_key:
            raise ValueError("GOOGLE_API_KEY environment variable is required")

    def _completion_kwargs(self, request: ChatRequest, stream: bool) -> Dict[str, Any]:
        """Assemble the keyword arguments shared by both completion calls."""
        # `settings` is Optional in the schema: a client sending an explicit
        # null bypasses the default_factory, so guard against None here.
        settings = request.settings or ChatSettings()
        return {
            "model": self.model,
            "messages": self._prepare_messages(request),
            "temperature": settings.temperature,
            "top_p": settings.top_p,
            "reasoning_effort": settings.reasoning_effort,
            "api_key": self.api_key,
            "tools": self._prepare_tools(request.command),
            "stream": stream,
        }

    async def process_chat(self, request: ChatRequest) -> ChatResponse:
        """Run a non-streaming completion and wrap the result.

        Failures are reported via ChatResponse.error rather than raised,
        matching the API layer's expectations.
        """
        try:
            response = await acompletion(**self._completion_kwargs(request, stream=False))
            message = response.choices[0].message
            return ChatResponse(
                content=message.content or "",
                reasoning_content=getattr(message, "reasoning_content", None),
            )
        except Exception as e:
            return ChatResponse(content="", error=str(e))

    async def stream_chat(self, request: ChatRequest) -> AsyncGenerator[StreamResponse, None]:
        """Yield StreamResponse chunks as the model produces them.

        Chunks carrying neither content nor reasoning are skipped;
        failures are yielded as a final chunk with `error` set.
        """
        try:
            response = await acompletion(**self._completion_kwargs(request, stream=True))

            async for part in response:
                delta = part.choices[0].delta
                chunk_response = StreamResponse()

                reasoning = getattr(delta, "reasoning_content", None)
                if reasoning:
                    chunk_response.reasoning_content = reasoning

                content = getattr(delta, "content", None) or ""
                if content:
                    chunk_response.content = content

                if chunk_response.content or chunk_response.reasoning_content:
                    yield chunk_response

        except Exception as e:
            yield StreamResponse(error=str(e), finished=True)

    def _prepare_messages(self, request: ChatRequest) -> List[Dict[str, Any]]:
        """Convert the request into litellm's message-dict format.

        The configured SYSTEM_PROMPT is always the first message; any
        system messages supplied by the client are dropped so exactly
        one system prompt reaches the model.
        """
        messages: List[Dict[str, Any]] = [{"role": "system", "content": SYSTEM_PROMPT}]

        for message in request.messages:
            if message.role == "system":
                # Skip additional system messages since we already added one.
                continue

            if isinstance(message.content, str):
                # Simple text message.
                messages.append({"role": message.role, "content": message.content})
                continue

            # Mixed content (text and/or images): serialize each pydantic
            # part to a plain dict. Prefer pydantic-v2 model_dump(), fall
            # back to the deprecated .dict(), and pass raw dicts through.
            content_parts: List[Dict[str, Any]] = []
            for part in message.content:
                if hasattr(part, "model_dump"):
                    content_parts.append(part.model_dump())
                elif hasattr(part, "dict"):
                    content_parts.append(part.dict())
                elif isinstance(part, dict):
                    content_parts.append(part)

            messages.append({"role": message.role, "content": content_parts})

        return messages

    def _prepare_tools(self, command: Optional[str]) -> List[Dict[str, Any]]:
        """Map the request command to Gemini tool declarations.

        `command` is None for plain chat, yielding no tools.
        """
        if command == "search":
            return [{"googleSearch": {}}]
        if command == "url_context":
            return [{"urlContext": {}}]
        return []
static/app.js ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Cached DOM references for the chat UI (ids defined in index.html).
const messagesEl = document.getElementById('messages');
const formEl = document.getElementById('chat-form');
const inputEl = document.getElementById('user-input');
const sendBtn = document.getElementById('send-btn');
const statusEl = document.getElementById('status');
const commandEl = document.getElementById('command');
const imageInput = document.getElementById('image-input');
const addImageBtn = document.getElementById('add-image-btn');
const clearImagesBtn = document.getElementById('clear-images-btn');
const imagePreviews = document.getElementById('image-previews');

// Conversation state
let conversation = []; // full message history sent to the backend each turn
let pendingImages = []; // data URLs of images attached but not yet sent
15
+
16
/**
 * Append one message to the chat log and return its bubble element so
 * the caller can update it incrementally while streaming.
 */
function renderMessage(message, isStreaming = false) {
  const row = document.createElement('div');
  row.className = `message role-${message.role}`;

  const badge = document.createElement('div');
  badge.className = 'avatar';
  badge.textContent = message.role === 'user' ? 'U' : 'A';

  const bubble = document.createElement('div');
  bubble.className = 'bubble';
  if (isStreaming) bubble.classList.add('assistant-streaming');

  // Resolve the text portion: content is a plain string, or a parts
  // array whose first 'text' part carries the text.
  let bodyText = '';
  if (typeof message.content === 'string') {
    bodyText = message.content;
  } else if (Array.isArray(message.content)) {
    bodyText = message.content.find(p => p.type === 'text')?.text || '';
  }
  const textNode = document.createElement('div');
  textNode.innerText = bodyText;
  bubble.appendChild(textNode);

  // Render any attached images below the text.
  if (Array.isArray(message.content)) {
    const imageParts = message.content.filter(p => p.type === 'image_url');
    if (imageParts.length) {
      const gallery = document.createElement('div');
      gallery.className = 'bubble-images';
      for (const part of imageParts) {
        const imgEl = document.createElement('img');
        imgEl.src = part.image_url?.url || '';
        gallery.appendChild(imgEl);
      }
      bubble.appendChild(gallery);
    }
  }

  row.appendChild(badge);
  row.appendChild(bubble);
  messagesEl.appendChild(row);
  messagesEl.scrollTop = messagesEl.scrollHeight;
  return bubble; // caller updates this during streaming
}
60
+
61
// Toggle the send button and status line while a request is in flight.
function setBusy(busy) {
  statusEl.textContent = busy ? 'Thinking…' : 'Ready';
  sendBtn.disabled = busy;
}
65
+
66
// Build the JSON request body for the backend from the current history.
// An empty command string is normalized to null.
function buildRequestPayload(command) {
  const settings = {
    temperature: 1.0,
    top_p: 0.8,
    reasoning_effort: 'low'
  };
  return {
    messages: conversation,
    settings,
    command: command || null
  };
}
77
+
78
/**
 * Send the conversation to the streaming endpoint and render the
 * assistant's reply incrementally.
 *
 * Fixes over the previous version:
 *  - buffers the trailing partial SSE line across reads, so a JSON
 *    payload split between two network chunks is not dropped;
 *  - server-reported errors (data.error) are parsed OUTSIDE the
 *    JSON-parse try block, so they propagate to the display handler
 *    instead of being swallowed by the "ignore non-JSON line" catch.
 */
async function sendChat(command) {
  setBusy(true);
  const userMsg = conversation[conversation.length - 1];
  renderMessage(userMsg);
  const assistantMsg = { role: 'assistant', content: '' };
  const assistantBubble = renderMessage(assistantMsg, true);

  try {
    const res = await fetch('/api/chat/stream', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(buildRequestPayload(command))
    });

    if (!res.ok || !res.body) {
      throw new Error(`Request failed: ${res.status}`);
    }

    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let assistantText = '';
    let buffer = ''; // incomplete trailing line carried to the next read
    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });

      // SSE-style lines: data: {json}\n\n — keep the last (possibly
      // partial) line in the buffer for the next read.
      const lines = buffer.split('\n');
      buffer = lines.pop() ?? '';
      for (const line of lines) {
        if (!line.startsWith('data:')) continue;
        const jsonStr = line.slice(5).trim();
        if (!jsonStr) continue;
        let data;
        try {
          data = JSON.parse(jsonStr);
        } catch {
          continue; // malformed line, skip
        }
        if (data.error) {
          throw new Error(data.error);
        }
        if (data.content) {
          assistantText += data.content;
          assistantBubble.firstChild.innerText = assistantText;
        }
        if (data.reasoning_content) {
          // Optionally, we could display reasoning in a muted style
        }
        if (data.finished) {
          assistantBubble.classList.remove('assistant-streaming');
          // Push final assistant message to history
          conversation.push({ role: 'assistant', content: assistantText });
        }
      }
    }
  } catch (err) {
    assistantBubble.classList.remove('assistant-streaming');
    assistantBubble.firstChild.innerText = `Error: ${err.message}`;
    assistantBubble.classList.add('error');
  } finally {
    setBusy(false);
    try { localStorage.setItem('sha_conversation', JSON.stringify(conversation)); } catch {}
  }
}
141
+
142
// Submit handler: package text + any pending images into a user message,
// append it to the history, and kick off the streaming request.
formEl.addEventListener('submit', (e) => {
  e.preventDefault();
  const text = inputEl.value.trim();
  const command = commandEl.value;
  if (!text && pendingImages.length === 0) return;

  const contentParts = [];
  if (text) contentParts.push({ type: 'text', text });
  for (const url of pendingImages) {
    contentParts.push({ type: 'image_url', image_url: { url } });
  }

  // Text-only messages stay a plain string; anything with images uses
  // the structured parts format expected by the backend models.
  const userMsg = contentParts.length === 1 && text
    ? { role: 'user', content: text }
    : { role: 'user', content: contentParts };

  conversation.push(userMsg);

  inputEl.value = '';
  clearPendingImages();
  sendChat(command);
});
164
+
165
// Auto-resize textarea up to 120px as the user types.
inputEl.addEventListener('input', () => {
  inputEl.style.height = 'auto';
  inputEl.style.height = Math.min(inputEl.scrollHeight, 120) + 'px';
});

// Focus input on load and restore session
// (conversation persisted to localStorage by sendChat's finally block).
window.addEventListener('load', () => {
  inputEl.focus();
  try {
    const saved = localStorage.getItem('sha_conversation');
    if (saved) {
      conversation = JSON.parse(saved);
      messagesEl.innerHTML = '';
      for (const msg of conversation) {
        renderMessage(msg);
      }
    }
  } catch {}
});
185
+
186
// Image handling
// The camera button proxies clicks to the hidden file input.
addImageBtn.addEventListener('click', () => imageInput.click());
// Read each selected file into a data URL and queue it for the next send.
imageInput.addEventListener('change', async (e) => {
  const files = Array.from(e.target.files || []);
  for (const file of files) {
    const dataUrl = await fileToDataUrl(file);
    pendingImages.push(dataUrl);
  }
  renderPendingImages();
  imageInput.value = ''; // reset so re-selecting the same file fires change
});

// Drop all queued images.
clearImagesBtn.addEventListener('click', () => {
  pendingImages = [];
  renderPendingImages();
});
202
+
203
// Read a File into a base64 data URL using FileReader.
function fileToDataUrl(file) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onerror = reject;
    reader.onload = () => resolve(reader.result);
    reader.readAsDataURL(file);
  });
}
211
+
212
// Rebuild the pending-image preview strip from the current queue.
function renderPendingImages() {
  imagePreviews.innerHTML = '';
  pendingImages.forEach((url, idx) => {
    const tile = document.createElement('div');
    tile.className = 'image-preview';

    const thumb = document.createElement('img');
    thumb.src = url;

    const removeBtn = document.createElement('button');
    removeBtn.className = 'remove';
    removeBtn.type = 'button';
    removeBtn.textContent = '×';
    // Removing one image re-renders so the remaining indices stay valid.
    removeBtn.addEventListener('click', () => {
      pendingImages.splice(idx, 1);
      renderPendingImages();
    });

    tile.appendChild(thumb);
    tile.appendChild(removeBtn);
    imagePreviews.appendChild(tile);
  });
}
232
+
233
// Drop all queued images and refresh the preview strip (used after send).
function clearPendingImages() {
  pendingImages = [];
  renderPendingImages();
}
237
+
238
+
239
+
static/index.html ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Student Homework Assistant</title>
    <link rel="stylesheet" href="/static/styles.css">
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
</head>
<body>
    <div class="app">
        <header class="app-header">
            <div class="brand">Student Homework Assistant</div>
            <!-- Live status text updated by app.js (Ready / Thinking) -->
            <div class="status" id="status">Ready</div>
        </header>

        <main class="chat-container">
            <!-- Chat transcript; app.js appends message rows here -->
            <div id="messages" class="messages" aria-live="polite"></div>

            <form id="chat-form" class="chat-input" autocomplete="off">
                <!-- Optional tool command forwarded to the backend request -->
                <select id="command" class="command-select" title="Command">
                    <option value="">Chat</option>
                    <option value="search">Search</option>
                    <option value="url_context">URL Context</option>
                </select>
                <div class="input-area">
                    <!-- Thumbnails of images queued for the next message -->
                    <div id="image-previews" class="image-previews" aria-live="polite"></div>
                    <textarea id="user-input" placeholder="Ask a question..." rows="1"></textarea>
                </div>
                <div class="actions">
                    <!-- Hidden file input; the camera button proxies to it -->
                    <input id="image-input" type="file" accept="image/*" hidden>
                    <button id="add-image-btn" type="button" class="icon-btn" title="Add image" aria-label="Add image">📷</button>
                    <button id="clear-images-btn" type="button" class="icon-btn" title="Clear images" aria-label="Clear images">✖️</button>
                    <button id="send-btn" type="submit" class="send-btn">Send</button>
                </div>
            </form>
        </main>
    </div>

    <script src="/static/app.js"></script>
</body>
<!-- More content could be added here (settings, history) -->
</html>
46
+
47
+
static/styles.css ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Design tokens for the dark theme */
:root {
  --bg: #0b1220;
  --panel: #121a2b;
  --muted: #93a1ba;
  --text: #e6ebf5;
  --acc: #5b8cff;
  --acc-2: #21d4a3;
  --danger: #ff6b6b;
  --border: #223154;
}

/* Global resets and page background */
* { box-sizing: border-box; }
html, body { height: 100%; }
body {
  margin: 0;
  font-family: Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif;
  color: var(--text);
  background: radial-gradient(1200px 800px at 80% -10%, #1a2a4a55, transparent 70%),
              radial-gradient(1200px 800px at -10% 120%, #163a3a55, transparent 70%),
              var(--bg);
}

/* Layout shell: header + chat column */
.app { max-width: 920px; margin: 0 auto; padding: 16px; height: 100%; display: flex; flex-direction: column; }
.app-header { display: flex; justify-content: space-between; align-items: center; padding: 12px 16px; border: 1px solid var(--border); background: linear-gradient(180deg, #17233b 0%, #101827 100%); border-radius: 14px; box-shadow: 0 10px 30px rgba(0,0,0,0.25) }
.brand { font-weight: 700; letter-spacing: 0.2px; }
.status { color: var(--muted); font-size: 13px; }

/* Scrollable message transcript */
.chat-container { flex: 1; display: flex; flex-direction: column; gap: 12px; margin-top: 14px; }
.messages { flex: 1; overflow-y: auto; padding: 12px; border: 1px solid var(--border); background: #0e1627aa; border-radius: 14px; scroll-behavior: smooth; }

/* Message rows: avatar column + bubble, tinted per role */
.message { display: grid; grid-template-columns: 36px 1fr; gap: 10px; padding: 10px 8px; border-bottom: 1px dashed #23324f; }
.message:last-child { border-bottom: none; }
.avatar { width: 36px; height: 36px; border-radius: 10px; align-self: start; background: #152341; display:flex; align-items:center; justify-content:center; color:#9db4ff; font-weight:700; }
.role-user .avatar{ background:#182b4d; color:#bfe1ff }
.role-assistant .avatar{ background:#193b2f; color:#7ff0c4 }
.bubble { padding: 10px 12px; border-radius: 12px; background: #13203b; border: 1px solid #23324f; }
.role-user .bubble { background: #162343; }
.role-assistant .bubble { background: #132f2a; border-color: #1f4b40; }

/* Composer: command select, textarea, action buttons */
.chat-input { display: grid; grid-template-columns: 140px 1fr auto; gap: 10px; padding: 12px; border: 1px solid var(--border); background: #0e1627aa; border-radius: 14px; align-items: end; }
.command-select { height: 40px; border-radius: 10px; border: 1px solid var(--border); background: #101a30; color: var(--text); padding: 0 10px; }
textarea { width: 100%; resize: none; max-height: 120px; min-height: 40px; padding: 10px 12px; border-radius: 10px; border: 1px solid var(--border); background: #101a30; color: var(--text); outline: none; }
.send-btn { height: 40px; padding: 0 16px; border-radius: 10px; border: 1px solid #3a57a8; background: linear-gradient(180deg, #496eea 0%, #3754bd 100%); color: white; font-weight: 600; }
.send-btn:disabled{ opacity: .7; cursor: not-allowed; }

/* Animated stripes while the assistant reply is streaming */
.assistant-streaming { background: repeating-linear-gradient(90deg, #132f2a, #132f2a 10px, #13392f 10px, #13392f 20px); animation: barber 1.2s linear infinite; }
@keyframes barber { from { background-position: 0 0; } to { background-position: 40px 0; } }

.error { color: var(--danger); }

/* Input area */
.input-area { display: flex; flex-direction: column; gap: 8px; }
.actions { display: flex; align-items: center; gap: 8px; }
.icon-btn { height: 40px; padding: 0 12px; border-radius: 10px; border: 1px solid var(--border); background: #0f1a33; color: var(--text); }

/* Image previews (queued attachments in the composer) */
.image-previews { display: flex; flex-wrap: wrap; gap: 8px; }
.image-preview { position: relative; width: 56px; height: 56px; border-radius: 10px; overflow: hidden; border: 1px solid var(--border); background: #111a30; }
.image-preview img { width: 100%; height: 100%; object-fit: cover; display: block; }
.image-preview .remove { position: absolute; top: 2px; right: 2px; background: rgba(0,0,0,0.6); color: white; border: none; border-radius: 6px; font-size: 12px; padding: 2px 4px; cursor: pointer; }

/* Images inside bubbles */
.bubble-images { display: flex; flex-wrap: wrap; gap: 8px; margin-top: 8px; }
.bubble-images img { max-width: 180px; max-height: 180px; border-radius: 8px; border: 1px solid #23324f; }
65
+
66
+
utils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ from PIL import Image
4
+ from typing import Optional, Dict, Any
5
+
6
def validate_base64_image(base64_string: str) -> bool:
    """Return True if *base64_string* decodes to a readable image.

    Accepts either a bare base64 payload or a full ``data:`` URL; any
    decode or image-parsing failure yields False.
    """
    try:
        payload = base64_string
        if base64_string.startswith('data:') and ',' in base64_string:
            # Strip the "data:<mime>;base64," prefix from a data URL.
            payload = base64_string.split(',', 1)[1]

        decoded = base64.b64decode(payload)
        # verify() raises on corrupt or non-image data.
        Image.open(io.BytesIO(decoded)).verify()
        return True

    except Exception:
        return False
25
+
26
def get_image_mime_type(base64_string: str) -> str:
    """Best-effort MIME type detection for a base64-encoded image.

    Prefers the MIME type embedded in a ``data:`` URL header; otherwise
    decodes the payload and asks Pillow for the format. Falls back to
    ``image/jpeg`` when detection fails. (The original had a duplicated
    ``startswith('data:')`` check; this folds both into one branch.)
    """
    try:
        payload = base64_string
        if base64_string.startswith('data:'):
            if ';' in base64_string:
                # data:<mime>;base64,... — the MIME type is explicit.
                return base64_string.split(';')[0].replace('data:', '')
            if ',' in base64_string:
                payload = base64_string.split(',', 1)[1]

        # Fall back to sniffing the decoded bytes with Pillow.
        image = Image.open(io.BytesIO(base64.b64decode(payload)))

        format_mapping = {
            'JPEG': 'image/jpeg',
            'PNG': 'image/png',
            'GIF': 'image/gif',
            'WEBP': 'image/webp',
        }
        return format_mapping.get(image.format, 'image/jpeg')

    except Exception:
        return 'image/jpeg'  # Default fallback
56
+
57
def ensure_data_url_format(image_data: str) -> str:
    """Normalize *image_data* into a ``data:`` URL.

    Already-formatted data URLs pass through untouched; bare base64
    payloads get a detected MIME-type prefix attached.
    """
    if image_data.startswith('data:'):
        return image_data

    # Detect MIME type and add data URL prefix.
    return f"data:{get_image_mime_type(image_data)};base64,{image_data}"
uv.lock ADDED
The diff for this file is too large to render. See raw diff