compendious commited on
Commit
6df9ff0
·
1 Parent(s): e35e5e3

Got the basics of the product/API working. Just need to fine-tune the model

Browse files
backend/app.py CHANGED
@@ -1,18 +1,38 @@
1
- """FastAPI backend for Précis."""
2
 
 
 
 
 
 
3
  from fastapi.middleware.cors import CORSMiddleware
4
- from fastapi import FastAPI, HTTPException, UploadFile, File, Form
5
  from fastapi.responses import HTMLResponse
6
  from pydantic import BaseModel
7
- from typing import Optional
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
  app = FastAPI(
10
  title="Précis API",
11
- description="Content summarization API",
12
- version="0.1.0"
13
  )
14
 
15
- # Add CORS middleware
16
  app.add_middleware(
17
  CORSMiddleware,
18
  allow_origins=["*"],
@@ -22,21 +42,85 @@ app.add_middleware(
22
  )
23
 
24
 
 
 
 
25
 
26
  class YouTubeRequest(BaseModel):
27
  url: str
28
  max_length: Optional[int] = 512
29
 
 
30
  class TranscriptRequest(BaseModel):
31
  text: str
 
32
  max_length: Optional[int] = 512
33
 
 
34
  class SummarizeResponse(BaseModel):
35
  summary: str
36
  success: bool
37
  source_type: str
 
 
38
 
 
 
 
39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
  @app.get("/", response_class=HTMLResponse)
42
  async def root():
@@ -50,17 +134,20 @@ async def root():
50
  body { font-family: system-ui; max-width: 800px; margin: 50px auto; padding: 20px; }
51
  h1 { color: #333; }
52
  code { background: #f4f4f4; padding: 2px 6px; border-radius: 3px; }
 
53
  </style>
54
  </head>
55
  <body>
56
  <h1>Précis API</h1>
57
- <p>Content summarization service</p>
58
  <h2>Endpoints</h2>
59
  <ul>
60
- <li><code>POST /summarize</code> - Summarize content from URL</li>
61
- <li><code>GET /health</code> - Health check</li>
62
- <li><code>GET /status</code> - Service status</li>
63
- <li><code>GET /docs</code> - API documentation</li>
 
 
64
  </ul>
65
  </body>
66
  </html>
@@ -75,53 +162,77 @@ async def health():
75
 
76
  @app.get("/status")
77
  async def status():
78
- """Service status endpoint."""
 
 
 
 
 
 
 
 
79
  return {
80
  "service": "Précis API",
81
- "version": "0.1.0",
82
- "model": "Qwen/Qwen2.5-7B-Instruct",
83
- "model_loaded": False, # TODO: Track actual model state
84
- "endpoints": ["/", "/health", "/status", "/summarize"]
 
85
  }
86
 
87
 
88
- @app.post("/summarize/youtube", response_model=SummarizeResponse)
89
- async def summarize_youtube(request: YouTubeRequest):
90
- """Summarize a YouTube video from its URL."""
91
- # TODO: Implement YT transcript extraction and summarization
92
- return SummarizeResponse(
93
- summary=f"Summary for YouTube video at {request.url}. (Placeholder)",
94
- success=True,
95
- source_type="youtube"
96
- )
97
-
98
  @app.post("/summarize/transcript", response_model=SummarizeResponse)
99
  async def summarize_transcript(request: TranscriptRequest):
100
- """Summarize a provided transcript or article text."""
101
- # TODO: Implement summarization
102
- return SummarizeResponse(
103
- summary=f"Summary for provided text ({len(request.text)} chars). (Placeholder)",
104
- success=True,
105
- source_type="transcript"
106
- )
 
 
107
 
108
  @app.post("/summarize/file", response_model=SummarizeResponse)
109
  async def summarize_file(file: UploadFile = File(...)):
110
- """Summarize content from a .txt file."""
111
  if not file.filename.endswith(".txt"):
112
  raise HTTPException(status_code=400, detail="Only .txt files are supported")
113
-
114
  content = await file.read()
115
  text = content.decode("utf-8")
116
-
117
- # TODO: Implement summarization
118
- return SummarizeResponse(
119
- summary=f"Summary for file {file.filename} ({len(text)} chars). (Placeholder)",
120
- success=True,
121
- source_type="file"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  )
123
 
124
 
 
 
 
125
 
126
  if __name__ == "__main__":
127
  import uvicorn
 
1
+ """FastAPI backend for Précis — powered by Ollama (phi4-mini:3.8b)."""
2
 
3
+ import logging
4
+ from typing import Optional
5
+
6
+ import httpx
7
+ from fastapi import FastAPI, HTTPException, UploadFile, File
8
  from fastapi.middleware.cors import CORSMiddleware
 
9
  from fastapi.responses import HTMLResponse
10
  from pydantic import BaseModel
11
+
12
+ # ---------------------------------------------------------------------------
13
+ # Config
14
+ # ---------------------------------------------------------------------------
15
+
16
+ OLLAMA_BASE_URL = "http://127.0.0.1:11434"
17
+ OLLAMA_COMPLETIONS_URL = f"{OLLAMA_BASE_URL}/v1/completions"
18
+ MODEL_NAME = "phi4-mini:3.8b"
19
+
20
+ # Tokens to generate for the summary — keep short for speed
21
+ MAX_SUMMARY_TOKENS = 120
22
+ TEMPERATURE = 0.2
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+ # ---------------------------------------------------------------------------
27
+ # App
28
+ # ---------------------------------------------------------------------------
29
 
30
  app = FastAPI(
31
  title="Précis API",
32
+ description="Content summarisation service powered by phi4-mini via Ollama",
33
+ version="0.2.0",
34
  )
35
 
 
36
  app.add_middleware(
37
  CORSMiddleware,
38
  allow_origins=["*"],
 
42
  )
43
 
44
 
45
+ # ---------------------------------------------------------------------------
46
+ # Schemas
47
+ # ---------------------------------------------------------------------------
48
 
49
  class YouTubeRequest(BaseModel):
50
  url: str
51
  max_length: Optional[int] = 512
52
 
53
+
54
  class TranscriptRequest(BaseModel):
55
  text: str
56
+ title: Optional[str] = None
57
  max_length: Optional[int] = 512
58
 
59
+
60
  class SummarizeResponse(BaseModel):
61
  summary: str
62
  success: bool
63
  source_type: str
64
+ model: str = MODEL_NAME
65
+
66
 
67
+ # ---------------------------------------------------------------------------
68
+ # Ollama helper
69
+ # ---------------------------------------------------------------------------
70
 
71
+ def _build_prompt(title: Optional[str], text: str) -> str:
72
+ header = f"Title: {title}\n" if title else ""
73
+ return (
74
+ "Summarise the following article in 2–4 clear, factual sentences. "
75
+ "Do not add opinions or commentary.\n\n"
76
+ f"{header}"
77
+ f"Article:\n{text}\n\n"
78
+ "Summary:"
79
+ )
80
+
81
+
82
+ async def call_ollama(prompt: str, max_tokens: int = MAX_SUMMARY_TOKENS) -> str:
83
+ """Send a prompt to the local Ollama completions endpoint and return the text."""
84
+ payload = {
85
+ "model": MODEL_NAME,
86
+ "prompt": prompt,
87
+ "max_tokens": max_tokens,
88
+ "temperature": TEMPERATURE,
89
+ "stop": ["\n\n", "Article:", "Title:"], # prevent runaway generation
90
+ }
91
+
92
+ async with httpx.AsyncClient(timeout=120.0) as client:
93
+ try:
94
+ resp = await client.post(OLLAMA_COMPLETIONS_URL, json=payload)
95
+ resp.raise_for_status()
96
+ except httpx.ConnectError:
97
+ raise HTTPException(
98
+ status_code=503,
99
+ detail=(
100
+ "Cannot reach Ollama at 127.0.0.1:11434. "
101
+ "Make sure `ollama serve` is running."
102
+ ),
103
+ )
104
+ except httpx.HTTPStatusError as exc:
105
+ raise HTTPException(
106
+ status_code=502,
107
+ detail=f"Ollama returned an error: {exc.response.text}",
108
+ )
109
+
110
+ data = resp.json()
111
+ try:
112
+ return data["choices"][0]["text"].strip()
113
+ except (KeyError, IndexError) as exc:
114
+ logger.error("Unexpected Ollama response: %s", data)
115
+ raise HTTPException(
116
+ status_code=502,
117
+ detail=f"Unexpected response shape from Ollama: {exc}",
118
+ )
119
+
120
+
121
+ # ---------------------------------------------------------------------------
122
+ # Routes
123
+ # ---------------------------------------------------------------------------
124
 
125
  @app.get("/", response_class=HTMLResponse)
126
  async def root():
 
134
  body { font-family: system-ui; max-width: 800px; margin: 50px auto; padding: 20px; }
135
  h1 { color: #333; }
136
  code { background: #f4f4f4; padding: 2px 6px; border-radius: 3px; }
137
+ .model { color: #6366f1; font-weight: bold; }
138
  </style>
139
  </head>
140
  <body>
141
  <h1>Précis API</h1>
142
+ <p>Model: <span class="model">phi4-mini:3.8b</span> via Ollama</p>
143
  <h2>Endpoints</h2>
144
  <ul>
145
+ <li><code>POST /summarize/transcript</code> — Summarise raw text</li>
146
+ <li><code>POST /summarize/file</code> — Summarise a .txt file</li>
147
+ <li><code>POST /summarize/youtube</code> — Summarise a YouTube video (transcript required)</li>
148
+ <li><code>GET /health</code> — Health check</li>
149
+ <li><code>GET /status</code> — Service status</li>
150
+ <li><code>GET /docs</code> — Interactive API docs</li>
151
  </ul>
152
  </body>
153
  </html>
 
162
 
163
  @app.get("/status")
164
  async def status():
165
+ """Service status — also pings Ollama to confirm it is reachable."""
166
+ ollama_ok = False
167
+ try:
168
+ async with httpx.AsyncClient(timeout=5.0) as client:
169
+ r = await client.get(f"{OLLAMA_BASE_URL}/api/tags")
170
+ ollama_ok = r.status_code == 200
171
+ except Exception:
172
+ pass
173
+
174
  return {
175
  "service": "Précis API",
176
+ "version": "0.2.0",
177
+ "model": MODEL_NAME,
178
+ "ollama_reachable": ollama_ok,
179
+ "endpoints": ["/", "/health", "/status", "/summarize/transcript",
180
+ "/summarize/file", "/summarize/youtube"],
181
  }
182
 
183
 
 
 
 
 
 
 
 
 
 
 
184
  @app.post("/summarize/transcript", response_model=SummarizeResponse)
185
  async def summarize_transcript(request: TranscriptRequest):
186
+ """Summarise a provided article or transcript."""
187
+ if not request.text.strip():
188
+ raise HTTPException(status_code=400, detail="text must not be empty")
189
+
190
+ prompt = _build_prompt(request.title, request.text)
191
+ summary = await call_ollama(prompt)
192
+
193
+ return SummarizeResponse(summary=summary, success=True, source_type="transcript")
194
+
195
 
196
  @app.post("/summarize/file", response_model=SummarizeResponse)
197
  async def summarize_file(file: UploadFile = File(...)):
198
+ """Summarise content from an uploaded .txt file."""
199
  if not file.filename.endswith(".txt"):
200
  raise HTTPException(status_code=400, detail="Only .txt files are supported")
201
+
202
  content = await file.read()
203
  text = content.decode("utf-8")
204
+
205
+ if not text.strip():
206
+ raise HTTPException(status_code=400, detail="Uploaded file is empty")
207
+
208
+ prompt = _build_prompt(file.filename, text)
209
+ summary = await call_ollama(prompt)
210
+
211
+ return SummarizeResponse(summary=summary, success=True, source_type="file")
212
+
213
+
214
+ @app.post("/summarize/youtube", response_model=SummarizeResponse)
215
+ async def summarize_youtube(request: YouTubeRequest):
216
+ """
217
+ Summarise a YouTube video.
218
+
219
+ NOTE: Automatic transcript fetching is not yet implemented.
220
+ Pass the transcript text in a separate /summarize/transcript call,
221
+ or extend this endpoint with youtube-transcript-api.
222
+ """
223
+ # Placeholder — returns a clear message rather than silently lying
224
+ raise HTTPException(
225
+ status_code=501,
226
+ detail=(
227
+ "Automatic YouTube transcript fetching is not yet implemented. "
228
+ "Extract the transcript yourself and POST it to /summarize/transcript."
229
+ ),
230
  )
231
 
232
 
233
+ # ---------------------------------------------------------------------------
234
+ # Entry point
235
+ # ---------------------------------------------------------------------------
236
 
237
  if __name__ == "__main__":
238
  import uvicorn
frontend/README.md CHANGED
@@ -21,23 +21,13 @@ A GitHub-inspired dark theme frontend for the Précis content summarization API.
21
  npm install
22
  ```
23
 
24
- Or with Bun:
25
-
26
- ```bash
27
- bun install
28
- ```
29
-
30
  ### 2. Start the Development Server
31
 
32
  ```bash
33
  npm run dev
34
  ```
35
 
36
- Or with Bun:
37
-
38
- ```bash
39
- bun run dev
40
- ```
41
 
42
  The frontend will be available at [http://localhost:5173](http://localhost:5173).
43
 
 
21
  npm install
22
  ```
23
 
 
 
 
 
 
 
24
  ### 2. Start the Development Server
25
 
26
  ```bash
27
  npm run dev
28
  ```
29
 
30
+ (If you prefer a different package manager, such as Bun, `bun install` and `bun run dev` work as well.)
 
 
 
 
31
 
32
  The frontend will be available at [http://localhost:5173](http://localhost:5173).
33
 
frontend/src/App.css CHANGED
@@ -262,4 +262,57 @@
262
 
263
  .footer a:hover {
264
  text-decoration: underline;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
  }
 
262
 
263
  .footer a:hover {
264
  text-decoration: underline;
265
+ }
266
+
267
+ /* ── Inline result (transcript tab) ─────────────────────────────────────── */
268
+ .inline-result {
269
+ margin-top: var(--spacing-3);
270
+ padding: var(--spacing-3) var(--spacing-4);
271
+ border-radius: var(--radius-1);
272
+ font-size: 13px;
273
+ line-height: 1.6;
274
+ display: flex;
275
+ align-items: flex-start;
276
+ gap: var(--spacing-2);
277
+ border: 1px solid transparent;
278
+ }
279
+
280
+ .inline-result--success {
281
+ background-color: rgba(46, 160, 67, 0.08);
282
+ border-color: rgba(46, 160, 67, 0.3);
283
+ color: var(--color-fg-default);
284
+ flex-direction: column;
285
+ gap: var(--spacing-2);
286
+ }
287
+
288
+ .inline-result--error {
289
+ background-color: rgba(248, 81, 73, 0.08);
290
+ border-color: rgba(248, 81, 73, 0.3);
291
+ color: var(--color-danger-fg);
292
+ align-items: center;
293
+ }
294
+
295
+ .inline-result--loading {
296
+ background-color: var(--color-canvas-inset);
297
+ border-color: var(--color-border-muted);
298
+ color: var(--color-fg-muted);
299
+ align-items: center;
300
+ }
301
+
302
+ .inline-result__label {
303
+ display: flex;
304
+ align-items: center;
305
+ gap: var(--spacing-1);
306
+ font-size: 12px;
307
+ font-weight: 600;
308
+ color: rgba(46, 160, 67, 0.9);
309
+ width: 100%;
310
+ }
311
+
312
+ .inline-result__text {
313
+ font-size: 14px;
314
+ line-height: 1.7;
315
+ color: var(--color-fg-default);
316
+ white-space: pre-wrap;
317
+ margin: 0;
318
  }
frontend/src/App.jsx CHANGED
@@ -2,6 +2,8 @@ import { useState, useRef } from 'react'
2
  import './App.css'
3
 
4
  const API_BASE = 'http://localhost:8000'
 
 
5
 
6
  function App() {
7
  const [activeTab, setActiveTab] = useState('youtube')
@@ -13,6 +15,30 @@ function App() {
13
  const [error, setError] = useState(null)
14
  const fileInputRef = useRef(null)
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  const handleSubmit = async () => {
17
  setLoading(true)
18
  setError(null)
@@ -35,12 +61,8 @@ function App() {
35
  if (!transcript.trim()) {
36
  throw new Error('Please enter some text')
37
  }
38
- const res = await fetch(`${API_BASE}/summarize/transcript`, {
39
- method: 'POST',
40
- headers: { 'Content-Type': 'application/json' },
41
- body: JSON.stringify({ text: transcript })
42
- })
43
- result = await res.json()
44
  } else if (activeTab === 'file') {
45
  if (!selectedFile) {
46
  throw new Error('Please select a file')
@@ -164,6 +186,30 @@ function App() {
164
  />
165
  <p className="form-hint">Paste any text content you want to summarize.</p>
166
  </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  </div>
168
 
169
  {/* File Tab */}
@@ -248,8 +294,8 @@ function App() {
248
  </div>
249
  </div>
250
 
251
- {/* Error display */}
252
- {error && (
253
  <div className="response-section fade-in">
254
  <div className="response-card" style={{ borderColor: 'var(--color-danger-fg)' }}>
255
  <div className="response-header" style={{ borderColor: 'var(--color-danger-fg)' }}>
@@ -269,8 +315,8 @@ function App() {
269
  </div>
270
  )}
271
 
272
- {/* Response display */}
273
- {response && (
274
  <div className="response-section fade-in">
275
  <div className="response-card">
276
  <div className="response-header">
 
2
  import './App.css'
3
 
4
  const API_BASE = 'http://localhost:8000'
5
+ const OLLAMA_URL = 'http://127.0.0.1:11434/v1/completions'
6
+ const MODEL_NAME = 'phi4-mini:3.8b'
7
 
8
  function App() {
9
  const [activeTab, setActiveTab] = useState('youtube')
 
15
  const [error, setError] = useState(null)
16
  const fileInputRef = useRef(null)
17
 
18
+ const callOllama = async (text) => {
19
+ const prompt = `Summarise the following article in 2–4 clear, factual sentences. Do not add opinions or commentary.\n\nArticle:\n${text}\n\nSummary:`
20
+
21
+ const res = await fetch(OLLAMA_URL, {
22
+ method: 'POST',
23
+ headers: { 'Content-Type': 'application/json' },
24
+ body: JSON.stringify({
25
+ model: MODEL_NAME,
26
+ prompt,
27
+ max_tokens: 120,
28
+ temperature: 0.2,
29
+ stop: ['\n\n', 'Article:', 'Title:']
30
+ })
31
+ })
32
+
33
+ if (!res.ok) {
34
+ const body = await res.text()
35
+ throw new Error(`Ollama error (${res.status}): ${body}`)
36
+ }
37
+
38
+ const data = await res.json()
39
+ return data.choices[0].text.trim()
40
+ }
41
+
42
  const handleSubmit = async () => {
43
  setLoading(true)
44
  setError(null)
 
61
  if (!transcript.trim()) {
62
  throw new Error('Please enter some text')
63
  }
64
+ const summary = await callOllama(transcript)
65
+ result = { summary, success: true, source_type: 'transcript', model: MODEL_NAME }
 
 
 
 
66
  } else if (activeTab === 'file') {
67
  if (!selectedFile) {
68
  throw new Error('Please select a file')
 
186
  />
187
  <p className="form-hint">Paste any text content you want to summarize.</p>
188
  </div>
189
+
190
+ {/* Inline result — only shown when this tab triggered it */}
191
+ {activeTab === 'transcript' && error && (
192
+ <div className="inline-result inline-result--error fade-in">
193
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2"><circle cx="12" cy="12" r="10" /><line x1="12" y1="8" x2="12" y2="12" /><line x1="12" y1="16" x2="12.01" y2="16" /></svg>
194
+ {error}
195
+ </div>
196
+ )}
197
+ {activeTab === 'transcript' && loading && (
198
+ <div className="inline-result inline-result--loading fade-in">
199
+ <span className="loading-spinner" style={{ width: 14, height: 14 }} />
200
+ Generating summary…
201
+ </div>
202
+ )}
203
+ {activeTab === 'transcript' && response && !loading && (
204
+ <div className="inline-result inline-result--success fade-in">
205
+ <div className="inline-result__label">
206
+ <svg width="13" height="13" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2"><polyline points="20 6 9 17 4 12" /></svg>
207
+ Summary
208
+ <span className="response-badge" style={{ marginLeft: 'auto' }}>{response.model ?? 'phi4-mini'}</span>
209
+ </div>
210
+ <p className="inline-result__text">{response.summary}</p>
211
+ </div>
212
+ )}
213
  </div>
214
 
215
  {/* File Tab */}
 
294
  </div>
295
  </div>
296
 
297
+ {/* Error display — for YouTube / File tabs only (transcript shows inline) */}
298
+ {error && activeTab !== 'transcript' && (
299
  <div className="response-section fade-in">
300
  <div className="response-card" style={{ borderColor: 'var(--color-danger-fg)' }}>
301
  <div className="response-header" style={{ borderColor: 'var(--color-danger-fg)' }}>
 
315
  </div>
316
  )}
317
 
318
+ {/* Response display — for YouTube / File tabs only (transcript shows inline) */}
319
+ {response && activeTab !== 'transcript' && (
320
  <div className="response-section fade-in">
321
  <div className="response-card">
322
  <div className="response-header">
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- # Core ML
2
  torch
3
  transformers
4
  accelerate
@@ -10,3 +10,5 @@ sentencepiece
10
  # API
11
  fastapi
12
  uvicorn
 
 
 
1
+ # Core ML (training pipeline — not needed to run the API)
2
  torch
3
  transformers
4
  accelerate
 
10
  # API
11
  fastapi
12
  uvicorn
13
+ httpx # async HTTP client for Ollama calls
14
+ python-multipart # required by FastAPI for file uploads