Dmitry Beresnev committed
Commit · 4bd052e
Parent(s): d9a7411
change timeout
Files changed:
- app/pages/05_Dashboard.py (+16, -8)
- app/utils/ai_summary_cache.py (+22, -0)
- app/utils/llm_summarizer.py (+1, -1)
app/pages/05_Dashboard.py
CHANGED

@@ -876,17 +876,25 @@ st.markdown("---")
 @st.fragment(run_every=60)
 def render_ai_summary_section():
     summaries, last_update = ai_summary_cache.get_summaries()
+    status = ai_summary_cache.get_status()
     last_update_text = last_update.strftime("%Y-%m-%d %H:%M:%S") if last_update else "N/A"
+    buffer_remaining = status.get("buffer_remaining_seconds")
+    buffer_text = "N/A"
+    if buffer_remaining is not None:
+        buffer_text = f"{int(buffer_remaining)}s"
+
     st.markdown("## 🤖 AI Summary")
     st.markdown(
         f"""
-        <div style=…
-        <div style=…
-        <div style=…
+        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 20px; margin-bottom: 12px;">
+            <div style="color: #E0E3EB; font-size: 16px; font-weight: 600; margin-bottom: 6px;">Current AI Summarizations</div>
+            <div style="color: #D1D4DC; font-size: 14px; line-height: 1.6;">
         {ai_summarized} / {total_items} items summarized
-        <span style=…
+                <span style="color: #787B86; font-size: 12px; margin-left: 8px;">({ai_summary_pct:.1f}% coverage)</span>
         </div>
-        <div style=…
+            <div style="color: #787B86; font-size: 12px; margin-top: 6px;">Last update: {last_update_text}</div>
+            <div style="color: #787B86; font-size: 12px;">Buffer: {status.get("buffer_size", 0)} items, next flush in {buffer_text}</div>
+            <div style="color: #787B86; font-size: 12px;">Cache: {status.get("total_summaries", 0)} summaries, batch max ~{status.get("batch_max_chars", 0)} chars</div>
         </div>
         """,
         unsafe_allow_html=True,
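The `@st.fragment(run_every=60)` decorator in the hunk above is what drives the auto-refresh: Streamlit reruns only the decorated function on the given interval, leaving the rest of the page untouched. A minimal, self-contained sketch of the pattern, with a stand-in cache object (the real `ai_summary_cache` module is not part of this commit):

```python
# Sketch of the polling pattern: st.fragment(run_every=...) reruns only this
# function every 60 s. SummaryCache stands in for the real ai_summary_cache.
from datetime import datetime
import streamlit as st

class SummaryCache:
    def get_status(self) -> dict:
        return {"buffer_size": 3, "total_summaries": 42}  # hypothetical values

cache = SummaryCache()

@st.fragment(run_every=60)  # re-render this section every 60 seconds
def render_status():
    status = cache.get_status()
    st.write(
        f"{status['total_summaries']} summaries cached, "
        f"{status['buffer_size']} buffered (as of {datetime.now():%H:%M:%S})"
    )

render_status()
```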
@@ -899,9 +907,9 @@ def render_ai_summary_section():
         title = item.get("title", "")
         st.markdown(
             f"""
-            <div style=…
-            <div style=…
-            <div style=…
+            <div style="background: #131722; border: 1px solid #2A2E39; border-radius: 6px; padding: 10px; margin-bottom: 8px;">
+                <div style="color: #E0E3EB; font-size: 13px; font-weight: 600;">{source} — {title}</div>
+                <div style="color: #D1D4DC; font-size: 13px; margin-top: 4px;">{summary}</div>
             </div>
             """,
             unsafe_allow_html=True,
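One caveat on the restyled cards in the second hunk: `{source}`, `{title}`, and `{summary}` are interpolated straight into HTML that is rendered with `unsafe_allow_html=True`, so any markup inside a fetched title or a model-generated summary is rendered as well. If that is a concern, escaping the fields first is the standard precaution. A sketch with hypothetical item data (field names as in the diff):

```python
# Hypothetical item; in the dashboard these fields come from the summary cache.
import html

item = {"source": "arXiv", "title": "<b>Attention</b>", "summary": "x < y & z"}

source = html.escape(item.get("source", ""))   # "<" -> "&lt;", "&" -> "&amp;"
title = html.escape(item.get("title", ""))
summary = html.escape(item.get("summary", ""))

# Same interpolation shape as the diff, but any markup is now inert text.
card = f'<div>{source} — {title}: {summary}</div>'
print(card)
```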
app/utils/ai_summary_cache.py
CHANGED

@@ -76,6 +76,28 @@ class AISummaryCache:
         summaries.sort(key=lambda x: x.get("timestamp", datetime.min), reverse=True)
         return summaries, last_update
 
+    def get_status(self) -> Dict:
+        with self._lock:
+            buffer_size = len(self._buffer)
+            buffer_start = self._buffer_start
+            total_summaries = len(self._summaries)
+            last_update = self._last_update
+            buffer_age_seconds = None
+            buffer_remaining_seconds = None
+            if buffer_start:
+                buffer_age_seconds = (datetime.now() - buffer_start).total_seconds()
+                buffer_remaining_seconds = max(BUFFER_SECONDS - buffer_age_seconds, 0)
+            return {
+                "buffer_size": buffer_size,
+                "buffer_started_at": buffer_start,
+                "buffer_age_seconds": buffer_age_seconds,
+                "buffer_remaining_seconds": buffer_remaining_seconds,
+                "buffer_window_seconds": BUFFER_SECONDS,
+                "total_summaries": total_summaries,
+                "last_update": last_update,
+                "batch_max_chars": DEFAULT_BATCH_MAX_CHARS,
+            }
+
     def _item_key(self, item: Dict) -> str:
         if item.get("id") is not None:
             return str(item.get("id"))
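The new `get_status()` reads every shared field while holding `self._lock` and only then assembles the result, so the returned dict is a consistent snapshot even while another thread is appending to the buffer. A stripped-down sketch of the same snapshot-under-lock pattern; `BUFFER_SECONDS = 300` is an assumed value here, since the real constant is defined elsewhere in this module and not shown in the diff:

```python
# Snapshot-under-lock: read shared state inside the lock, then derive values
# from the local copies. BUFFER_SECONDS = 300 is an assumption for this sketch.
import threading
from datetime import datetime
from typing import Dict, Optional

BUFFER_SECONDS = 300  # assumed flush window

class TimedBuffer:
    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._buffer: list = []
        self._buffer_start: Optional[datetime] = None

    def add(self, item: Dict) -> None:
        with self._lock:
            if not self._buffer:
                self._buffer_start = datetime.now()  # window opens on first item
            self._buffer.append(item)

    def get_status(self) -> Dict:
        with self._lock:  # one acquisition -> internally consistent snapshot
            size = len(self._buffer)
            start = self._buffer_start
        remaining = None
        if start is not None:
            age = (datetime.now() - start).total_seconds()
            remaining = max(BUFFER_SECONDS - age, 0)
        return {"buffer_size": size, "buffer_remaining_seconds": remaining}
```

Deriving the age outside the lock keeps the critical section short; the committed version computes it inside, which is just as correct here since the arithmetic is cheap.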
app/utils/llm_summarizer.py
CHANGED

@@ -29,7 +29,7 @@ class OpenAICompatSummarizer:
         self.api_base = (api_base or os.getenv("LLM_API_BASE") or "https://researchengineering-agi.hf.space").rstrip("/")
         self.api_key = api_key if api_key is not None else os.getenv("LLM_API_KEY", "")
         self.model = model or os.getenv("LLM_MODEL", "gpt-4o-mini")
-        self.timeout = timeout or int(os.getenv("LLM_TIMEOUT", "…
+        self.timeout = timeout or int(os.getenv("LLM_TIMEOUT", "600"))
         # Conservative defaults to avoid large token bursts on slow servers.
         self.max_items_per_request = max_items_per_request or int(os.getenv("LLM_SUMMARY_BATCH", "2"))
         self.max_chars_per_item = max_chars_per_item or int(os.getenv("LLM_SUMMARY_MAX_CHARS", "600"))