file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
modules/text-injector.js | JavaScript | /**
* Text injection utilities for inserting text into provider textareas
* Handles both standard textarea elements and contenteditable elements
*/
/**
* Find a text input element by selector
* @param {string} selector - CSS selector for the element
* @returns {HTMLElement|null} - The found element or null
*/
/**
 * Locate a text input element in the current document.
 * @param {string} selector - CSS selector identifying the element
 * @returns {HTMLElement|null} - Matching element, or null when the selector
 *   is missing, not a string, syntactically invalid, or matches nothing
 */
export function findTextInputElement(selector) {
  const isUsableSelector = typeof selector === 'string' && selector.length > 0;
  if (!isUsableSelector) {
    return null;
  }
  try {
    return document.querySelector(selector);
  } catch (error) {
    // querySelector throws a SyntaxError on malformed selectors
    console.error('Error finding element:', error);
    return null;
  }
}
/**
* Inject text into an element (textarea or contenteditable)
* @param {HTMLElement} element - The target element
* @param {string} text - The text to inject
* @returns {boolean} - True if injection was successful
*/
/**
 * Inject text into an element (textarea or contenteditable)
 *
 * Appends `text` to the element's current content, dispatches a bubbling
 * `input` event so framework-managed inputs (React/Vue/etc.) observe the
 * change, then focuses the element and moves the caret to the end.
 *
 * @param {HTMLElement} element - The target element
 * @param {string} text - The text to inject (must be non-empty after trimming)
 * @returns {boolean} - True if injection was successful
 */
export function injectTextIntoElement(element, text) {
  if (!element || !text || typeof text !== 'string' || text.trim() === '') {
    return false;
  }
  try {
    const isTextarea = element.tagName === 'TEXTAREA' || element.tagName === 'INPUT';
    const isContentEditable = element.isContentEditable || element.getAttribute('contenteditable') === 'true';
    if (!isTextarea && !isContentEditable) {
      console.warn('Element is not a textarea or contenteditable:', element);
      return false;
    }
    if (isTextarea) {
      appendToFormField(element, text);
    } else {
      appendToContentEditable(element, text);
    }
    return true;
  } catch (error) {
    console.error('Error injecting text:', error);
    return false;
  }
}

/**
 * Append text to a textarea/input, notify frameworks, and move caret to end.
 * Uses the native prototype value setter: React overrides the instance
 * `value` accessor to track changes, so a plain `element.value = ...`
 * assignment is invisible to React-controlled inputs.
 * @param {HTMLTextAreaElement|HTMLInputElement} element - Form field to update
 * @param {string} text - Text to append
 */
function appendToFormField(element, text) {
  const newValue = (element.value || '') + text;
  const proto = element.tagName === 'TEXTAREA'
    ? HTMLTextAreaElement.prototype
    : HTMLInputElement.prototype;
  const nativeValueSetter = Object.getOwnPropertyDescriptor(proto, 'value')?.set;
  if (nativeValueSetter) {
    nativeValueSetter.call(element, newValue);
  } else {
    element.value = newValue;
  }
  // Trigger input event to notify React/Vue/etc
  element.dispatchEvent(new Event('input', { bubbles: true }));
  // Focus and move cursor to end
  element.focus();
  element.selectionStart = element.selectionEnd = element.value.length;
}

/**
 * Append text to a contenteditable element, notify frameworks, and move
 * the caret to the end. Note: textContent replacement flattens any rich
 * (element) children the editable host previously contained.
 * @param {HTMLElement} element - Contenteditable host to update
 * @param {string} text - Text to append
 */
function appendToContentEditable(element, text) {
  element.textContent = (element.textContent || '') + text;
  // Trigger input event
  element.dispatchEvent(new Event('input', { bubbles: true }));
  element.focus();
  // getSelection() can return null (e.g. detached documents) — skip caret
  // placement rather than throw.
  const selection = window.getSelection();
  if (!selection) {
    return;
  }
  const range = document.createRange();
  range.selectNodeContents(element);
  range.collapse(false); // Collapse to end
  selection.removeAllRanges();
  selection.addRange(range);
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/theme-manager.js | JavaScript | import { getSettings } from './settings.js';
/**
 * Resolve the operating-system color scheme.
 * @returns {string} 'dark' when the system prefers dark mode, otherwise 'light'
 */
export function detectTheme() {
  const darkQuery = window.matchMedia('(prefers-color-scheme: dark)');
  return darkQuery.matches ? 'dark' : 'light';
}
/**
 * Apply the user's theme preference to the document.
 * Resolves 'auto' against the system preference, then records the result
 * on <html> as `data-theme` for CSS to hook into.
 * @returns {Promise<string>} the theme that was applied, 'light' or 'dark'
 */
export async function applyTheme() {
  const settings = await getSettings();
  const preference = settings.theme;
  const theme = preference === 'auto' ? detectTheme() : preference; // 'light' or 'dark'
  document.documentElement.setAttribute('data-theme', theme);
  return theme;
}
// Re-apply the theme whenever the OS color scheme flips, but only for users
// who left the theme on 'auto' — explicit light/dark choices are untouched.
if (window.matchMedia) {
  const darkQuery = window.matchMedia('(prefers-color-scheme: dark)');
  darkQuery.addEventListener('change', async () => {
    const settings = await getSettings();
    if (settings.theme !== 'auto') {
      return;
    }
    applyTheme();
  });
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/url-validator.js | JavaScript | /**
* Validates and sanitizes URLs for security
*/
/**
* Checks if a URL string is valid and safe to use in iframes
* @param {string} urlString - The URL to validate
* @returns {boolean} - True if URL is valid and safe
*/
/**
 * Checks if a URL string is valid and safe to use in iframes
 * @param {string} urlString - The URL to validate
 * @returns {boolean} - True if URL is valid and safe
 */
export function isValidUrl(urlString) {
  if (typeof urlString !== 'string' || urlString === '') {
    return false;
  }
  let parsed;
  try {
    parsed = new URL(urlString);
  } catch (error) {
    // Invalid URL format
    return false;
  }
  // Only allow http and https protocols (prevent javascript:, data:, file:, etc.)
  const allowedProtocols = ['http:', 'https:'];
  if (!allowedProtocols.includes(parsed.protocol)) {
    return false;
  }
  // Ensure hostname is present
  return parsed.hostname !== '';
}
/**
* Sanitizes a URL for safe use
* @param {string} urlString - The URL to sanitize
* @returns {string|null} - Sanitized URL or null if invalid
*/
/**
 * Sanitizes a URL for safe use
 * @param {string} urlString - The URL to sanitize
 * @returns {string|null} - Sanitized URL or null if invalid
 */
export function sanitizeUrl(urlString) {
  if (!isValidUrl(urlString)) {
    return null;
  }
  try {
    // Re-parse and serialize so the returned URL is in canonical form
    const cleaned = new URL(urlString.trim());
    return cleaned.href;
  } catch {
    return null;
  }
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/version-checker.js | JavaScript | // T073: Version Check Module
// Checks for updates by comparing commit hash with GitHub
import { t } from './i18n.js';
const VERSION_INFO_PATH = '/data/version-info.json';
/**
* Load bundled version info
* @returns {Promise<Object>} Version info object {version, commitHash, buildDate}
*/
/**
 * Load bundled version info
 * @returns {Promise<Object>} Version info object {version, commitHash, buildDate}
 */
export async function loadVersionInfo() {
  try {
    const infoUrl = chrome.runtime.getURL(VERSION_INFO_PATH);
    const response = await fetch(infoUrl);
    if (!response.ok) {
      throw new Error('Failed to load version info');
    }
    // await inside try so JSON parse failures are also caught below
    return await response.json();
  } catch (error) {
    console.error('Error loading version info:', error);
    return null;
  }
}
/**
* Fetch latest commit from GitHub via background service worker
* @returns {Promise<Object|null>} Latest commit info {sha, date, message} or null on error
*/
/**
 * Fetch latest commit from GitHub via background service worker
 * @returns {Promise<Object|null>} Latest commit info {sha, date, message} or null on error
 */
export async function fetchLatestCommit() {
  try {
    const reply = await chrome.runtime.sendMessage({
      action: 'fetchLatestCommit'
    });
    if (!reply || !reply.success) {
      console.error('Error fetching latest commit:', reply?.error || 'Unknown error');
      return null;
    }
    return reply.data;
  } catch (error) {
    console.error('Error fetching latest commit:', error);
    return null;
  }
}
/**
* Check if an update is available
* @returns {Promise<Object>} Update status {updateAvailable, currentHash, latestHash, latestDate, error}
*/
/**
 * Check if an update is available
 * @returns {Promise<Object>} Update status {updateAvailable, currentHash, latestHash, latestDate, error}
 */
export async function checkForUpdates() {
  const versionInfo = await loadVersionInfo();
  if (!versionInfo) {
    // Bundled version info is unreadable; nothing to compare against.
    return {
      updateAvailable: false,
      error: t('errVersionInfoFailed')
    };
  }

  const latestCommit = await fetchLatestCommit();
  if (!latestCommit) {
    // GitHub unreachable; still surface the local hash for display.
    return {
      updateAvailable: false,
      currentHash: versionInfo.commitHash,
      error: t('errGitHubFetchFailed')
    };
  }

  // A differing short SHA means the repository has moved past this build.
  return {
    updateAvailable: versionInfo.commitHash !== latestCommit.shortSha,
    currentVersion: versionInfo.version,
    currentHash: versionInfo.commitHash,
    currentBuildDate: versionInfo.buildDate,
    latestHash: latestCommit.shortSha,
    latestDate: latestCommit.date,
    latestMessage: latestCommit.message,
    error: null
  };
}
/**
* Get the download URL for the latest version
* @returns {string} GitHub zip download URL
*/
/**
 * Get the download URL for the latest version
 * @returns {string} GitHub zip download URL
 */
export function getDownloadUrl() {
  const zipUrl = 'https://github.com/xiaolai/insidebar-ai/archive/refs/heads/main.zip';
  return zipUrl;
}
/**
* Get the GitHub repository URL
* @returns {string} GitHub repository URL
*/
/**
 * Get the GitHub repository URL
 * @returns {string} GitHub repository URL
 */
export function getRepositoryUrl() {
  const repoUrl = 'https://github.com/xiaolai/insidebar-ai';
  return repoUrl;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
options/options.css | CSS | /* T050-T064: Settings Page Styles */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* Material Symbols Configuration */
.material-symbols-outlined {
font-family: 'Material Symbols Outlined';
font-weight: normal;
font-style: normal;
font-size: 24px;
line-height: 1;
letter-spacing: normal;
text-transform: none;
display: inline-block;
white-space: nowrap;
word-wrap: normal;
direction: ltr;
-webkit-font-feature-settings: 'liga';
-webkit-font-smoothing: antialiased;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
background: #f5f5f5;
color: #333;
padding: 40px 20px;
min-height: 100vh;
}
.container {
max-width: 800px;
margin: 0 auto;
background: white;
border-radius: 8px;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
overflow: hidden;
}
header {
background: linear-gradient(135deg, #1a73e8 0%, #1557b0 100%);
color: white;
padding: 32px;
}
header h1 {
font-size: 28px;
font-weight: 600;
margin-bottom: 8px;
}
header p {
font-size: 14px;
opacity: 0.9;
}
.content {
padding: 32px;
}
.section {
margin-bottom: 32px;
}
.section:last-child {
margin-bottom: 0;
}
.section-title {
font-size: 18px;
font-weight: 600;
color: #333;
margin-bottom: 16px;
padding-bottom: 8px;
border-bottom: 2px solid #e0e0e0;
}
.setting-item {
display: flex;
justify-content: space-between;
align-items: center;
padding: 16px 0;
border-bottom: 1px solid #f5f5f5;
}
.setting-item:last-child {
border-bottom: none;
}
.setting-info {
flex: 1;
}
.setting-label {
font-size: 14px;
font-weight: 500;
color: #333;
margin-bottom: 4px;
}
.setting-description {
font-size: 12px;
color: #666;
line-height: 1.4;
}
.switch {
position: relative;
display: inline-block;
width: 48px;
height: 24px;
}
.switch input {
opacity: 0;
width: 0;
height: 0;
}
.switch .slider {
position: absolute;
cursor: pointer;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: #ccc;
transition: background-color 0.2s ease;
border-radius: 24px;
}
.switch .slider::before {
position: absolute;
content: "";
height: 18px;
width: 18px;
left: 3px;
bottom: 3px;
background-color: #fff;
transition: transform 0.2s ease;
border-radius: 50%;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.2);
}
.switch input:checked + .slider {
background-color: #1a73e8;
}
.switch input:checked + .slider::before {
transform: translateX(24px);
}
.setting-control {
margin-left: 16px;
}
/* Legacy toggle-switch for backwards compatibility */
.toggle-switch {
position: relative;
width: 48px;
height: 24px;
background: #ccc;
border-radius: 12px;
cursor: pointer;
transition: background 0.3s;
}
.toggle-switch.active {
background: #1a73e8;
}
.toggle-switch::after {
content: '';
position: absolute;
top: 2px;
left: 2px;
width: 20px;
height: 20px;
background: white;
border-radius: 50%;
transition: left 0.3s;
}
.toggle-switch.active::after {
left: 26px;
}
/* Select Dropdown */
select {
padding: 8px 12px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
background: white;
cursor: pointer;
min-width: 150px;
}
select:focus {
outline: none;
border-color: #1a73e8;
}
/* Buttons */
.btn {
padding: 10px 20px;
border: none;
border-radius: 4px;
font-size: 14px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s;
}
.btn:disabled {
cursor: not-allowed;
opacity: 0.6;
}
.btn-primary {
background: #1a73e8;
color: white;
}
.btn-primary:hover {
background: #1557b0;
}
.btn-secondary {
background: #f5f5f5;
color: #666;
}
.btn-secondary:hover {
background: #e0e0e0;
}
.btn-danger {
background: #d32f2f;
color: white;
}
.btn-danger:hover {
background: #b71c1c;
}
.btn-group {
display: flex;
gap: 12px;
}
/* Provider List */
.provider-list {
display: flex;
flex-direction: column;
gap: 8px;
}
.provider-item {
display: flex;
align-items: center;
justify-content: space-between;
padding: 12px;
background: #f5f5f5;
border-radius: 4px;
}
.provider-info {
display: flex;
align-items: center;
gap: 12px;
flex: 1;
}
.provider-icon {
width: 32px;
height: 32px;
border-radius: 4px;
background: white;
display: flex;
align-items: center;
justify-content: center;
}
.provider-name {
font-size: 14px;
font-weight: 500;
color: #333;
}
/* Data Management Section */
.data-stats {
background: #f9f9f9;
padding: 16px;
border-radius: 4px;
margin-bottom: 16px;
}
.data-stat-item {
display: flex;
justify-content: space-between;
align-items: center;
padding: 8px 0;
font-size: 14px;
}
.data-stat-label {
color: #666;
}
.data-stat-value {
font-weight: 600;
color: #333;
}
/* Danger Zone - ensure all buttons have same width */
.section .btn-danger {
min-width: 80px;
}
/* Status Messages */
.status-message {
padding: 12px 16px;
border-radius: 4px;
margin-bottom: 16px;
font-size: 14px;
display: none;
}
.status-message.show {
display: block;
}
.status-success {
background: #e8f5e9;
color: #2e7d32;
border: 1px solid #81c784;
}
.status-error {
background: #ffebee;
color: #c62828;
border: 1px solid #ef9a9a;
}
.status-info {
background: #e3f2fd;
color: #1565c0;
border: 1px solid #90caf9;
}
/* Keyboard Shortcuts */
.shortcuts-list {
display: flex;
flex-direction: column;
gap: 12px;
}
.shortcut-item {
display: flex;
justify-content: space-between;
align-items: center;
padding: 12px;
background: #f9f9f9;
border-radius: 4px;
}
.shortcut-label {
font-size: 14px;
color: #333;
}
.shortcut-keys {
display: flex;
gap: 4px;
}
.key {
padding: 4px 8px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 3px;
font-size: 12px;
font-family: monospace;
color: #666;
}
/* About Section */
.about-info {
text-align: center;
padding: 24px;
}
.about-info img {
width: 80px;
height: 80px;
margin-bottom: 16px;
}
.version-display {
margin-bottom: 16px;
}
.version {
font-size: 14px;
font-weight: 600;
color: #333;
margin-bottom: 4px;
}
.commit-hash {
font-size: 11px;
color: #999;
font-family: monospace;
}
.version-actions {
display: flex;
justify-content: center;
margin-bottom: 12px;
}
.version-download {
text-align: center;
margin-bottom: 16px;
}
.version-download a {
color: #1a73e8;
text-decoration: none;
font-size: 13px;
}
.version-download a:hover {
text-decoration: underline;
}
.update-status {
padding: 12px;
border-radius: 4px;
margin-bottom: 16px;
font-size: 13px;
line-height: 1.6;
text-align: center;
}
.update-status.update-available {
background: #e3f2fd;
border: 1px solid #90caf9;
color: #1565c0;
}
.update-status.update-current {
background: #e8f5e9;
border: 1px solid #81c784;
color: #2e7d32;
}
.update-status.update-error {
background: #ffebee;
border: 1px solid #ef9a9a;
color: #c62828;
}
.about-links {
display: flex;
justify-content: center;
gap: 16px;
margin-top: 16px;
}
.about-links a {
color: #1a73e8;
text-decoration: none;
font-size: 14px;
}
.about-links a:hover {
text-decoration: underline;
}
/* Dark theme base.
   theme-manager.js resolves 'auto' against the system preference and always
   writes the result to <html data-theme="...">, so [data-theme="dark"] is the
   single trigger for dark styling here.
   NOTE(review): the previous "@media (prefers-color-scheme: dark),
   [data-theme=dark] body { ... }" prelude was invalid CSS — a selector is not
   a media query (it parses as "not all"), and bare declarations inside @media
   are dropped — so that rule never applied and is removed. */
[data-theme="dark"] body {
  background: #1e1e1e;
  color: #e0e0e0;
}
/* Dark theme overrides, keyed off the data-theme attribute that
   theme-manager.js writes to <html>. The former
   "@media (prefers-color-scheme: dark), [data-theme=dark]" prelude was
   invalid (a selector is not a media query and parses as "not all"), so an
   explicitly chosen dark theme on a light-mode system never received these
   styles; attribute-scoped rules fix both the auto and explicit cases. */
[data-theme="dark"] .container {
  background: #2d2d2d;
}
[data-theme="dark"] .section-title {
  color: #e0e0e0;
  border-bottom-color: #444;
}
[data-theme="dark"] .setting-label {
  color: #e0e0e0;
}
[data-theme="dark"] .setting-description {
  color: #aaa;
}
[data-theme="dark"] .setting-item {
  border-bottom-color: #3a3a3a;
}
[data-theme="dark"] select {
  background: #1e1e1e;
  border-color: #444;
  color: #e0e0e0;
}
[data-theme="dark"] .provider-item {
  background: #3a3a3a;
}
[data-theme="dark"] .provider-icon {
  background: #2d2d2d;
}
[data-theme="dark"] .provider-name {
  color: #e0e0e0;
}
[data-theme="dark"] .data-stats {
  background: #3a3a3a;
}
[data-theme="dark"] .data-stat-label {
  color: #aaa;
}
[data-theme="dark"] .data-stat-value {
  color: #e0e0e0;
}
[data-theme="dark"] .shortcut-item {
  background: #3a3a3a;
}
[data-theme="dark"] .shortcut-label {
  color: #e0e0e0;
}
[data-theme="dark"] .key {
  background: #2d2d2d;
  border-color: #444;
  color: #aaa;
}
[data-theme="dark"] .btn-secondary {
  background: #3a3a3a;
  color: #aaa;
}
[data-theme="dark"] .btn-secondary:hover {
  background: #444;
}
[data-theme="dark"] .version {
  color: #e0e0e0;
}
[data-theme="dark"] .commit-hash {
  color: #aaa;
}
[data-theme="dark"] .version-download a {
  color: #90caf9;
}
[data-theme="dark"] .update-status.update-available {
  background: #1a237e;
  border-color: #3f51b5;
  color: #90caf9;
}
[data-theme="dark"] .update-status.update-current {
  background: #1b5e20;
  border-color: #388e3c;
  color: #81c784;
}
[data-theme="dark"] .update-status.update-error {
  background: #b71c1c;
  border-color: #d32f2f;
  color: #ef9a9a;
}
/* T071: Library Import Cards */
.library-import-card {
border: 2px solid #e0e0e0;
border-radius: 6px;
padding: 20px;
margin-bottom: 16px;
transition: all 0.2s;
background: #f9f9f9;
}
.library-import-card:hover {
border-color: #1a73e8;
box-shadow: 0 4px 8px rgba(26, 115, 232, 0.1);
background: white;
}
.library-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 8px;
}
.library-header h3 {
font-size: 16px;
font-weight: 600;
margin: 0;
color: #333;
}
.library-count {
font-size: 12px;
color: #666;
background: white;
padding: 4px 10px;
border-radius: 12px;
border: 1px solid #e0e0e0;
}
.library-description {
font-size: 13px;
color: #666;
margin-bottom: 16px;
line-height: 1.5;
}
.library-import-card button {
width: 100%;
}
.library-import-card .btn-primary {
background: #1a73e8;
font-weight: 600;
}
.library-import-card .btn-secondary:disabled {
background: #4caf50;
color: white;
cursor: default;
}
/* Dark theme for library import cards, keyed off data-theme (set by
   theme-manager.js). The former "@media ..., [data-theme=dark]" prelude was
   invalid CSS (a selector is not a media query), so the explicit dark theme
   never activated these rules. */
[data-theme="dark"] .library-import-card {
  background: #3a3a3a;
  border-color: #444;
}
[data-theme="dark"] .library-import-card:hover {
  border-color: #1a73e8;
  background: #2d2d2d;
}
[data-theme="dark"] .library-header h3 {
  color: #e0e0e0;
}
[data-theme="dark"] .library-count {
  background: #2d2d2d;
  border-color: #555;
  color: #aaa;
}
[data-theme="dark"] .library-description {
  color: #aaa;
}
/* Enter Key Behavior Settings */
#enter-behavior-settings {
margin-top: 12px;
padding-left: 0;
}
.key-mapping-card {
border: 1px solid #e0e0e0;
border-radius: 6px;
padding: 16px;
margin-bottom: 12px;
background: #f9f9f9;
}
.key-mapping-card h3 {
font-size: 14px;
font-weight: 600;
color: #333;
margin: 0 0 12px 0;
}
.key-mapping-controls {
display: flex;
align-items: center;
gap: 16px;
flex-wrap: wrap;
}
.key-mapping-controls label {
display: flex;
align-items: center;
gap: 6px;
font-size: 13px;
color: #666;
cursor: pointer;
}
.key-mapping-controls input[type="checkbox"] {
cursor: pointer;
}
/* Dark theme for key-mapping cards, keyed off data-theme (set by
   theme-manager.js). The former "@media ..., [data-theme=dark]" prelude was
   invalid CSS (a selector is not a media query), so the explicit dark theme
   never activated these rules. */
[data-theme="dark"] .key-mapping-card {
  background: #2d2d2d;
  border-color: #444;
}
[data-theme="dark"] .key-mapping-card h3 {
  color: #e0e0e0;
}
[data-theme="dark"] .key-mapping-controls label {
  color: #aaa;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
options/options.html | HTML | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title data-i18n="optionsPageTitle">insidebar.ai - Settings</title>
<link href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@20..48,100..700,0..1,-50..200&display=block" rel="stylesheet">
<link rel="stylesheet" href="options.css">
</head>
<body>
<div class="container">
<header>
<h1 data-i18n="headerSettingsTitle">Settings: insidebar.ai</h1>
<p data-i18n="headerSettingsSubtitle">Customize your multi-AI sidebar experience</p>
</header>
<div class="content">
<!-- Status Messages -->
<div id="status-success" class="status-message status-success"></div>
<div id="status-error" class="status-message status-error"></div>
<!-- T051: Theme Settings -->
<section class="section">
<h2 class="section-title" data-i18n="sectionAppearance">Appearance</h2>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelTheme">Theme</div>
<div class="setting-description" data-i18n="descTheme">Choose your preferred color scheme</div>
</div>
<div class="setting-control">
<select id="theme-select">
<option value="auto" data-i18n="themeAuto">Auto (System)</option>
<option value="light" data-i18n="themeLight">Light</option>
<option value="dark" data-i18n="themeDark">Dark</option>
</select>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelLanguage">Language</div>
<div class="setting-description" data-i18n="descLanguage">Choose your preferred interface language</div>
</div>
<div class="setting-control">
<select id="language-select">
<option value="en" data-i18n="langEnglish">English</option>
<option value="zh_CN" data-i18n="langSimplifiedChinese">简体中文 (Simplified Chinese)</option>
<option value="zh_TW" data-i18n="langTraditionalChinese">繁體中文 (Traditional Chinese)</option>
<option value="ko" data-i18n="langKorean">한국어 (Korean)</option>
<option value="ja" data-i18n="langJapanese">日本語 (Japanese)</option>
<option value="es" data-i18n="langSpanish">Español (Spanish)</option>
<option value="fr" data-i18n="langFrench">Français (French)</option>
<option value="de" data-i18n="langGerman">Deutsch (German)</option>
<option value="it" data-i18n="langItalian">Italiano (Italian)</option>
<option value="ru" data-i18n="langRussian">Русский (Russian)</option>
</select>
</div>
</div>
</section>
<!-- T052-T053: Provider Settings -->
<section class="section">
<h2 class="section-title" data-i18n="sectionProviders">AI Providers</h2>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelDefaultProvider">Default Provider</div>
<div class="setting-description" data-i18n="descDefaultProvider">AI provider to load when opening the sidebar</div>
</div>
<div class="setting-control">
<select id="default-provider-select">
<!-- Options dynamically populated based on enabled providers -->
</select>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelRememberLastProvider">Remember Last Provider</div>
<div class="setting-description" data-i18n="descRememberLastProvider">When enabled, sidebar opens with the last provider you used. When disabled, always opens with default provider.</div>
</div>
<div class="setting-control">
<label class="switch">
<input type="checkbox" id="remember-last-provider-toggle" checked />
<span class="slider"></span>
</label>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelEnabledProviders">Enabled Providers</div>
<div class="setting-description" data-i18n="descEnabledProviders">Select which AI providers appear in your sidebar</div>
</div>
</div>
<div class="provider-list" id="provider-list">
<!-- Provider items will be dynamically generated -->
</div>
</section>
<!-- Keyboard Settings -->
<section class="section">
<h2 class="section-title" data-i18n="sectionKeyboard">Keyboard</h2>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelEnableKeyboardShortcut">Enable Keyboard Shortcut</div>
<div class="setting-description" data-i18n="descEnableKeyboardShortcut">
Toggle the global shortcut that opens insidebar.ai from anywhere in the browser.
</div>
</div>
<div class="setting-control">
<label class="switch">
<input type="checkbox" id="keyboard-shortcut-toggle" checked />
<span class="slider"></span>
</label>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelDefaultShortcuts">Default Shortcuts</div>
<div class="setting-description" data-i18n-html="descDefaultShortcuts">
Command+Shift+E (Ctrl+Shift+E on Windows/Linux) opens the sidebar with default provider.<br>
Command+Shift+P (Ctrl+Shift+P on Windows/Linux) opens Prompt Library.<br>
You can customize these in your browser's shortcut settings.
</div>
</div>
<div class="setting-control">
<button class="btn btn-secondary" id="open-shortcuts-btn" data-i18n="btnOpenShortcuts">Open Shortcuts</button>
</div>
</div>
<div class="setting-item" id="edge-shortcut-helper" style="display: none;">
<div class="setting-info">
<div class="setting-label" data-i18n="labelEdgeShortcutHelper">Confirm in Microsoft Edge</div>
<div class="setting-description" data-i18n="descEdgeShortcutHelper">
Edge requires you to confirm the shortcut once. Click configure to jump straight to the Edge shortcuts page.
</div>
</div>
<div class="setting-control">
<button class="btn btn-secondary" id="open-edge-shortcuts-btn" data-i18n="btnConfigureEdge">Configure in Edge</button>
</div>
</div>
<!-- Enter Key Behavior -->
<p style="font-size: 14px; color: #666; margin-top: 24px; margin-bottom: 8px;" data-i18n="descEnterBehavior1">
Customize how Enter and modifier keys work when typing in AI provider input fields.
</p>
<p style="font-size: 13px; color: #888; margin-bottom: 16px;" data-i18n-html="descEnterBehavior2">
<strong>Applies to:</strong> ChatGPT, Claude, Gemini, Google (AI Mode), Grok, DeepSeek, and Perplexity.
</p>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelEnableEnterCustom">Enable Enter Key Customization</div>
<div class="setting-description" data-i18n="descEnableEnterCustom">
When enabled, modifies Enter key behavior across all supported AI providers
</div>
</div>
<div class="setting-control">
<label class="switch">
<input type="checkbox" id="enter-behavior-toggle" checked />
<span class="slider"></span>
</label>
</div>
</div>
<div id="enter-behavior-settings">
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelPreset">Preset</div>
<div class="setting-description" data-i18n="descPreset">Choose a common key combination pattern</div>
</div>
<div class="setting-control">
<select id="enter-preset-select">
<option value="default" data-i18n="presetDefault">Default (Enter=Send, Shift+Enter=Newline)</option>
<option value="swapped" selected data-i18n="presetSwapped">Swapped (Enter=Newline, Shift+Enter=Send)</option>
<option value="slack" data-i18n="presetSlack">Slack-style (Enter=Send, Ctrl+Enter=Newline)</option>
<option value="discord" data-i18n="presetDiscord">Discord-style (Enter=Newline, Ctrl+Enter=Send)</option>
<option value="custom" data-i18n="presetCustom">Custom</option>
</select>
</div>
</div>
<div id="custom-enter-settings" style="display: none;">
<div class="key-mapping-card">
<h3 data-i18n="labelInsertNewline">Insert Newline</h3>
<div class="key-mapping-controls">
<label>
<input type="checkbox" id="newline-shift" /> <span data-i18n="keyShift">Shift</span>
</label>
<label>
<input type="checkbox" id="newline-ctrl" /> <span data-i18n="keyCtrl">Ctrl</span>
</label>
<label>
<input type="checkbox" id="newline-alt" /> <span data-i18n="keyAlt">Alt</span>
</label>
<label>
<input type="checkbox" id="newline-meta" /> <span data-i18n="keyMeta">⌘/Win</span>
</label>
<span style="margin-left: auto;" data-i18n="keyPlusEnter">+ Enter</span>
</div>
</div>
<div class="key-mapping-card">
<h3 data-i18n="labelSendMessage">Send Message</h3>
<div class="key-mapping-controls">
<label>
<input type="checkbox" id="send-shift" /> <span data-i18n="keyShift">Shift</span>
</label>
<label>
<input type="checkbox" id="send-ctrl" /> <span data-i18n="keyCtrl">Ctrl</span>
</label>
<label>
<input type="checkbox" id="send-alt" /> <span data-i18n="keyAlt">Alt</span>
</label>
<label>
<input type="checkbox" id="send-meta" /> <span data-i18n="keyMeta">⌘/Win</span>
</label>
<span style="margin-left: auto;" data-i18n="keyPlusEnter">+ Enter</span>
</div>
</div>
</div>
</div>
</section>
<!-- Chat History Settings -->
<section class="section">
<h2 class="section-title" data-i18n="sectionChatHistory">Chat History</h2>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelAutoOpenSidebar">Auto-open Sidebar After Save</div>
<div class="setting-description" data-i18n="descAutoOpenSidebar">
Automatically open the sidebar and switch to chat history when you save a conversation
</div>
</div>
<div class="setting-control">
<label class="switch">
<input type="checkbox" id="auto-open-sidebar-toggle" />
<span class="slider"></span>
</label>
</div>
</div>
</section>
<!-- Prompt Library -->
<section class="section">
<h2 class="section-title" data-i18n="sectionPromptLibrary">Prompt Library</h2>
<p style="font-size: 14px; color: #666; margin-bottom: 16px;" data-i18n="descPromptLibrary">
Import curated meta-prompts for AI workflows, research, coding, and analysis
</p>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelAutoPaste">Auto-paste Clipboard to Prompt Library</div>
<div class="setting-description" data-i18n="descAutoPaste">
Automatically paste clipboard content to workspace when opening Prompt Library with Command+Shift+P
</div>
</div>
<div class="setting-control">
<label class="switch">
<input type="checkbox" id="auto-paste-toggle" />
<span class="slider"></span>
</label>
</div>
</div>
<div class="library-import-card">
<div class="library-header">
<h3 data-i18n="labelDefaultPromptLibrary">Default Prompt Library</h3>
</div>
<p class="library-description" data-i18n="descDefaultPromptLibrary">
Meta-prompts for prompt refinement, planning, coding, testing, research, writing, and more.
Includes variables support for dynamic templates.
</p>
<div class="btn-group">
<button class="btn btn-primary" id="import-default-library" data-i18n="btnImportDefault">Import Default Prompts</button>
<button class="btn btn-secondary" id="import-custom-library" data-i18n="btnImportCustom">Import Custom Prompts</button>
</div>
<p style="font-size: 12px; color: #888; margin-top: 12px; font-style: italic;" data-i18n-html="descCustomPromptReadme">
Before importing custom prompts, please read: <code style="background: #f5f5f5; padding: 2px 6px; border-radius: 3px;">data/prompt-libraries/Generate_a_Basic_Prompt_Library.md</code>
</p>
<input type="file" id="import-custom-library-file" accept=".json" style="display: none;" />
</div>
</section>
<!-- Content Sending Settings -->
<section class="section">
<h2 class="section-title" data-i18n="sectionContentSending">Content Sending</h2>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelSourceUrlPlacement">Source URL Placement</div>
<div class="setting-description" data-i18n="descSourceUrlPlacement">
Choose where to place the source URL when sending selected text or page content to AI providers
</div>
</div>
<div class="setting-control">
<select id="source-url-placement-select">
<option value="end" data-i18n="urlPlacementEnd">At end (after content)</option>
<option value="beginning" data-i18n="urlPlacementBeginning">At beginning (before content)</option>
<option value="none" data-i18n="urlPlacementNone">Don't include URL</option>
</select>
</div>
</div>
</section>
<!-- T056-T064: Data Management -->
<section class="section">
<h2 class="section-title" data-i18n="sectionDataManagement">Data Management</h2>
<div class="data-stats" id="data-stats">
<div class="data-stat-item">
<span class="data-stat-label" data-i18n="statChatHistory">Chat History</span>
<span class="data-stat-value" id="stat-conversations">0</span>
</div>
<div class="data-stat-item">
<span class="data-stat-label" data-i18n="statPrompts">Prompts</span>
<span class="data-stat-value" id="stat-prompts">0</span>
</div>
<div class="data-stat-item">
<span class="data-stat-label" data-i18n="statStorageUsed">Storage Used</span>
<span class="data-stat-value" id="stat-storage">~0 KB</span>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelExportData">Export Data</div>
<div class="setting-description" data-i18n="descExportData">Download all prompts and settings as JSON</div>
</div>
<div class="setting-control">
<button class="btn btn-secondary" id="export-btn" data-i18n="btnExport">Export</button>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelImportData">Import Data</div>
<div class="setting-description" data-i18n="descImportData">Restore prompts and settings from JSON file</div>
</div>
<div class="setting-control">
<input type="file" id="import-file" accept=".json" style="display: none;" />
<button class="btn btn-secondary" id="import-btn" data-i18n="btnImport">Import</button>
</div>
</div>
</section>
<!-- Danger Zone -->
<section class="section">
<h2 class="section-title" style="color: #d32f2f;" data-i18n="sectionDangerZone">Danger Zone</h2>
<p style="font-size: 14px; color: #d32f2f; margin-bottom: 16px;" data-i18n="descDangerZone">
These actions cannot be undone. Use with caution.
</p>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelClearHistory">Clear Chat History</div>
<div class="setting-description" data-i18n="descClearHistory">Delete all saved conversations</div>
</div>
<div class="setting-control">
<button class="btn btn-danger" id="clear-conversations-btn" data-i18n="btnClear">Clear</button>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelClearPrompts">Clear Prompts</div>
<div class="setting-description" data-i18n="descClearPrompts">Delete all prompts from your library</div>
</div>
<div class="setting-control">
<button class="btn btn-danger" id="clear-prompts-btn" data-i18n="btnClear">Clear</button>
</div>
</div>
<div class="setting-item">
<div class="setting-info">
<div class="setting-label" data-i18n="labelResetSettings">Reset Settings</div>
<div class="setting-description" data-i18n="descResetSettings">Reset all settings to defaults</div>
</div>
<div class="setting-control">
<button class="btn btn-danger" id="reset-settings-btn" data-i18n="btnReset">Reset</button>
</div>
</div>
</section>
<!-- About Section -->
<section class="section">
<h2 class="section-title" data-i18n="sectionAbout">About</h2>
<div class="about-info">
<div class="version-display">
<div class="version" id="version">Version 1.0.0</div>
<div class="commit-hash" id="commit-hash" data-i18n="labelLoading">Loading...</div>
</div>
<div class="version-actions">
<button class="btn btn-secondary" id="check-updates-btn" data-i18n="btnCheckUpdates">Check for Updates</button>
</div>
<div id="update-status" class="update-status" style="display: none;"></div>
<div class="version-download">
<a href="https://github.com/xiaolai/insidebar-ai/archive/refs/heads/main.zip"
id="download-latest-link"
target="_blank"
rel="noopener noreferrer"
data-i18n="linkDownloadLatest">Download Latest Version</a>
</div>
<p style="font-size: 14px; color: #666; margin-bottom: 8px; margin-top: 16px;" data-i18n="descAbout1">
insidebar.ai provides quick access to multiple AI assistants in one convenient sidebar.
</p>
<p style="font-size: 12px; color: #999;" data-i18n="descAbout2">
Built with privacy-first architecture. All data stored locally.
</p>
</div>
</section>
</div>
</div>
<script type="module" src="options.js"></script>
</body>
</html>
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
options/options.js | JavaScript | // T050-T064: Settings Page Implementation
import { PROVIDERS } from '../modules/providers.js';
import { getSettings, getSetting, saveSettings, saveSetting, resetSettings, exportSettings, importSettings } from '../modules/settings.js';
import { applyTheme } from '../modules/theme-manager.js';
import {
getAllPrompts,
exportPrompts,
importPrompts,
clearAllPrompts,
importDefaultLibrary
} from '../modules/prompt-manager.js';
import {
getAllConversations,
exportConversations,
importConversations,
clearAllConversations
} from '../modules/history-manager.js';
import {
loadVersionInfo,
checkForUpdates
} from '../modules/version-checker.js';
import { t, translatePage, getCurrentLanguage, initializeLanguage } from '../modules/i18n.js';
const DEFAULT_ENABLED_PROVIDERS = ['chatgpt', 'claude', 'gemini', 'google', 'grok', 'deepseek', 'copilot'];
// Map the browser's reported language code onto one of the extension's
// supported locales ('en', 'zh_CN', 'zh_TW').
function getCurrentBrowserLanguage() {
  const browserLang = getCurrentLanguage();
  if (!browserLang.startsWith('zh')) {
    return 'en';
  }
  // Traditional Chinese variants: Taiwan, Hong Kong, or an explicit Hant script tag.
  const isTraditional = ['TW', 'HK', 'Hant'].some(tag => browserLang.includes(tag));
  return isTraditional ? 'zh_TW' : 'zh_CN';
}
// Return a defensive copy of the user's enabled-provider list, falling back
// to the built-in defaults when the setting is absent or malformed.
function getEnabledProvidersOrDefault(settings) {
  const configured = settings.enabledProviders;
  const source = Array.isArray(configured) ? configured : DEFAULT_ENABLED_PROVIDERS;
  return [...source];
}
// Detect Microsoft Edge, preferring the structured userAgentData API and
// falling back to userAgent string sniffing ('Edg/' token).
function isEdgeBrowser() {
  const brands = navigator.userAgentData?.brands;
  if (Array.isArray(brands)) {
    return brands.some(({ brand }) => /Edge/i.test(brand));
  }
  return navigator.userAgent.includes('Edg/');
}
/**
 * Open the browser's extension-shortcut settings page in a new tab.
 * @param {('edge'|'chrome')} [browserOverride] - Force a browser instead of auto-detecting.
 */
function openShortcutSettings(browserOverride) {
  const isEdge = browserOverride === 'edge' || (browserOverride !== 'chrome' && isEdgeBrowser());
  const url = isEdge ? 'edge://extensions/shortcuts' : 'chrome://extensions/shortcuts';
  try {
    // chrome.tabs.create returns a Promise in MV3; a rejection would otherwise be
    // an unhandled promise rejection that the surrounding try/catch cannot see.
    chrome.tabs.create({ url }).catch(() => window.open(url, '_blank'));
  } catch (error) {
    // chrome.tabs may be entirely unavailable (synchronous TypeError) — fall back.
    window.open(url, '_blank');
  }
}
// Wire up the buttons that deep-link to the browser's shortcut settings page.
function setupShortcutHelpers() {
  const openBtn = document.getElementById('open-shortcuts-btn');
  openBtn?.addEventListener('click', () => openShortcutSettings());

  const helper = document.getElementById('edge-shortcut-helper');
  const edgeBtn = document.getElementById('open-edge-shortcuts-btn');
  if (helper && edgeBtn) {
    edgeBtn.addEventListener('click', () => openShortcutSettings('edge'));
  }
}
// Detect whether the extension was installed from the Chrome Web Store.
// installType 'normal' = store install; 'development' = loaded unpacked.
async function isWebStoreInstall() {
  try {
    const { installType } = await chrome.management.getSelf();
    return installType === 'normal';
  } catch (error) {
    console.error('Error detecting install type:', error);
    // On failure, assume non-store install so update checking stays visible.
    return false;
  }
}
// Web Store installs update automatically, so hide the manual update-checking
// UI (check button, status area, and download link) for them.
async function hideUpdateCheckingIfNeeded() {
  const isFromStore = await isWebStoreInstall();
  if (!isFromStore) return;

  const hide = (el) => {
    if (el) el.style.display = 'none';
  };
  hide(document.getElementById('check-updates-btn'));
  hide(document.getElementById('update-status'));
  // The download link is hidden via its container, matching the original layout.
  hide(document.getElementById('download-latest-link')?.closest('.version-download'));
}
// Show the Edge-specific shortcut helper only on Edge and only while the
// keyboard shortcut feature is enabled.
function updateShortcutHelperVisibility(isEnabled) {
  const edgeHelper = document.getElementById('edge-shortcut-helper');
  if (!edgeHelper) return;
  const visible = isEdgeBrowser() && isEnabled;
  edgeHelper.style.display = visible ? 'flex' : 'none';
}
// T050: Initialize settings page
// Sequencing matters here: theme and language are applied before anything is
// rendered so the first paint is already themed/translated, and the provider
// list is rendered before setupEventListeners() attaches handlers.
async function init() {
  await applyTheme(); // Apply theme first
  await initializeLanguage(); // Initialize language from user settings
  translatePage(); // Translate all static text
  await loadSettings();
  await loadDataStats();
  await loadLibraryCount(); // Load default library count
  await loadVersionDisplay(); // T073: Load and display version info
  await hideUpdateCheckingIfNeeded(); // Hide update checking for web store installations
  await renderProviderList();
  setupEventListeners();
  setupShortcutHelpers();
}
// T051: Load and display current settings
// Populates every control on the page from persisted settings, applying a
// default wherever a setting is absent. Optional controls are null-guarded.
async function loadSettings() {
  const settings = await getSettings();
  // Theme
  document.getElementById('theme-select').value = settings.theme || 'auto';
  // Language: fall back to the browser locale when no explicit choice was saved
  const currentLanguage = settings.language || getCurrentBrowserLanguage();
  document.getElementById('language-select').value = currentLanguage;
  // Default provider - now dynamically populated
  await updateDefaultProviderDropdown();
  // Keyboard shortcut is enabled unless explicitly saved as false (opt-out)
  const keyboardShortcutEnabled = settings.keyboardShortcutEnabled !== false;
  const shortcutToggle = document.getElementById('keyboard-shortcut-toggle');
  if (shortcutToggle) {
    shortcutToggle.checked = keyboardShortcutEnabled;
  }
  updateShortcutHelperVisibility(keyboardShortcutEnabled);
  // Auto-paste clipboard setting (opt-in: defaults to off)
  const autoPasteToggle = document.getElementById('auto-paste-toggle');
  if (autoPasteToggle) {
    autoPasteToggle.checked = settings.autoPasteClipboard === true;
  }
  // Auto-open sidebar after save setting (opt-in: defaults to off)
  const autoOpenSidebarToggle = document.getElementById('auto-open-sidebar-toggle');
  if (autoOpenSidebarToggle) {
    autoOpenSidebarToggle.checked = settings.autoOpenSidebarOnSave === true;
  }
  // Remember last provider setting (opt-out: defaults to on)
  const rememberLastProviderToggle = document.getElementById('remember-last-provider-toggle');
  if (rememberLastProviderToggle) {
    rememberLastProviderToggle.checked = settings.rememberLastProvider !== false;
  }
  // Source URL placement setting
  const sourceUrlPlacementSelect = document.getElementById('source-url-placement-select');
  if (sourceUrlPlacementSelect) {
    sourceUrlPlacementSelect.value = settings.sourceUrlPlacement || 'end';
  }
  // Enter key behavior settings — default preset is 'swapped' (see applyEnterKeyPreset)
  const enterBehavior = settings.enterKeyBehavior || {
    enabled: true,
    preset: 'swapped',
    newlineModifiers: { shift: false, ctrl: false, alt: false, meta: false },
    sendModifiers: { shift: true, ctrl: false, alt: false, meta: false }
  };
  const enterBehaviorToggle = document.getElementById('enter-behavior-toggle');
  if (enterBehaviorToggle) {
    enterBehaviorToggle.checked = enterBehavior.enabled;
    updateEnterBehaviorVisibility(enterBehavior.enabled);
  }
  const enterPresetSelect = document.getElementById('enter-preset-select');
  if (enterPresetSelect) {
    enterPresetSelect.value = enterBehavior.preset || 'swapped';
    updateCustomEnterSettingsVisibility(enterBehavior.preset);
  }
  // Load custom settings
  loadCustomEnterSettings(enterBehavior);
}
// T052-T053: Render provider enable/disable toggles
// Rebuilds the provider list with one toggle per provider.
// NOTE(review): provider.name/icon are interpolated into innerHTML; assumed safe
// because PROVIDERS is a bundled constant, not user input — confirm if that changes.
async function renderProviderList() {
  const settings = await getSettings();
  const enabledProviders = getEnabledProvidersOrDefault(settings);
  const listContainer = document.getElementById('provider-list');
  listContainer.innerHTML = PROVIDERS.map(provider => {
    const isEnabled = enabledProviders.includes(provider.id);
    return `
<div class="provider-item">
<div class="provider-info">
<div class="provider-icon">
<img src="${provider.icon}" alt="${provider.name}" width="24" height="24"
onerror="this.style.display='none'" />
</div>
<span class="provider-name">${provider.name}</span>
</div>
<div class="toggle-switch ${isEnabled ? 'active' : ''}" data-provider-id="${provider.id}"></div>
</div>
`;
  }).join('');
  // Add click listeners to toggles (re-attached on every render since innerHTML replaced them)
  listContainer.querySelectorAll('.toggle-switch').forEach(toggle => {
    toggle.addEventListener('click', async () => {
      await toggleProvider(toggle.dataset.providerId);
    });
  });
}
// Rebuild the default-provider dropdown so it lists only enabled providers,
// migrating and persisting the saved default if it has been disabled.
async function updateDefaultProviderDropdown() {
  const settings = await getSettings();
  const enabledProviders = getEnabledProvidersOrDefault(settings);
  const dropdown = document.getElementById('default-provider-select');
  const currentDefault = settings.defaultProvider || 'chatgpt';

  // Rebuild options from scratch in enabled-provider order.
  dropdown.innerHTML = '';
  for (const providerId of enabledProviders) {
    const provider = PROVIDERS.find(p => p.id === providerId);
    if (!provider) continue;
    const option = document.createElement('option');
    option.value = provider.id;
    option.textContent = provider.name;
    dropdown.appendChild(option);
  }

  if (enabledProviders.includes(currentDefault)) {
    // Current default is still enabled — keep it selected.
    dropdown.value = currentDefault;
  } else {
    // The saved default was disabled; fall back to the first enabled
    // provider and persist that new choice.
    const newDefault = enabledProviders[0];
    dropdown.value = newDefault;
    await saveSetting('defaultProvider', newDefault);
  }
}
/**
 * Enable or disable a provider toggle, keeping at least one provider enabled
 * and clearing the remembered last-selected provider when it is disabled.
 * @param {string} providerId - id from the PROVIDERS registry.
 */
async function toggleProvider(providerId) {
  const settings = await getSettings();
  let enabledProviders = getEnabledProvidersOrDefault(settings);
  if (enabledProviders.includes(providerId)) {
    // Disable - but ensure at least one provider remains enabled
    if (enabledProviders.length === 1) {
      showStatus('error', t('msgOneProviderRequired'));
      return;
    }
    enabledProviders = enabledProviders.filter(id => id !== providerId);
    // If disabling the last selected provider, clear it so sidebar uses the new default
    const lastSelected = await chrome.storage.sync.get({ lastSelectedProvider: null });
    if (lastSelected.lastSelectedProvider === providerId) {
      await chrome.storage.sync.set({ lastSelectedProvider: null });
    }
  } else {
    // Enable
    enabledProviders.push(providerId);
  }
  await saveSetting('enabledProviders', enabledProviders);
  await renderProviderList();
  await updateDefaultProviderDropdown(); // Update dropdown when providers change
  showStatus('success', t('msgProviderSettingsUpdated'));
}
// T056: Load and display data statistics — prompt/conversation counts plus a
// rough storage estimate based on serialized JSON length.
async function loadDataStats() {
  const setText = (id, text) => {
    document.getElementById(id).textContent = text;
  };
  try {
    const prompts = await getAllPrompts();
    const conversations = await getAllConversations();
    setText('stat-prompts', prompts.length);
    setText('stat-conversations', conversations.length);
    // Rough storage estimate: combined serialized size, rounded to KB.
    const totalSize = JSON.stringify(prompts).length + JSON.stringify(conversations).length;
    setText('stat-storage', `~${Math.round(totalSize / 1024)} KB`);
  } catch (error) {
    // Stats are non-critical; fall back to zeroes on any failure.
    setText('stat-prompts', '0');
    setText('stat-conversations', '0');
    setText('stat-storage', '0 KB');
  }
}
// Load and display how many prompts ship in the bundled default library.
// Falls back to an "unknown" label if the file cannot be fetched or parsed.
async function loadLibraryCount() {
  const countElement = document.getElementById('library-count');
  if (!countElement) return;
  try {
    const response = await fetch(chrome.runtime.getURL('data/prompt-libraries/default-prompts.json'));
    // fetch() only rejects on network errors; surface HTTP failures explicitly
    // instead of letting response.json() throw a confusing SyntaxError.
    if (!response.ok) {
      throw new Error(`Failed to fetch default library: ${response.status}`);
    }
    const promptsArray = await response.json();
    const count = Array.isArray(promptsArray) ? promptsArray.length : 0;
    countElement.textContent = t('msgPromptsCount', count.toString());
  } catch (error) {
    console.error('Failed to load library count:', error);
    countElement.textContent = t('msgUnknownCount');
  }
}
// T057-T064: Setup event listeners
// Wires every control on the page. Controls that may be absent from the DOM
// are null-guarded so the page degrades gracefully.
function setupEventListeners() {
  // Theme change
  document.getElementById('theme-select').addEventListener('change', async (e) => {
    await saveSetting('theme', e.target.value);
    await applyTheme(); // Re-apply theme immediately
    showStatus('success', t('msgThemeUpdated'));
  });
  // Language change
  document.getElementById('language-select').addEventListener('change', async (e) => {
    const newLanguage = e.target.value;
    await saveSetting('language', newLanguage);
    // Reload translations with new language
    await initializeLanguage(newLanguage);
    // Re-translate the entire page
    translatePage();
    // Show success message (now in the new language)
    showStatus('success', t('msgLanguageUpdated'));
  });
  // Default provider change
  document.getElementById('default-provider-select').addEventListener('change', async (e) => {
    await saveSetting('defaultProvider', e.target.value);
    showStatus('success', t('msgDefaultProviderUpdated'));
  });
  // Keyboard shortcut toggle
  const shortcutToggle = document.getElementById('keyboard-shortcut-toggle');
  if (shortcutToggle) {
    shortcutToggle.addEventListener('change', async (e) => {
      const enabled = e.target.checked;
      await saveSetting('keyboardShortcutEnabled', enabled);
      updateShortcutHelperVisibility(enabled);
      showStatus('success', enabled ? t('msgShortcutEnabled') : t('msgShortcutDisabled'));
    });
  }
  // Auto-paste clipboard toggle
  const autoPasteToggle = document.getElementById('auto-paste-toggle');
  if (autoPasteToggle) {
    autoPasteToggle.addEventListener('change', async (e) => {
      const enabled = e.target.checked;
      await saveSetting('autoPasteClipboard', enabled);
      showStatus('success', enabled ? t('msgAutoPasteEnabled') : t('msgAutoPasteDisabled'));
    });
  }
  // Auto-open sidebar after save toggle
  const autoOpenSidebarToggle = document.getElementById('auto-open-sidebar-toggle');
  if (autoOpenSidebarToggle) {
    autoOpenSidebarToggle.addEventListener('change', async (e) => {
      const enabled = e.target.checked;
      await saveSetting('autoOpenSidebarOnSave', enabled);
      showStatus('success', enabled ? t('msgAutoOpenEnabled') : t('msgAutoOpenDisabled'));
    });
  }
  // Source URL placement change
  const sourceUrlPlacementSelect = document.getElementById('source-url-placement-select');
  if (sourceUrlPlacementSelect) {
    sourceUrlPlacementSelect.addEventListener('change', async (e) => {
      await saveSetting('sourceUrlPlacement', e.target.value);
      showStatus('success', t('msgSourceUrlPlacementUpdated'));
    });
  }
  // Remember last provider toggle
  const rememberLastProviderToggle = document.getElementById('remember-last-provider-toggle');
  if (rememberLastProviderToggle) {
    rememberLastProviderToggle.addEventListener('change', async (e) => {
      const enabled = e.target.checked;
      await saveSetting('rememberLastProvider', enabled);
      // If disabling, clear lastSelectedProvider so sidebar uses default provider next time
      if (!enabled) {
        await chrome.storage.sync.set({ lastSelectedProvider: null });
      }
      showStatus('success', enabled ? t('msgRememberLastProviderEnabled') : t('msgRememberLastProviderDisabled'));
    });
  }
  // Export data
  document.getElementById('export-btn').addEventListener('click', exportData);
  // Import data — a visible button proxies the hidden file input
  document.getElementById('import-btn').addEventListener('click', () => {
    document.getElementById('import-file').click();
  });
  document.getElementById('import-file').addEventListener('change', async (e) => {
    const file = e.target.files[0];
    if (file) {
      await importData(file);
      e.target.value = ''; // Reset file input
    }
  });
  // Danger Zone - Clear buttons
  document.getElementById('clear-prompts-btn').addEventListener('click', clearPrompts);
  document.getElementById('clear-conversations-btn').addEventListener('click', clearConversations);
  document.getElementById('reset-settings-btn').addEventListener('click', resetSettingsOnly);
  // Default library import button
  document.getElementById('import-default-library')?.addEventListener('click', importDefaultLibraryHandler);
  // Custom library import button — also proxies a hidden file input
  document.getElementById('import-custom-library')?.addEventListener('click', () => {
    document.getElementById('import-custom-library-file').click();
  });
  document.getElementById('import-custom-library-file')?.addEventListener('change', async (e) => {
    const file = e.target.files[0];
    if (file) {
      await importCustomLibraryHandler(file);
      e.target.value = ''; // Reset file input
    }
  });
  // Enter key behavior toggle
  const enterBehaviorToggle = document.getElementById('enter-behavior-toggle');
  if (enterBehaviorToggle) {
    enterBehaviorToggle.addEventListener('change', async (e) => {
      const enabled = e.target.checked;
      const settings = await getSettings();
      const enterBehavior = settings.enterKeyBehavior || {};
      enterBehavior.enabled = enabled;
      await saveSetting('enterKeyBehavior', enterBehavior);
      updateEnterBehaviorVisibility(enabled);
      showStatus('success', enabled ? t('msgEnterCustomEnabled') : t('msgEnterCustomDisabled'));
    });
  }
  // Preset selection
  const enterPresetSelect = document.getElementById('enter-preset-select');
  if (enterPresetSelect) {
    enterPresetSelect.addEventListener('change', async (e) => {
      await applyEnterKeyPreset(e.target.value);
      updateCustomEnterSettingsVisibility(e.target.value);
    });
  }
  // Custom modifier checkboxes all funnel into one save handler
  ['newline-shift', 'newline-ctrl', 'newline-alt', 'newline-meta',
    'send-shift', 'send-ctrl', 'send-alt', 'send-meta'].forEach(id => {
    const checkbox = document.getElementById(id);
    if (checkbox) {
      checkbox.addEventListener('change', saveCustomEnterSettings);
    }
  });
  // T073: Version check button
  const checkUpdatesBtn = document.getElementById('check-updates-btn');
  if (checkUpdatesBtn) {
    checkUpdatesBtn.addEventListener('click', performVersionCheck);
  }
}
// T057: Export all user data (prompts, chat history, settings) as a single
// versioned JSON file downloaded via a temporary object URL.
async function exportData() {
  try {
    // Export prompts
    const promptsData = await exportPrompts();
    // Export conversations (chat history)
    const conversationsData = await exportConversations();
    // Export settings
    const settingsData = await exportSettings();
    // Combine into a single backup payload. Named `backup` to avoid shadowing
    // this function's own name (the original `const exportData` shadowed it).
    const backup = {
      version: '1.0',
      exportDate: new Date().toISOString(),
      prompts: promptsData.prompts,
      conversations: conversationsData.conversations,
      settings: settingsData
    };
    // Create download
    const blob = new Blob([JSON.stringify(backup, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `insidebar-backup-${Date.now()}.json`;
    a.click();
    URL.revokeObjectURL(url);
    showStatus('success', t('msgDataExported'));
  } catch (error) {
    // Log the underlying cause instead of silently swallowing it.
    console.error('Export failed:', error);
    showStatus('error', t('msgDataExportFailed'));
  }
}
// T058-T062: Import data from a previously exported backup file.
// Prompts and conversations are merged with the 'skip' duplicate strategy;
// settings are imported except enabledProviders, which is preserved as-is.
async function importData(file) {
  try {
    const text = await file.text();
    const data = JSON.parse(text);
    if (!data.version) {
      throw new Error('Invalid export file format');
    }
    // Confirm import with the user before touching any stored data.
    const confirmMsg = t('msgImportConfirm', [
      new Date(data.exportDate).toLocaleString(),
      (data.prompts?.length || 0).toString(),
      (data.conversations?.length || 0).toString()
    ]);
    if (!confirm(confirmMsg)) {
      return;
    }
    // Import prompts
    let promptImportSummary = null;
    if (data.prompts && Array.isArray(data.prompts)) {
      promptImportSummary = await importPrompts({ prompts: data.prompts }, 'skip');
    }
    // Import conversations (chat history)
    let conversationImportSummary = null;
    if (data.conversations && Array.isArray(data.conversations)) {
      conversationImportSummary = await importConversations({ conversations: data.conversations }, 'skip');
    }
    // Import settings (but preserve current enabled providers)
    if (data.settings) {
      const currentSettings = await getSettings();
      const settingsToImport = {
        ...data.settings,
        enabledProviders: currentSettings.enabledProviders // Don't overwrite provider settings
      };
      await importSettings(settingsToImport);
    }
    await loadSettings();
    await loadDataStats();
    // Build a success message summarizing imported vs skipped items.
    const messages = [];
    if (promptImportSummary) {
      const { imported = 0, skipped = 0 } = promptImportSummary;
      messages.push(t('msgPromptsImported', [imported.toString(), skipped.toString()]));
    }
    if (conversationImportSummary) {
      const { imported = 0, skipped = 0 } = conversationImportSummary;
      messages.push(t('msgConversationsImported', [imported.toString(), skipped.toString()]));
    }
    if (messages.length > 0) {
      showStatus('success', t('msgDataImported') + ' — ' + messages.join('; ') + '.');
    } else {
      showStatus('success', t('msgDataImported'));
    }
  } catch (error) {
    // Log the underlying cause (JSON parse error, storage failure, ...) —
    // the user only sees a generic failure status, matching the custom
    // library handler's error-logging style.
    console.error('Import failed:', error);
    showStatus('error', t('msgDataImportFailed'));
  }
}
// Danger Zone: permanently delete every saved prompt after user confirmation.
async function clearPrompts() {
  const confirmed = confirm(t('msgConfirmClearPrompts'));
  if (!confirmed) return;
  try {
    await clearAllPrompts();
    await loadDataStats();
    showStatus('success', t('msgPromptsCleared'));
  } catch (error) {
    showStatus('error', t('msgClearPromptsFailed'));
  }
}
// Danger Zone: permanently delete all chat history after user confirmation.
async function clearConversations() {
  const confirmed = confirm(t('msgConfirmClearHistory'));
  if (!confirmed) return;
  try {
    await clearAllConversations();
    await loadDataStats();
    showStatus('success', t('msgHistoryCleared'));
  } catch (error) {
    showStatus('error', t('msgClearHistoryFailed'));
  }
}
// Danger Zone: restore default settings (prompts and history are untouched),
// then refresh the settings UI and provider list to reflect the defaults.
async function resetSettingsOnly() {
  const confirmed = confirm(t('msgConfirmResetSettings'));
  if (!confirmed) return;
  try {
    await resetSettings();
    await loadSettings();
    await renderProviderList();
    showStatus('success', t('msgSettingsReset'));
  } catch (error) {
    showStatus('error', t('msgResetSettingsFailed'));
  }
}
// Status message helpers
/**
 * Flash a transient status message for 3 seconds.
 * @param {'success'|'error'} type - Selects which status element to use.
 * @param {string} message - Text to display.
 */
function showStatus(type, message) {
  const elementId = type === 'error' ? 'status-error' : 'status-success';
  const element = document.getElementById(elementId);
  if (!element) return;
  // Cancel any pending hide from a previous message, otherwise a rapid second
  // call would be dismissed early by the first message's stale timer.
  if (element._hideTimerId) {
    clearTimeout(element._hideTimerId);
  }
  element.textContent = message;
  element.classList.add('show');
  element._hideTimerId = setTimeout(() => {
    element.classList.remove('show');
    element._hideTimerId = null;
  }, 3000);
}
// Validate one prompt object against the import schema.
// Returns a list of human-readable problems; an empty list means valid.
function validatePromptStructure(prompt) {
  const errors = [];
  const isNonEmptyString = (v) => Boolean(v) && typeof v === 'string';

  // Required string fields.
  if (!isNonEmptyString(prompt.title)) {
    errors.push('Missing or invalid "title" (string)');
  }
  if (!isNonEmptyString(prompt.content)) {
    errors.push('Missing or invalid "content" (string)');
  }
  if (!isNonEmptyString(prompt.category)) {
    errors.push('Missing or invalid "category" (string)');
  }

  // Required array fields (empty arrays are fine).
  if (!Array.isArray(prompt.tags)) {
    errors.push('"tags" must be an array of strings');
  }
  if (!Array.isArray(prompt.variables)) {
    errors.push('"variables" must be an array');
  }

  // Optional fields are only type-checked when present.
  const { isFavorite, useCount, lastUsed } = prompt;
  if (isFavorite !== undefined && typeof isFavorite !== 'boolean') {
    errors.push('"isFavorite" should be boolean');
  }
  if (useCount !== undefined && typeof useCount !== 'number') {
    errors.push('"useCount" should be number');
  }
  if (lastUsed !== undefined && lastUsed !== null && typeof lastUsed !== 'number') {
    errors.push('"lastUsed" should be number or null');
  }

  return errors;
}
// Generate example prompt structure
// Plain-text schema help appended to the alert() dialogs shown when a custom
// library import fails validation. Keep in sync with validatePromptStructure.
function getPromptStructureExample() {
  return `Expected JSON structure (array of prompt objects):
[
{
"title": "Short descriptive title",
"content": "Full prompt text. Use {variables} for placeholders.",
"category": "Category name",
"tags": ["tag1", "tag2"],
"variables": ["variable1", "variable2"],
"isFavorite": false,
"useCount": 0,
"lastUsed": null
}
]
Required fields:
- title (string)
- content (string)
- category (string)
- tags (array of strings)
- variables (array of strings)
Optional fields:
- isFavorite (boolean, default: false)
- useCount (number, default: 0)
- lastUsed (number or null, default: null)
See: data/prompt-libraries/Generate_a_Basic_Prompt_Library.md`;
}
// Import Custom Prompt Library
/**
 * Parse and validate a user-supplied JSON prompt-library file, then merge it
 * into the prompt store. Validation failures show schema help via alert().
 * @param {File} file - JSON file chosen by the user.
 */
async function importCustomLibraryHandler(file) {
  try {
    const text = await file.text();
    const data = JSON.parse(text);
    // Check if it's an array
    if (!Array.isArray(data)) {
      showStatus('error', t('msgInvalidPromptFormat'));
      alert(`${t('msgInvalidFormat')}\n\n${getPromptStructureExample()}`);
      return;
    }
    // Validate first prompt as a sample (fast feedback before the full scan)
    if (data.length > 0) {
      const errors = validatePromptStructure(data[0]);
      if (errors.length > 0) {
        const errorMsg = `${t('msgInvalidPromptStructure')}:\n\n${errors.join('\n')}\n\n${getPromptStructureExample()}`;
        showStatus('error', t('msgInvalidPromptStructure'));
        alert(errorMsg);
        return;
      }
    }
    // Validate all prompts
    const validationErrors = [];
    data.forEach((prompt, index) => {
      const errors = validatePromptStructure(prompt);
      if (errors.length > 0) {
        validationErrors.push(`Prompt #${index + 1}: ${errors.join(', ')}`);
      }
    });
    if (validationErrors.length > 0) {
      // Cap the alert at the first 5 problems to keep it readable.
      const errorMsg = t('msgValidationErrors', validationErrors.length.toString()) + `:\n\n${validationErrors.slice(0, 5).join('\n')}${validationErrors.length > 5 ? '\n...' : ''}\n\n${getPromptStructureExample()}`;
      showStatus('error', t('msgValidationErrors', validationErrors.length.toString()));
      alert(errorMsg);
      return;
    }
    // Wrap in expected format
    const libraryData = { prompts: data };
    // Import using the prompt manager
    const result = await importDefaultLibrary(libraryData);
    // Show results
    if (result.imported > 0) {
      showStatus('success', t('msgCustomPromptsImported', [result.imported.toString(), result.skipped.toString()]));
    } else {
      showStatus('success', t('msgAllPromptsExist'));
    }
    // Refresh stats
    await loadDataStats();
  } catch (error) {
    if (error instanceof SyntaxError) {
      // JSON.parse failed — show format help rather than a generic failure.
      showStatus('error', t('msgInvalidJSON'));
      alert(`${t('msgJSONParseError')}\n\n${getPromptStructureExample()}`);
    } else {
      showStatus('error', t('msgCustomImportFailed'));
      console.error('Import error:', error);
    }
  }
}
// Import Default Prompt Library
// Imports the bundled default prompt library, updating the trigger button to
// reflect progress and outcome (green when imported, disabled when already done).
async function importDefaultLibraryHandler() {
  const button = document.getElementById('import-default-library');
  try {
    button.disabled = true;
    button.textContent = t('msgImporting');
    // Fetch the default library data
    const response = await fetch(chrome.runtime.getURL('data/prompt-libraries/default-prompts.json'));
    // fetch() only rejects on network errors; surface HTTP failures explicitly.
    if (!response.ok) {
      throw new Error(`Failed to fetch default library: ${response.status}`);
    }
    const promptsArray = await response.json();
    // Wrap array in expected format { prompts: [...] }
    const libraryData = Array.isArray(promptsArray)
      ? { prompts: promptsArray }
      : promptsArray;
    // Import using the prompt manager
    const result = await importDefaultLibrary(libraryData);
    // Update UI
    if (result.imported > 0) {
      button.textContent = t('msgImported');
      button.style.background = '#4caf50';
      button.style.color = 'white';
      showStatus('success', t('msgDefaultPromptsImported', [result.imported.toString(), result.skipped.toString()]));
    } else {
      // Everything was already present — keep the button disabled to prevent re-runs.
      button.textContent = t('msgAlreadyImported');
      button.disabled = true;
      showStatus('success', t('msgAllPromptsExist'));
    }
    // Refresh stats
    await loadDataStats();
  } catch (error) {
    console.error('Default library import failed:', error);
    showStatus('error', t('msgDefaultImportFailed'));
    button.disabled = false;
    button.textContent = t('btnImportDefault');
  }
}
// Enter Key Behavior Helper Functions
// Show or hide the Enter-key behavior settings panel.
function updateEnterBehaviorVisibility(enabled) {
  const panel = document.getElementById('enter-behavior-settings');
  if (!panel) return;
  panel.style.display = enabled ? 'block' : 'none';
}
// Show the custom modifier checkboxes only when the 'custom' preset is selected.
function updateCustomEnterSettingsVisibility(preset) {
  const panel = document.getElementById('custom-enter-settings');
  if (!panel) return;
  panel.style.display = preset === 'custom' ? 'block' : 'none';
}
/**
 * Populate the custom Enter-key modifier checkboxes from a behavior object.
 * Tolerates missing newlineModifiers/sendModifiers (e.g. a partially saved
 * setting), which previously threw a TypeError, and missing checkboxes.
 * @param {{newlineModifiers?: object, sendModifiers?: object}} enterBehavior
 */
function loadCustomEnterSettings(enterBehavior) {
  const setChecked = (id, value) => {
    const checkbox = document.getElementById(id);
    if (checkbox) checkbox.checked = Boolean(value);
  };
  const newline = enterBehavior.newlineModifiers ?? {};
  const send = enterBehavior.sendModifiers ?? {};
  for (const key of ['shift', 'ctrl', 'alt', 'meta']) {
    setChecked(`newline-${key}`, newline[key]);
    setChecked(`send-${key}`, send[key]);
  }
}
/**
 * Apply a named Enter-key preset, copying its modifier configuration into the
 * saved enterKeyBehavior setting and syncing the checkboxes. Selecting
 * 'custom' only records the preset name and leaves current modifiers intact.
 * @param {'default'|'swapped'|'slack'|'discord'|'custom'} preset
 */
async function applyEnterKeyPreset(preset) {
  const settings = await getSettings();
  const enterBehavior = settings.enterKeyBehavior || {};
  enterBehavior.preset = preset;
  // Define preset configurations: which modifier combo inserts a newline
  // and which one sends the message.
  const presets = {
    default: {
      newlineModifiers: { shift: true, ctrl: false, alt: false, meta: false },
      sendModifiers: { shift: false, ctrl: false, alt: false, meta: false }
    },
    swapped: {
      newlineModifiers: { shift: false, ctrl: false, alt: false, meta: false },
      sendModifiers: { shift: true, ctrl: false, alt: false, meta: false }
    },
    slack: {
      newlineModifiers: { shift: false, ctrl: true, alt: false, meta: false },
      sendModifiers: { shift: false, ctrl: false, alt: false, meta: false }
    },
    discord: {
      newlineModifiers: { shift: false, ctrl: false, alt: false, meta: false },
      sendModifiers: { shift: false, ctrl: true, alt: false, meta: false }
    }
  };
  if (preset !== 'custom' && presets[preset]) {
    enterBehavior.newlineModifiers = presets[preset].newlineModifiers;
    enterBehavior.sendModifiers = presets[preset].sendModifiers;
    loadCustomEnterSettings(enterBehavior);
  }
  await saveSetting('enterKeyBehavior', enterBehavior);
  showStatus('success', t('msgPresetChanged', preset));
}
// Persist the custom Enter-key modifier mapping read from the checkboxes,
// switching the active preset to 'custom' and syncing the preset dropdown.
async function saveCustomEnterSettings() {
  const settings = await getSettings();
  const enterBehavior = settings.enterKeyBehavior || {};

  // Read a group of four modifier checkboxes by id prefix.
  const readModifiers = (prefix) => ({
    shift: document.getElementById(`${prefix}-shift`).checked,
    ctrl: document.getElementById(`${prefix}-ctrl`).checked,
    alt: document.getElementById(`${prefix}-alt`).checked,
    meta: document.getElementById(`${prefix}-meta`).checked
  });

  enterBehavior.preset = 'custom';
  enterBehavior.newlineModifiers = readModifiers('newline');
  enterBehavior.sendModifiers = readModifiers('send');
  await saveSetting('enterKeyBehavior', enterBehavior);

  // Reflect the switch to 'custom' in the preset dropdown.
  const presetSelect = document.getElementById('enter-preset-select');
  if (presetSelect) {
    presetSelect.value = 'custom';
  }
  showStatus('success', t('msgCustomMappingSaved'));
}
// T073: Version Check Functions
// Display bundled version/commit info, then kick off an automatic update check.
async function loadVersionDisplay() {
  const versionEl = document.getElementById('version');
  const commitEl = document.getElementById('commit-hash');
  const versionInfo = await loadVersionInfo();
  if (!versionInfo) {
    versionEl.textContent = t('msgVersionUnknown');
    commitEl.textContent = t('msgCommitHashUnavailable');
    return;
  }
  versionEl.textContent = t('labelVersion', versionInfo.version);
  commitEl.textContent = t('msgBuildInfo', [versionInfo.commitHash, versionInfo.buildDate]);
  // Automatically check for updates on page load
  await performVersionCheck();
}
/**
 * Run a manual update check and render the outcome (error / update available /
 * up-to-date) into the #update-status area, restoring the button afterwards.
 */
async function performVersionCheck() {
  const button = document.getElementById('check-updates-btn');
  const statusDiv = document.getElementById('update-status');
  try {
    button.disabled = true;
    button.textContent = t('msgChecking');
    statusDiv.style.display = 'none';
    const result = await checkForUpdates();
    if (result.error) {
      statusDiv.textContent = result.error;
      statusDiv.className = 'update-status update-error';
      statusDiv.style.display = 'block';
      showStatus('error', result.error);
    } else if (result.updateAvailable) {
      const latest = result.latestHash;
      const date = new Date(result.latestDate).toLocaleDateString();
      const current = result.currentHash;
      // Only the first line of the (possibly multi-line) commit message.
      const message = result.latestMessage.split('\n')[0];
      // NOTE(review): innerHTML interpolates remote commit data through t();
      // assumed the translation template controls/escapes markup — confirm.
      statusDiv.innerHTML = t('msgUpdateStatusAvailable', [latest, date, current, message]);
      statusDiv.className = 'update-status update-available';
      statusDiv.style.display = 'block';
      showStatus('success', t('msgUpdateAvailable'));
    } else {
      statusDiv.textContent = t('msgLatestVersion');
      statusDiv.className = 'update-status update-current';
      statusDiv.style.display = 'block';
      showStatus('success', t('msgUpToDate'));
    }
  } catch (error) {
    statusDiv.textContent = t('msgCheckUpdatesFailed');
    statusDiv.className = 'update-status update-error';
    statusDiv.style.display = 'block';
    showStatus('error', t('msgCheckUpdatesFailed'));
    console.error('Version check error:', error);
  } finally {
    // Always re-enable and relabel the button, even after an error.
    button.disabled = false;
    button.textContent = t('btnCheckUpdates');
  }
}
// Kick off page initialization as soon as this script is evaluated.
init();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
sidebar/marked.min.js | JavaScript | /**
* marked v11.1.1 - a markdown parser
* Copyright (c) 2011-2023, Christopher Jeffrey. (MIT Licensed)
* https://github.com/markedjs/marked
*/
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length<t;)n.push("");for(;s<n.length;s++)n[s]=n[s].trim().replace(/\\\|/g,"|");return n}function x(e,t,n){const s=e.length;if(0===s)return"";let r=0;for(;r<s;){const i=e.charAt(s-r-1);if(i!==t||n){if(i===t||!n)break;r++}else r++}return e.slice(0,s-r)}function b(e,t,n,s){const 
r=t.href,i=t.title?c(t.title):null,l=e[1].replace(/\\([\[\]])/g,"$1");if("!"!==e[0].charAt(0)){s.state.inLink=!0;const e={type:"link",raw:n,href:r,title:i,text:l,tokens:s.inlineTokens(l)};return s.state.inLink=!1,e}return{type:"image",raw:n,href:r,title:i,text:c(l)}}class w{options;rules;lexer;constructor(t){this.options=t||e.defaults}space(e){const t=this.rules.block.newline.exec(e);if(t&&t[0].length>0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let 
n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e<r.items.length;e++)if(this.lexer.state.top=!1,r.items[e].tokens=this.lexer.blockTokens(r.items[e].text,[]),!r.loose){const t=r.items[e].tokens.filter((e=>"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e<r.items.length;e++)r.items[e].loose=!0;return r}}html(e){const t=this.rules.block.html.exec(e);if(t){return{type:"html",block:!0,raw:t[0],pre:"pre"===t[1]||"script"===t[1]||"style"===t[1],text:t[0]}}}def(e){const t=this.rules.block.def.exec(e);if(t){const 
e=t[1].toLowerCase().replace(/\s+/g," "),n=t[2]?t[2].replace(/^<(.*)>$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^<a 
/i.test(t[0])?this.lexer.state.inLink=!0:this.lexer.state.inLink&&/^<\/a>/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^</.test(e)){if(!/>$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s<e.length;s++)if("\\"===e[s])s++;else if(e[s]===t[0])n++;else if(e[s]===t[1]&&(n--,n<0))return s;return-1}(t[2],"()");if(e>-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^</.test(n)&&(n=this.options.pedantic&&!/>$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const 
a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull )((?:.|\n(?!\s*?\n|bull ))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| 
+\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/<!--(?!-?>)[\s\S]*?(?:-->|$)/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|<![A-Z][\\s\\S]*?(?:>\\n*|$)|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? 
*/?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),Z={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},q=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! 
*\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...Z,table:q,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",q).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...Z,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)|<tag(?:\"[^\"]*\"|'[^']*'|\\s[^'\"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}$+<=>`^|~",M=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),O=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),C=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^</[a-zA-Z][\\w:-]*\\s*>|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^<![a-zA-Z]+\\s[\\s\\S]*?>|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\
s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:O,emStrongRDelimAst:C,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:M,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,url:f},V={...K,link:k(/^!?\[(label)\]\((.*?)\)/).replace("label",F).getRegex(),reflink:k(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace("label",F).getRegex()},W={...K,escape:k(Q).replace("])","~|])").getRegex(),url:k(/^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,"i").replace("email",/[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/).getRegex(),_backpedal:/(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,del:/^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,text:/^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/},Y={...W,br:k(v).replace("{2,}","*").getRegex(),text:k(W.text).replace("\\b_","\\b_| {2,}\\n").replace(/\{2,\}/g,"*").getRegex()},ee={normal:Z,gfm:L,pedantic:P},te={normal:K,gfm:W,breaks:Y,pedantic:V};class ne{tokens;options;state;tokenizer;inlineQueue;constructor(t){this.tokens=[],this.tokens.links=Object.create(null),this.options=t||e.defaults,this.options.tokenizer=this.options.tokenizer||new 
w,this.tokenizer=this.options.tokenizer,this.tokenizer.options=this.options,this.tokenizer.lexer=this,this.inlineQueue=[],this.state={inLink:!1,inRawBlock:!1,top:!0};const n={block:ee.normal,inline:te.normal};this.options.pedantic?(n.block=ee.pedantic,n.inline=te.pedantic):this.options.gfm&&(n.block=ee.gfm,this.options.breaks?n.inline=te.breaks:n.inline=te.gfm),this.tokenizer.rules=n}static get rules(){return{block:ee,inline:te}}static lex(e,t){return new ne(t).lex(e)}static lexInline(e,t){return new ne(t).inlineTokens(e)}lex(e){e=e.replace(/\r\n|\r/g,"\n"),this.blockTokens(e,this.tokens);for(let e=0;e<this.inlineQueue.length;e++){const t=this.inlineQueue[e];this.inlineTokens(t.src,t.tokens)}return this.inlineQueue=[],this.tokens}blockTokens(e,t=[]){let n,s,r,i;for(e=this.options.pedantic?e.replace(/\t/g," ").replace(/^ +$/gm,""):e.replace(/^( *)(\t+)/gm,((e,t,n)=>t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const 
e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?'<pre><code class="language-'+c(s)+'">'+(n?e:c(e,!0))+"</code></pre>\n":"<pre><code>"+(n?e:c(e,!0))+"</code></pre>\n"}blockquote(e){return`<blockquote>\n${e}</blockquote>\n`}html(e,t){return e}heading(e,t,n){return`<h${t}>${e}</h${t}>\n`}hr(){return"<hr>\n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+"</"+s+">\n"}listitem(e,t,n){return`<li>${e}</li>\n`}checkbox(e){return"<input "+(e?'checked="" ':"")+'disabled="" type="checkbox">'}paragraph(e){return`<p>${e}</p>\n`}table(e,t){return t&&(t=`<tbody>${t}</tbody>`),"<table>\n<thead>\n"+e+"</thead>\n"+t+"</table>\n"}tablerow(e){return`<tr>\n${e}</tr>\n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`</${n}>\n`}strong(e){return`<strong>${e}</strong>`}em(e){return`<em>${e}</em>`}codespan(e){return`<code>${e}</code>`}br(){return"<br>"}del(e){return`<del>${e}</del>`}link(e,t,n){const s=g(e);if(null===s)return n;let r='<a href="'+(e=s)+'"';return t&&(r+=' title="'+t+'"'),r+=">"+n+"</a>",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=`<img src="${e=s}" alt="${n}"`;return t&&(r+=` title="${t}"`),r+=">",r}text(e){return e}}class re{strong(e){return e}em(e){return e}codespan(e){return e}del(e){return e}html(e){return e}text(e){return e}link(e,t,n){return""+n}image(e,t,n){return""+n}br(){return""}}class 
ie{options;renderer;textRenderer;constructor(t){this.options=t||e.defaults,this.options.renderer=this.options.renderer||new se,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new re}static parse(e,t){return new ie(t).parse(e)}static parseInline(e,t){return new ie(t).parseInline(e)}parse(e,t=!0){let n="";for(let s=0;s<e.length;s++){const r=e[s];if(this.options.extensions&&this.options.extensions.renderers&&this.options.extensions.renderers[r.type]){const e=r,t=this.options.extensions.renderers[e.type].call({parser:this},e);if(!1!==t||!["space","hr","heading","code","table","blockquote","list","html","paragraph","text"].includes(e.type)){n+=t||"";continue}}switch(r.type){case"space":continue;case"hr":n+=this.renderer.hr();continue;case"heading":{const e=r;n+=this.renderer.heading(this.parseInline(e.tokens),e.depth,p(this.parseInline(e.tokens,this.textRenderer)));continue}case"code":{const e=r;n+=this.renderer.code(e.text,e.lang,!!e.escaped);continue}case"table":{const e=r;let t="",s="";for(let t=0;t<e.header.length;t++)s+=this.renderer.tablecell(this.parseInline(e.header[t].tokens),{header:!0,align:e.align[t]});t+=this.renderer.tablerow(s);let i="";for(let t=0;t<e.rows.length;t++){const n=e.rows[t];s="";for(let t=0;t<n.length;t++)s+=this.renderer.tablecell(this.parseInline(n[t].tokens),{header:!1,align:e.align[t]});i+=this.renderer.tablerow(s)}n+=this.renderer.table(t,i);continue}case"blockquote":{const e=r,t=this.parse(e.tokens);n+=this.renderer.blockquote(t);continue}case"list":{const e=r,t=e.ordered,s=e.start,i=e.loose;let l="";for(let t=0;t<e.items.length;t++){const n=e.items[t],s=n.checked,r=n.task;let o="";if(n.task){const e=this.renderer.checkbox(!!s);i?n.tokens.length>0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" 
"+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1<e.length&&"text"===e[s+1].type;)i=e[++s],l+="\n"+(i.tokens?this.parseInline(i.tokens):i.text);n+=t?this.renderer.paragraph(l):l;continue}default:{const e='Token with "'+r.type+'" type was not found.';if(this.options.silent)return console.error(e),"";throw new Error(e)}}}return n}parseInline(e,t){t=t||this.renderer;let n="";for(let s=0;s<e.length;s++){const r=e[s];if(this.options.extensions&&this.options.extensions.renderers&&this.options.extensions.renderers[r.type]){const e=this.options.extensions.renderers[r.type].call({parser:this},r);if(!1!==e||!["escape","html","link","image","strong","em","codespan","br","del","text"].includes(r.type)){n+=e||"";continue}}switch(r.type){case"escape":{const e=r;n+=t.text(e.text);break}case"html":{const e=r;n+=t.html(e.text);break}case"link":{const e=r;n+=t.link(e.href,e.title,this.parseInline(e.tokens,t));break}case"image":{const e=r;n+=t.image(e.href,e.title,e.text);break}case"strong":{const e=r;n+=t.strong(this.parseInline(e.tokens,t));break}case"em":{const e=r;n+=t.em(this.parseInline(e.tokens,t));break}case"codespan":{const e=r;n+=t.codespan(e.text);break}case"br":n+=t.br();break;case"del":{const e=r;n+=t.del(this.parseInline(e.tokens,t));break}case"text":{const e=r;n+=t.text(e.text);break}default:{const e='Token with "'+r.type+'" type was not found.';if(this.options.silent)return console.error(e),"";throw new Error(e)}}}return n}}class le{options;constructor(t){this.options=t||e.defaults}static passThroughHooks=new Set(["preprocess","postprocess","processAllTokens"]);preprocess(e){return e}postprocess(e){return 
e}processAllTokens(e){return e}}class oe{defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};options=this.setOptions;parse=this.#e(ne.lex,ie.parse);parseInline=this.#e(ne.lexInline,ie.parseInline);Parser=ie;Renderer=se;TextRenderer=re;Lexer=ne;Tokenizer=w;Hooks=le;constructor(...e){this.use(...e)}walkTokens(e,t){let n=[];for(const s of e)switch(n=n.concat(t.call(this,s)),s.type){case"table":{const e=s;for(const s of e.header)n=n.concat(this.walkTokens(s.tokens,t));for(const s of e.rows)for(const e of s)n=n.concat(this.walkTokens(e.tokens,t));break}case"list":{const e=s;n=n.concat(this.walkTokens(e.items,t));break}default:{const e=s;this.defaults.extensions?.childTokens?.[e.type]?this.defaults.extensions.childTokens[e.type].forEach((s=>{n=n.concat(this.walkTokens(e[s],t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not 
exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. 
The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="<p>An error occurred:</p><pre>"+c(n.message+"",!0)+"</pre>";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke}));
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
sidebar/sidebar.css | CSS | * {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* Material Symbols Icon Utilities */
.material-symbols-outlined {
font-family: 'Material Symbols Outlined';
font-weight: normal;
font-style: normal;
font-size: 20px;
line-height: 1;
letter-spacing: normal;
text-transform: none;
display: inline-block;
white-space: nowrap;
word-wrap: normal;
direction: ltr;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
text-rendering: optimizeLegibility;
font-feature-settings: 'liga';
transition: font-variation-settings 0.2s ease;
/* Default variable axes */
font-variation-settings:
'FILL' 0,
'wght' 400,
'GRAD' 0,
'opsz' 24;
}
/* Theme-aware gradient adjustment for Material Symbols */
[data-theme="light"] .material-symbols-outlined,
body:not([data-theme]) .material-symbols-outlined {
font-variation-settings:
'FILL' 0,
'wght' 400,
'GRAD' 0,
'opsz' 24;
}
[data-theme="dark"] .material-symbols-outlined {
font-variation-settings:
'FILL' 0,
'wght' 400,
'GRAD' -25,
'opsz' 24;
}
/* Filled state for icons (e.g., filled star for favorites) */
.material-symbols-outlined.filled {
font-variation-settings:
'FILL' 1,
'wght' 400,
'GRAD' 0,
'opsz' 24;
}
[data-theme="dark"] .material-symbols-outlined.filled {
font-variation-settings:
'FILL' 1,
'wght' 400,
'GRAD' -25,
'opsz' 24;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
width: 100%;
height: 100vh;
overflow: hidden;
background: #ffffff;
color: #333333;
}
#app {
display: flex;
flex-direction: column;
height: 100vh;
}
#provider-container {
flex: 1;
position: relative;
overflow: hidden;
background: #f5f5f5;
}
#provider-container iframe {
width: 100%;
height: 100%;
border: none;
display: block;
}
#loading, #error {
display: flex;
align-items: center;
justify-content: center;
height: 100%;
padding: 20px;
text-align: center;
font-size: 14px;
}
#loading {
color: #666;
}
#error {
color: #d32f2f;
background: #ffebee;
border: 1px solid #ffcdd2;
border-radius: 4px;
margin: 20px;
height: auto;
}
#provider-tabs {
display: flex;
border-top: 1px solid #e0e0e0;
background: #f5f5f5;
overflow-x: auto;
flex-shrink: 0;
}
#provider-tabs button {
flex: 0 0 48px;
width: 48px;
padding: 10px 8px;
border: none;
background: transparent;
cursor: pointer;
transition: background 0.2s, border-top 0.2s, opacity 0.2s;
font-size: 12px;
color: #666;
border-top: 2px solid transparent;
display: flex;
align-items: center;
justify-content: center;
opacity: 0.6;
}
.provider-icon {
width: 24px;
height: 24px;
object-fit: contain;
display: block;
}
#provider-tabs button:hover {
background: #eeeeee;
color: #333;
opacity: 1;
}
#provider-tabs button.active {
background: #ffffff;
border-top: 2px solid #1a73e8;
color: #1a73e8;
font-weight: 500;
opacity: 1;
}
.tab-separator {
flex: 1;
width: 1px;
background: transparent;
margin: 8px 4px;
}
/* UI tabs (chat history, prompt library, settings) - fixed width on right */
#provider-tabs button[data-view],
#provider-tabs button#settings-tab {
flex: 0 0 48px;
max-width: 48px;
}
/* Dark theme */
[data-theme="dark"] {
background: #1e1e1e;
color: #e0e0e0;
}
[data-theme="dark"] body {
background: #1e1e1e;
color: #e0e0e0;
}
[data-theme="dark"] #provider-container {
background: #2d2d2d;
}
[data-theme="dark"] #loading {
color: #aaa;
}
[data-theme="dark"] #error {
color: #ff6b6b;
background: #3a2a2a;
border-color: #5a3a3a;
}
[data-theme="dark"] #provider-tabs {
background: #2d2d2d;
border-top-color: #444;
}
[data-theme="dark"] #provider-tabs button {
color: #aaa;
opacity: 0.6;
}
[data-theme="dark"] #provider-tabs button:hover {
background: #3a3a3a;
color: #e0e0e0;
opacity: 1;
}
[data-theme="dark"] #provider-tabs button.active {
background: #1e1e1e;
border-top-color: #4285f4;
color: #4285f4;
opacity: 1;
}
/* Dark theme icons - invert to make dark icons appear light */
[data-theme="dark"] .provider-icon {
filter: invert(1) brightness(1.2);
}
[data-theme="dark"] .tab-separator {
background: transparent;
}
/* T045: Prompt Library Styles */
#prompt-library {
display: flex;
flex-direction: column;
height: 100%;
padding: 16px;
overflow: hidden;
}
#prompt-library-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
#prompt-library-header h2 {
font-size: 20px;
font-weight: 600;
color: #333;
margin: 0;
}
#prompt-filters {
display: flex;
flex-direction: column;
gap: 8px;
margin-bottom: 16px;
}
.filter-row-top {
display: flex;
gap: 8px;
align-items: center;
}
.filter-row-bottom {
display: flex;
gap: 8px;
align-items: center;
}
#prompt-search {
flex: 1;
padding: 8px 12px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
}
#category-filter-wrapper {
position: relative;
}
#category-filter-btn {
padding: 8px 12px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 44px;
opacity: 0.7;
}
/* All icons in category filter wrapper (button and popup) */
#category-filter-wrapper .material-symbols-outlined {
font-size: 18px !important;
}
#category-filter-btn:hover {
background: #f5f5f5;
opacity: 1;
}
#category-filter-btn.active {
background: #e3f2fd;
opacity: 1;
}
.category-popup {
position: absolute;
top: 100%;
left: 0;
margin-top: 4px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 4px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
z-index: 100;
min-width: 200px;
max-height: calc(100vh - 200px);
overflow-y: auto;
}
.category-popup-item {
padding: 10px 12px;
cursor: pointer;
font-size: 14px;
color: #333;
transition: background 0.2s;
}
.category-popup-item:hover {
background: #f5f5f5;
}
.category-popup-item.selected {
background: #e3f2fd;
color: #1976d2;
font-weight: 500;
}
#sort-buttons {
display: flex;
gap: 4px;
flex: 1;
justify-content: flex-end;
}
.sort-btn {
padding: 8px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 36px;
opacity: 0.7;
}
.sort-btn .material-symbols-outlined {
font-size: 18px;
}
.sort-btn:hover {
background: #f5f5f5;
opacity: 1;
}
.sort-btn.active {
background: #e3f2fd;
color: #1976d2;
opacity: 1;
}
#show-favorites {
padding: 8px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 36px;
opacity: 0.7;
}
#show-favorites .material-symbols-outlined {
font-size: 18px;
}
#show-favorites:hover {
background: #f5f5f5;
opacity: 1;
}
#show-favorites.active {
background: #fff9c4;
color: #f57c00;
opacity: 1;
}
/* Workspace header */
#workspace-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
#workspace-header h2 {
font-size: 20px;
font-weight: 600;
color: #333;
margin: 0;
}
/* Workspace for editing received text */
#prompt-workspace {
background: #f9f9f9;
border: 1px solid #e0e0e0;
border-radius: 4px;
padding: 12px;
margin-bottom: 16px;
}
.workspace-actions {
display: flex;
justify-content: space-between;
align-items: center;
gap: 12px;
margin-top: 8px;
}
.workspace-actions-left {
display: flex;
gap: 6px;
align-items: center;
}
.workspace-actions-right {
display: flex;
gap: 6px;
align-items: center;
}
#workspace-provider-wrapper {
position: relative;
}
#workspace-provider-btn {
padding: 6px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
display: flex;
align-items: center;
justify-content: center;
min-width: 36px;
opacity: 0.8;
}
#workspace-provider-btn:hover {
background: #f5f5f5;
opacity: 1;
}
#workspace-provider-btn.active {
background: #e3f2fd;
opacity: 1;
}
.provider-icon-small {
width: 24px;
height: 24px;
object-fit: contain;
display: block;
}
.workspace-provider-popup {
position: absolute;
bottom: 100%;
left: 0;
margin-bottom: 4px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 4px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
z-index: 100;
min-width: 150px;
max-height: calc(100vh - 200px);
overflow-y: auto;
}
.workspace-provider-popup-item {
display: flex;
align-items: center;
gap: 10px;
padding: 10px 12px;
cursor: pointer;
font-size: 13px;
color: #333;
transition: background 0.2s;
}
.workspace-provider-popup-item:hover {
background: #f5f5f5;
}
.workspace-provider-popup-item.selected {
background: #e3f2fd;
color: #1976d2;
font-weight: 500;
}
.workspace-actions button {
padding: 6px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
font-size: 12px;
color: #666;
transition: background 0.2s, opacity 0.2s;
}
.workspace-actions button:hover {
background: #f5f5f5;
opacity: 1;
}
.workspace-actions .material-symbols-outlined {
font-size: 20px;
}
#workspace-send-btn {
padding: 6px 10px;
border: none;
border-radius: 4px;
background: #1a73e8;
color: white;
cursor: pointer;
transition: background 0.2s;
display: flex;
align-items: center;
justify-content: center;
min-width: 36px;
}
#workspace-send-btn:hover {
background: #1557b0;
}
#prompt-workspace-text {
width: 100%;
min-height: 320px;
max-height: 500px;
padding: 10px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
font-family: inherit;
line-height: 1.6;
resize: vertical;
background: white;
color: #333;
overflow-y: auto;
}
#prompt-list {
flex: 1;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: 8px;
}
.prompt-item {
padding: 12px;
border: 1px solid #e0e0e0;
border-radius: 4px;
background: white;
cursor: pointer;
transition: border-color 0.2s, box-shadow 0.2s;
}
.prompt-item:hover {
border-color: #1a73e8;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.prompt-item-header {
display: flex;
justify-content: space-between;
align-items: start;
margin-bottom: 8px;
}
.prompt-item-title {
font-weight: 500;
font-size: 14px;
color: #333;
margin: 0;
}
.prompt-item-actions {
display: flex;
gap: 4px;
}
.prompt-item-actions button {
padding: 4px 6px;
border: none;
background: transparent;
cursor: pointer;
font-size: 14px;
opacity: 0.6;
transition: opacity 0.2s;
}
.prompt-item-actions button:hover {
opacity: 1;
}
.prompt-item-actions .material-symbols-outlined {
font-size: 18px;
}
.prompt-item-content {
font-size: 12px;
color: #666;
line-height: 1.4;
margin-bottom: 8px;
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
}
.prompt-item-meta {
display: flex;
gap: 8px;
font-size: 11px;
color: #999;
}
.prompt-item-category {
padding: 2px 6px;
background: #f5f5f5;
border-radius: 3px;
}
.prompt-item-tags {
display: flex;
gap: 4px;
flex-wrap: wrap;
}
.prompt-item-tag {
padding: 2px 6px;
background: #e3f2fd;
color: #1976d2;
border-radius: 3px;
}
/* T046: Modal Styles */
.modal {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.5);
display: flex;
align-items: center;
justify-content: center;
z-index: 1000;
}
.modal-content {
background: white;
border-radius: 8px;
width: 90%;
max-width: 600px;
max-height: 90vh;
display: flex;
flex-direction: column;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
}
/* View conversation modal - larger size (80% width and height) */
#view-conversation-modal .modal-content {
width: 80%;
max-width: none;
height: 80vh;
max-height: 80vh;
}
.modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 16px 20px;
border-bottom: 1px solid #e0e0e0;
}
.modal-header h3 {
margin: 0;
font-size: 18px;
color: #333;
}
.modal-header-actions {
display: flex;
gap: 8px;
align-items: center;
}
.header-action-btn {
background: none;
border: none;
cursor: pointer;
color: #666;
padding: 4px;
display: flex;
align-items: center;
justify-content: center;
transition: color 0.2s;
}
.header-action-btn:hover {
color: #333;
}
[data-theme="dark"] .header-action-btn {
color: #999;
}
[data-theme="dark"] .header-action-btn:hover {
color: #e0e0e0;
}
.close-btn {
background: none;
border: none;
font-size: 24px;
cursor: pointer;
color: #999;
padding: 0;
width: 28px;
height: 28px;
display: flex;
align-items: center;
justify-content: center;
border-radius: 4px;
transition: background 0.2s;
}
.close-btn:hover {
background: #f5f5f5;
color: #333;
}
.modal-body {
padding: 20px;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: 12px;
}
.modal-body input[type="text"],
.modal-body textarea {
width: 100%;
padding: 10px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
font-family: inherit;
}
.modal-body textarea {
min-height: 150px;
resize: vertical;
}
.modal-body label {
display: flex;
align-items: center;
gap: 8px;
font-size: 14px;
color: #666;
cursor: pointer;
}
.modal-footer {
display: flex;
justify-content: flex-end;
gap: 8px;
padding: 16px 20px;
border-top: 1px solid #e0e0e0;
}
.modal-footer button {
padding: 8px;
border: none;
background: transparent;
cursor: pointer;
transition: opacity 0.2s;
color: #666;
opacity: 0.7;
display: flex;
align-items: center;
justify-content: center;
}
.modal-footer button:hover {
opacity: 1;
}
.modal-footer button .material-symbols-outlined {
font-size: 24px;
}
/* Dark theme for Prompt Library */
[data-theme="dark"] #prompt-library-header h2 {
color: #e0e0e0;
}
[data-theme="dark"] #prompt-search {
background: #2d2d2d;
border-color: #444;
color: #e0e0e0;
}
[data-theme="dark"] #category-filter-btn {
background: transparent;
}
[data-theme="dark"] #category-filter-btn:hover {
background: #3a3a3a;
}
[data-theme="dark"] #category-filter-btn.active {
background: #1a3a5a;
}
[data-theme="dark"] .category-popup {
background: #2d2d2d;
border-color: #444;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.5);
}
[data-theme="dark"] .category-popup-item {
color: #e0e0e0;
}
[data-theme="dark"] .category-popup-item:hover {
background: #3a3a3a;
}
[data-theme="dark"] .category-popup-item.selected {
background: #1a3a5a;
color: #64b5f6;
}
[data-theme="dark"] .sort-btn {
background: transparent;
color: #e0e0e0;
}
[data-theme="dark"] .sort-btn:hover {
background: #3a3a3a;
}
[data-theme="dark"] .sort-btn.active {
background: #1a3a5a;
color: #64b5f6;
}
[data-theme="dark"] #show-favorites {
background: transparent;
color: #e0e0e0;
}
[data-theme="dark"] #show-favorites:hover {
background: #3a3a3a;
}
[data-theme="dark"] #show-favorites.active {
background: #4a3a00;
color: #fbc02d;
}
[data-theme="dark"] #workspace-header h2 {
color: #e0e0e0;
}
[data-theme="dark"] #prompt-workspace {
background: #2d2d2d;
border-color: #444;
}
[data-theme="dark"] #workspace-provider-btn {
background: transparent;
}
[data-theme="dark"] #workspace-provider-btn:hover {
background: #3a3a3a;
}
[data-theme="dark"] #workspace-provider-btn.active {
background: #1a3a5a;
}
[data-theme="dark"] .provider-icon-small {
filter: invert(1) brightness(1.2);
}
[data-theme="dark"] .workspace-provider-popup {
background: #2d2d2d;
border-color: #444;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.5);
}
[data-theme="dark"] .workspace-provider-popup-item {
color: #e0e0e0;
}
[data-theme="dark"] .workspace-provider-popup-item:hover {
background: #3a3a3a;
}
[data-theme="dark"] .workspace-provider-popup-item.selected {
background: #1a3a5a;
color: #64b5f6;
}
[data-theme="dark"] .workspace-actions button {
background: transparent;
color: #aaa;
}
[data-theme="dark"] .workspace-actions button:hover {
background: #3a3a3a;
}
[data-theme="dark"] #workspace-send-btn {
background: #1a73e8;
color: white;
}
[data-theme="dark"] #workspace-send-btn:hover {
background: #1557b0;
}
[data-theme="dark"] #prompt-workspace-text {
background: #1e1e1e;
border-color: #444;
color: #e0e0e0;
}
[data-theme="dark"] .prompt-item {
background: #2d2d2d;
border-color: #444;
}
[data-theme="dark"] .prompt-item:hover {
border-color: #4285f4;
}
[data-theme="dark"] .prompt-item-title {
color: #e0e0e0;
}
[data-theme="dark"] .prompt-item-content {
color: #aaa;
}
[data-theme="dark"] .prompt-item-category {
background: #3a3a3a;
color: #aaa;
}
[data-theme="dark"] .prompt-item-tag {
background: #1a3a5a;
color: #64b5f6;
}
[data-theme="dark"] .prompt-item-actions button {
color: #e0e0e0;
}
[data-theme="dark"] .modal-content {
background: #2d2d2d;
}
[data-theme="dark"] .modal-header {
border-bottom-color: #444;
}
[data-theme="dark"] .modal-header h3 {
color: #e0e0e0;
}
[data-theme="dark"] .modal-body input[type="text"],
[data-theme="dark"] .modal-body textarea {
background: #1e1e1e;
border-color: #444;
color: #e0e0e0;
}
[data-theme="dark"] .modal-body label {
color: #aaa;
}
[data-theme="dark"] .modal-footer {
border-top-color: #444;
}
[data-theme="dark"] .modal-footer button {
background: transparent;
color: #aaa;
}
[data-theme="dark"] .modal-footer button:hover {
opacity: 1;
}
/* Empty state */
.prompt-list-empty {
text-align: center;
padding: 40px 20px;
color: #999;
}
.prompt-list-empty p {
margin: 8px 0;
font-size: 14px;
}
/* Settings View Styles */
#settings-view {
display: flex;
flex-direction: column;
height: 100%;
overflow: hidden;
}
#settings-header {
padding: 16px;
border-bottom: 1px solid #e0e0e0;
}
#settings-header h2 {
font-size: 20px;
font-weight: 600;
color: #333;
margin: 0;
}
#settings-content {
flex: 1;
overflow-y: auto;
padding: 16px;
}
.settings-section {
margin-bottom: 24px;
padding: 16px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 8px;
}
.settings-section h3 {
font-size: 16px;
font-weight: 600;
color: #333;
margin: 0 0 16px 0;
}
.setting-item {
margin-bottom: 16px;
}
.setting-item:last-child {
margin-bottom: 0;
}
.setting-item label {
display: block;
font-size: 14px;
font-weight: 500;
color: #666;
margin-bottom: 8px;
}
.setting-item select {
width: 100%;
padding: 10px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
background: white;
cursor: pointer;
}
.setting-description {
font-size: 12px;
color: #999;
margin: 0 0 12px 0;
line-height: 1.4;
}
.checkbox-label {
display: flex !important;
align-items: center;
gap: 8px;
padding: 8px 0;
font-size: 14px !important;
font-weight: 400 !important;
color: #333 !important;
cursor: pointer;
margin-bottom: 0 !important;
}
.checkbox-label input[type="checkbox"] {
width: 18px;
height: 18px;
cursor: pointer;
}
.settings-actions {
display: flex;
gap: 12px;
padding: 16px;
border-top: 1px solid #e0e0e0;
background: #f5f5f5;
}
.settings-actions button {
flex: 1;
padding: 10px 16px;
border: none;
border-radius: 4px;
font-size: 14px;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.settings-actions button.primary {
background: #1a73e8;
color: white;
}
.settings-actions button.primary:hover {
background: #1557b0;
}
.settings-actions button.secondary {
background: white;
color: #666;
border: 1px solid #e0e0e0;
}
.settings-actions button.secondary:hover {
background: #f5f5f5;
}
/* Dark theme for Settings */
[data-theme="dark"] #settings-header {
border-bottom-color: #444;
}
[data-theme="dark"] #settings-header h2 {
color: #e0e0e0;
}
[data-theme="dark"] #settings-content {
background: #1e1e1e;
}
[data-theme="dark"] .settings-section {
background: #2d2d2d;
border-color: #444;
}
[data-theme="dark"] .settings-section h3 {
color: #e0e0e0;
}
[data-theme="dark"] .setting-item label {
color: #aaa;
}
[data-theme="dark"] .setting-item select {
background: #1e1e1e;
border-color: #444;
color: #e0e0e0;
}
[data-theme="dark"] .setting-description {
color: #777;
}
[data-theme="dark"] .checkbox-label {
color: #e0e0e0 !important;
}
[data-theme="dark"] .settings-actions {
background: #2d2d2d;
border-top-color: #444;
}
[data-theme="dark"] .settings-actions button.secondary {
background: #1e1e1e;
border-color: #444;
color: #aaa;
}
[data-theme="dark"] .settings-actions button.secondary:hover {
background: #3a3a3a;
}
/* T071: Quick Access Panel Styles */
#quick-access-panel {
margin-bottom: 16px;
border: 1px solid #e0e0e0;
border-radius: 4px;
background: #fafafa;
overflow: hidden;
}
.quick-access-section {
border-bottom: 1px solid #e0e0e0;
}
.quick-access-section:last-child {
border-bottom: none;
}
.quick-access-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 10px 12px;
background: #f5f5f5;
cursor: pointer;
user-select: none;
}
.quick-access-header:hover {
background: #eeeeee;
}
.quick-access-title {
font-size: 13px;
font-weight: 600;
color: #333;
}
.quick-access-toggle {
background: none;
border: none;
font-size: 16px;
font-weight: bold;
color: #666;
cursor: pointer;
padding: 0;
width: 20px;
height: 20px;
display: flex;
align-items: center;
justify-content: center;
}
.quick-access-toggle:hover {
color: #333;
}
.quick-access-items {
display: flex;
gap: 8px;
padding: 10px;
overflow-x: auto;
overflow-y: hidden;
scroll-behavior: smooth;
}
.quick-access-items::-webkit-scrollbar {
height: 6px;
}
.quick-access-items::-webkit-scrollbar-thumb {
background: #ccc;
border-radius: 3px;
}
.quick-access-items::-webkit-scrollbar-thumb:hover {
background: #aaa;
}
.quick-access-items.collapsed {
display: none;
}
.quick-access-item {
flex-shrink: 0;
min-width: 180px;
max-width: 220px;
padding: 10px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 4px;
cursor: pointer;
transition: border-color 0.2s, box-shadow 0.2s;
}
.quick-access-item:hover {
border-color: #1a73e8;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.quick-access-item-title {
font-size: 12px;
font-weight: 500;
color: #333;
margin-bottom: 6px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.quick-access-item-content {
font-size: 11px;
color: #666;
line-height: 1.3;
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
}
.quick-access-item-meta {
font-size: 10px;
color: #999;
margin-top: 6px;
display: flex;
gap: 8px;
align-items: center;
}
.quick-access-empty {
padding: 10px;
text-align: center;
font-size: 12px;
color: #999;
font-style: italic;
}
[data-theme="dark"] #quick-access-panel {
background: #2d2d2d;
border-color: #444;
}
[data-theme="dark"] .quick-access-section {
border-bottom-color: #444;
}
[data-theme="dark"] .quick-access-header {
background: #3a3a3a;
}
[data-theme="dark"] .quick-access-header:hover {
background: #444;
}
[data-theme="dark"] .quick-access-title {
color: #e0e0e0;
}
[data-theme="dark"] .quick-access-toggle {
color: #aaa;
}
[data-theme="dark"] .quick-access-toggle:hover {
color: #e0e0e0;
}
[data-theme="dark"] .quick-access-item {
background: #1e1e1e;
border-color: #444;
}
[data-theme="dark"] .quick-access-item:hover {
border-color: #4285f4;
}
[data-theme="dark"] .quick-access-item-title {
color: #e0e0e0;
}
[data-theme="dark"] .quick-access-item-content {
color: #aaa;
}
[data-theme="dark"] .quick-access-items::-webkit-scrollbar-thumb {
background: #555;
}
[data-theme="dark"] .quick-access-items::-webkit-scrollbar-thumb:hover {
background: #666;
}
/* T071: Insert Prompt Modal Styles */
.modal-description {
font-size: 14px;
color: #666;
margin: 12px 0 0 0;
}
.insert-prompt-preview {
background: #f9f9f9;
border: 1px solid #e0e0e0;
border-radius: 4px;
padding: 12px;
min-height: calc(8 * 1.5em);
max-height: 300px;
overflow-y: auto;
font-size: 13px;
color: #333;
line-height: 1.5;
white-space: pre-wrap;
word-break: break-word;
}
#insert-prompt-modal .modal-footer,
#prompt-editor-modal .modal-footer {
display: flex;
justify-content: space-between;
align-items: center;
gap: 8px;
}
#insert-prompt-modal .modal-footer button,
#prompt-editor-modal .modal-footer button {
flex: 1;
background: transparent;
border: none;
padding: 0;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
color: #333;
transition: opacity 0.2s;
opacity: 0.7;
}
#insert-prompt-modal .modal-footer button:hover,
#prompt-editor-modal .modal-footer button:hover {
opacity: 1;
}
#insert-prompt-modal .modal-footer .material-symbols-outlined,
#prompt-editor-modal .modal-footer .material-symbols-outlined {
font-size: 28px;
}
[data-theme="dark"] .modal-description {
color: #aaa;
}
[data-theme="dark"] .insert-prompt-preview {
background: #1e1e1e;
border-color: #444;
color: #e0e0e0;
}
[data-theme="dark"] #insert-prompt-modal .modal-footer button,
[data-theme="dark"] #prompt-editor-modal .modal-footer button {
color: #e0e0e0;
}
/* Chat History Styles */
#chat-history {
display: flex;
flex-direction: column;
height: 100%;
padding: 16px;
overflow: hidden;
}
#history-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
#history-header h2 {
font-size: 20px;
font-weight: 600;
color: #333;
margin: 0;
}
.primary-action-btn {
padding: 8px 12px;
border: none;
border-radius: 4px;
background: #1a73e8;
color: white;
cursor: pointer;
transition: background 0.2s;
display: flex;
align-items: center;
gap: 4px;
}
.primary-action-btn:hover {
background: #1557b0;
}
.primary-action-btn .material-symbols-outlined {
font-size: 20px;
}
#history-filters {
display: flex;
flex-direction: column;
gap: 8px;
margin-bottom: 16px;
}
#history-search {
flex: 1;
padding: 8px 12px;
border: 1px solid #e0e0e0;
border-radius: 4px;
font-size: 14px;
}
/* Search info section (result count and helper) */
.search-info {
display: flex;
flex-direction: column;
gap: 6px;
}
.search-result-count {
font-size: 12px;
color: #666;
padding: 4px 8px;
}
.search-helper {
background: #e3f2fd;
border: 1px solid #90caf9;
border-radius: 4px;
padding: 8px 10px;
font-size: 11px;
line-height: 1.4;
display: flex;
justify-content: space-between;
align-items: start;
gap: 8px;
}
.search-helper .helper-text {
flex: 1;
color: #1565c0;
}
.search-helper .helper-text code {
background: #bbdefb;
padding: 2px 4px;
border-radius: 2px;
font-family: 'Consolas', 'Monaco', 'Courier New', monospace;
font-size: 10px;
font-weight: 600;
}
.search-helper .helper-close {
background: none;
border: none;
font-size: 18px;
line-height: 1;
color: #1565c0;
cursor: pointer;
padding: 0;
opacity: 0.7;
transition: opacity 0.2s;
}
.search-helper .helper-close:hover {
opacity: 1;
}
/* Highlight search terms in results */
.search-highlight {
background: #ffeb3b;
padding: 1px 0;
border-radius: 2px;
font-weight: 500;
}
#history-show-search-tips {
padding: 8px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 36px;
opacity: 0.7;
}
#history-show-search-tips .material-symbols-outlined {
font-size: 18px;
}
#history-show-search-tips:hover {
background: #f5f5f5;
opacity: 1;
}
#provider-filter-wrapper {
position: relative;
}
#provider-filter-btn {
padding: 8px 12px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 44px;
opacity: 0.7;
}
#provider-filter-btn .material-symbols-outlined {
font-size: 18px;
}
#provider-filter-btn:hover {
background: #f5f5f5;
opacity: 1;
}
#provider-filter-btn.active {
background: #e3f2fd;
opacity: 1;
}
.provider-popup {
position: absolute;
top: 100%;
left: 0;
margin-top: 4px;
background: white;
border: 1px solid #e0e0e0;
border-radius: 4px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
z-index: 100;
min-width: 200px;
max-height: calc(100vh - 200px);
overflow-y: auto;
}
.provider-popup-item {
padding: 10px 12px;
cursor: pointer;
font-size: 14px;
color: #333;
transition: background 0.2s;
}
.provider-popup-item:hover {
background: #f5f5f5;
}
.provider-popup-item.selected {
background: #e3f2fd;
color: #1976d2;
font-weight: 500;
}
#history-show-favorites {
padding: 8px 10px;
border: none;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: background 0.2s, opacity 0.2s;
min-width: 36px;
opacity: 0.7;
}
#history-show-favorites .material-symbols-outlined {
font-size: 18px;
}
#history-show-favorites:hover {
background: #f5f5f5;
opacity: 1;
}
#history-show-favorites.active {
background: #fff9c4;
color: #f57c00;
opacity: 1;
}
#conversation-list {
flex: 1;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: 8px;
}
.conversation-item {
padding: 12px;
border: 1px solid #e0e0e0;
border-radius: 4px;
background: white;
cursor: pointer;
transition: border-color 0.2s, box-shadow 0.2s;
}
/* Hover affordance for a conversation card in the history list */
.conversation-item:hover {
  border-color: #1a73e8;
  box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
/* Card header row: title on the left, action buttons on the right */
.conversation-item-header {
  display: flex;
  justify-content: space-between;
  align-items: start;
  margin-bottom: 8px;
}
.conversation-item-title {
  font-weight: 500;
  font-size: 14px;
  color: #333;
  margin: 0;
  flex: 1;
}
/* Per-card icon action buttons (favorite/delete/etc.) */
.conversation-item-actions {
  display: flex;
  gap: 4px;
}
.conversation-item-actions button {
  padding: 4px 6px;
  border: none;
  background: transparent;
  cursor: pointer;
  font-size: 14px;
  opacity: 0.6;
  transition: opacity 0.2s;
}
.conversation-item-actions button:hover {
  opacity: 1;
}
.conversation-item-actions .material-symbols-outlined {
  font-size: 18px;
}
/* Truncated body preview under the title */
.conversation-item-preview {
  font-size: 12px;
  color: #666;
  line-height: 1.4;
  margin-bottom: 8px;
}
/* Meta row: provider badge, date badge, tags */
.conversation-item-meta {
  display: flex;
  flex-wrap: wrap;
  gap: 8px;
  align-items: center;
  font-size: 11px;
  color: #999;
}
.conversation-item-provider {
  padding: 2px 6px;
  background: #e3f2fd;
  color: #1976d2;
  border-radius: 3px;
  font-weight: 500;
  display: inline-flex;
  align-items: center;
  gap: 4px;
}
.conversation-item-provider .provider-icon-small {
  width: 16px;
  height: 16px;
}
.conversation-item-date {
  padding: 2px 6px;
  background: #f5f5f5;
  border-radius: 3px;
}
/* Clickable conversation link styling */
a.conversation-item-link {
  text-decoration: none;
  color: inherit;
  transition: all 0.2s ease;
}
a.conversation-item-link:hover {
  background: #e3f2fd;
  color: #1976d2;
  text-decoration: underline;
}
/* Clickable timestamp link in modal view */
a.conversation-link {
  color: #1976d2;
  text-decoration: none;
  transition: all 0.2s ease;
}
a.conversation-link:hover {
  color: #1557b0;
  text-decoration: underline;
}
.conversation-item-tags {
  display: flex;
  gap: 4px;
  flex-wrap: wrap;
}
.conversation-item-tag {
  padding: 2px 6px;
  background: #fff3e0;
  color: #e65100;
  border-radius: 3px;
}
/* Placeholder shown when the history list has no items */
.conversation-list-empty {
  text-align: center;
  padding: 40px 20px;
  color: #999;
}
.conversation-list-empty p {
  margin: 8px 0;
  font-size: 14px;
}
/* Raw-text conversation body inside the view modal (pre-wrap by default) */
#view-conversation-modal #view-conversation-content {
  background: #f9f9f9;
  border: 1px solid #e0e0e0;
  border-radius: 4px;
  padding: 12px;
  flex: 1;
  overflow-y: auto;
  font-size: 13px;
  color: #333;
  line-height: 1.5;
  white-space: pre-wrap;
  word-break: break-word;
  margin-bottom: 16px;
}
/* Markdown-rendered content styling */
#view-conversation-modal .markdown-content {
  white-space: normal;
}
#view-conversation-modal .markdown-content h1,
#view-conversation-modal .markdown-content h2,
#view-conversation-modal .markdown-content h3,
#view-conversation-modal .markdown-content h4,
#view-conversation-modal .markdown-content h5,
#view-conversation-modal .markdown-content h6 {
  font-weight: 600;
  margin-top: 16px;
  margin-bottom: 8px;
  line-height: 1.3;
  color: #1a1a1a;
}
/* Heading sizes step down from 18px (h1) to 13px (h5/h6) */
#view-conversation-modal .markdown-content h1 { font-size: 18px; }
#view-conversation-modal .markdown-content h2 { font-size: 16px; }
#view-conversation-modal .markdown-content h3 { font-size: 15px; }
#view-conversation-modal .markdown-content h4 { font-size: 14px; }
#view-conversation-modal .markdown-content h5,
#view-conversation-modal .markdown-content h6 { font-size: 13px; }
#view-conversation-modal .markdown-content p {
  margin: 8px 0;
}
#view-conversation-modal .markdown-content ul,
#view-conversation-modal .markdown-content ol {
  margin: 8px 0;
  padding-left: 24px;
}
#view-conversation-modal .markdown-content li {
  margin: 4px 0;
}
/* Inline code */
#view-conversation-modal .markdown-content code {
  background: #f0f0f0;
  padding: 2px 5px;
  border-radius: 3px;
  font-family: 'Consolas', 'Monaco', 'Courier New', monospace;
  font-size: 12px;
}
/* Fenced code blocks */
#view-conversation-modal .markdown-content pre {
  background: #f5f5f5;
  border: 1px solid #e0e0e0;
  border-radius: 4px;
  padding: 10px;
  overflow-x: auto;
  margin: 12px 0;
}
/* Reset inline-code styling when nested inside a pre block */
#view-conversation-modal .markdown-content pre code {
  background: none;
  padding: 0;
  border-radius: 0;
  display: block;
  line-height: 1.4;
}
#view-conversation-modal .markdown-content blockquote {
  border-left: 3px solid #1976d2;
  padding-left: 12px;
  margin: 12px 0;
  color: #666;
  font-style: italic;
}
#view-conversation-modal .markdown-content a {
  color: #1976d2;
  text-decoration: none;
  transition: text-decoration 0.2s;
}
#view-conversation-modal .markdown-content a:hover {
  text-decoration: underline;
}
#view-conversation-modal .markdown-content strong {
  font-weight: 600;
}
#view-conversation-modal .markdown-content em {
  font-style: italic;
}
#view-conversation-modal .markdown-content hr {
  border: none;
  border-top: 1px solid #e0e0e0;
  margin: 16px 0;
}
/* Metadata panel below the conversation body (provider, timestamp, tags, notes) */
#view-conversation-meta {
  display: flex;
  flex-direction: column;
  gap: 8px;
  font-size: 13px;
  color: #666;
}
#view-conversation-meta > div {
  display: flex;
  align-items: center;
  gap: 4px;
}
#view-conversation-meta strong {
  color: #333;
}
#view-conversation-meta .provider-icon-small {
  width: 13px;
  height: 13px;
  display: inline-block;
}
/* Tag chip used in the view-conversation modal */
.conversation-tag {
  display: inline-block;
  padding: 2px 6px;
  background: #fff3e0;
  color: #e65100;
  border-radius: 3px;
  margin-right: 4px;
  font-size: 12px;
}
/* Dark theme for Chat History */
/* Overrides are keyed off body[data-theme="dark"]; they re-color the
   light-theme rules above without changing layout. */
[data-theme="dark"] #history-header h2 {
  color: #e0e0e0;
}
[data-theme="dark"] #history-search {
  background: #2d2d2d;
  border-color: #444;
  color: #e0e0e0;
}
[data-theme="dark"] .search-result-count {
  color: #aaa;
}
[data-theme="dark"] .search-helper {
  background: #1a3a5a;
  border-color: #2962ff;
}
[data-theme="dark"] .search-helper .helper-text {
  color: #90caf9;
}
[data-theme="dark"] .search-helper .helper-text code {
  background: #2962ff;
  color: #e3f2fd;
}
[data-theme="dark"] .search-helper .helper-close {
  color: #90caf9;
}
[data-theme="dark"] .search-highlight {
  background: #9e6b00;
  color: #fff;
}
[data-theme="dark"] #history-show-search-tips {
  background: transparent;
  color: #e0e0e0;
}
[data-theme="dark"] #history-show-search-tips:hover {
  background: #3a3a3a;
}
[data-theme="dark"] #provider-filter-btn {
  background: transparent;
}
[data-theme="dark"] #provider-filter-btn:hover {
  background: #3a3a3a;
}
[data-theme="dark"] #provider-filter-btn.active {
  background: #1a3a5a;
}
[data-theme="dark"] .provider-popup {
  background: #2d2d2d;
  border-color: #444;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.5);
}
[data-theme="dark"] .provider-popup-item {
  color: #e0e0e0;
}
[data-theme="dark"] .provider-popup-item:hover {
  background: #3a3a3a;
}
[data-theme="dark"] .provider-popup-item.selected {
  background: #1a3a5a;
  color: #64b5f6;
}
[data-theme="dark"] #history-show-favorites {
  background: transparent;
  color: #e0e0e0;
}
[data-theme="dark"] #history-show-favorites:hover {
  background: #3a3a3a;
}
[data-theme="dark"] #history-show-favorites.active {
  background: #4a3a00;
  color: #fbc02d;
}
/* Conversation cards in dark mode */
[data-theme="dark"] .conversation-item {
  background: #2d2d2d;
  border-color: #444;
}
[data-theme="dark"] .conversation-item:hover {
  border-color: #4285f4;
}
[data-theme="dark"] .conversation-item-title {
  color: #e0e0e0;
}
[data-theme="dark"] .conversation-item-preview {
  color: #aaa;
}
[data-theme="dark"] .conversation-item-provider {
  background: #1a3a5a;
  color: #64b5f6;
}
[data-theme="dark"] .conversation-item-date {
  background: #3a3a3a;
  color: #aaa;
}
[data-theme="dark"] a.conversation-item-link:hover {
  background: #1a3a5a;
  color: #64b5f6;
}
[data-theme="dark"] a.conversation-link {
  color: #64b5f6;
}
[data-theme="dark"] a.conversation-link:hover {
  color: #90caf9;
  text-decoration: underline;
}
[data-theme="dark"] .conversation-item-tag {
  background: #4a3a00;
  color: #fbc02d;
}
[data-theme="dark"] .conversation-item-actions button {
  color: #e0e0e0;
}
[data-theme="dark"] #view-conversation-modal #view-conversation-content {
  background: #1e1e1e;
  border-color: #444;
  color: #e0e0e0;
}
/* Dark theme for markdown content */
[data-theme="dark"] #view-conversation-modal .markdown-content h1,
[data-theme="dark"] #view-conversation-modal .markdown-content h2,
[data-theme="dark"] #view-conversation-modal .markdown-content h3,
[data-theme="dark"] #view-conversation-modal .markdown-content h4,
[data-theme="dark"] #view-conversation-modal .markdown-content h5,
[data-theme="dark"] #view-conversation-modal .markdown-content h6 {
  color: #f0f0f0;
}
[data-theme="dark"] #view-conversation-modal .markdown-content code {
  background: #2d2d2d;
  color: #e0e0e0;
}
[data-theme="dark"] #view-conversation-modal .markdown-content pre {
  background: #2d2d2d;
  border-color: #444;
}
[data-theme="dark"] #view-conversation-modal .markdown-content blockquote {
  border-left-color: #64b5f6;
  color: #aaa;
}
[data-theme="dark"] #view-conversation-modal .markdown-content a {
  color: #64b5f6;
}
[data-theme="dark"] #view-conversation-modal .markdown-content hr {
  border-top-color: #444;
}
[data-theme="dark"] #view-conversation-meta {
  color: #aaa;
}
[data-theme="dark"] #view-conversation-meta strong {
  color: #e0e0e0;
}
[data-theme="dark"] .conversation-tag {
  background: #4a3a00;
  color: #fbc02d;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
sidebar/sidebar.html | HTML | <!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>insidebar.ai</title>
  <link rel="stylesheet" href="sidebar.css">
  <!-- Material Symbols font -->
  <link href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@20..48,100..700,0..1,-50..200&display=block" rel="stylesheet">
  <!-- DNS prefetch and preconnect for faster loading -->
  <link rel="dns-prefetch" href="https://chatgpt.com">
  <link rel="dns-prefetch" href="https://claude.ai">
  <link rel="dns-prefetch" href="https://gemini.google.com">
  <link rel="dns-prefetch" href="https://grok.com">
  <link rel="dns-prefetch" href="https://chat.deepseek.com">
  <link rel="preconnect" href="https://chatgpt.com">
  <link rel="preconnect" href="https://claude.ai">
  <link rel="preconnect" href="https://gemini.google.com">
  <link rel="preconnect" href="https://grok.com">
  <link rel="preconnect" href="https://chat.deepseek.com">
</head>
<!-- data-theme is managed at runtime ("auto" resolves to the OS preference) -->
<body data-theme="auto">
  <div id="app">
    <!-- Provider iframe container -->
    <div id="provider-container">
      <div id="loading" style="display: none;">Loading AI provider...</div>
      <div id="error" style="display: none;"></div>
      <!-- Iframes will be dynamically inserted here -->
    </div>
    <!-- Chat History UI -->
    <div id="chat-history" style="display: none;">
      <div id="history-header">
        <h2 data-i18n="sidebarChatHistory">Chat History</h2>
      </div>
      <!-- Search box with persistent search-history suggestions -->
      <div id="history-filters">
        <div class="filter-row-top">
          <input type="text" id="history-search" data-i18n-placeholder="placeholderSearchConversations" placeholder="Search conversations..." list="search-history-list" autocomplete="off" />
          <datalist id="search-history-list">
            <!-- Search history will be dynamically populated here -->
          </datalist>
        </div>
        <!-- Result count + dismissible quick reference for the search syntax -->
        <div class="search-info">
          <div id="search-result-count" class="search-result-count" style="display: none;"></div>
          <div id="search-helper" class="search-helper" style="display: none;">
            <span class="helper-text">
              <strong>Tips:</strong>
              <code>title:keyword</code> search in titles,
              <code>tag:keyword</code> search in tags,
              <code>provider:name</code> filter by provider,
              <code>"exact phrase"</code> exact match,
              <code>-keyword</code> exclude,
              <code>OR</code> for either term
            </span>
            <button id="close-search-helper" class="helper-close" title="Hide tips">×</button>
          </div>
        </div>
        <!-- Filter buttons: provider popup, search tips, favorites toggle -->
        <div class="filter-row-bottom">
          <div id="provider-filter-wrapper">
            <button id="provider-filter-btn" data-i18n-title="tooltipFilterProvider" title="Filter by provider"><span class="material-symbols-outlined">filter_alt</span></button>
            <div id="provider-popup" class="provider-popup" style="display: none;">
              <div class="provider-popup-item" data-value="" data-i18n="labelAllProviders">All Providers</div>
              <!-- Providers will be dynamically populated here -->
            </div>
          </div>
          <button id="history-show-search-tips" data-i18n-title="tooltipShowSearchTips" title="Show search tips"><span class="material-symbols-outlined">help</span></button>
          <button id="history-show-favorites" data-i18n-title="tooltipShowFavorites" title="Show favorites only"><span class="material-symbols-outlined">star</span></button>
        </div>
      </div>
      <div id="conversation-list">
        <!-- Conversation items will be dynamically rendered here -->
      </div>
      <!-- Save Conversation Modal -->
      <div id="save-conversation-modal" class="modal" style="display: none;">
        <div class="modal-content">
          <div class="modal-header">
            <h3 data-i18n="btnSave">Save Conversation</h3>
            <button class="close-btn" data-i18n-title="btnClose" id="close-save-conversation"><span class="material-symbols-outlined">close</span></button>
          </div>
          <div class="modal-body">
            <input type="text" id="conversation-title-input" placeholder="Conversation title (auto-generated from content)" />
            <textarea id="conversation-content-input" placeholder="Paste conversation content here..."></textarea>
            <input type="text" id="conversation-provider-input" placeholder="Provider (e.g., ChatGPT, Claude)" readonly />
            <input type="text" id="conversation-tags-input" placeholder="Tags (comma-separated)" />
            <textarea id="conversation-notes-input" placeholder="Notes (optional)"></textarea>
            <label>
              <input type="checkbox" id="conversation-favorite-input" />
              Mark as favorite
            </label>
          </div>
          <div class="modal-footer">
            <button id="cancel-save-conversation-btn" class="secondary" data-i18n-title="btnCancel" title="Cancel"><span class="material-symbols-outlined">close</span></button>
            <button id="save-conversation-submit-btn" class="primary" data-i18n-title="btnSave" title="Save"><span class="material-symbols-outlined">check</span></button>
          </div>
        </div>
      </div>
      <!-- View Conversation Modal -->
      <div id="view-conversation-modal" class="modal" style="display: none;">
        <div class="modal-content">
          <div class="modal-header">
            <h3 id="view-conversation-title">Conversation</h3>
            <div class="modal-header-actions">
              <button id="copy-conversation-btn" class="header-action-btn" data-i18n-title="tooltipCopyContent" title="Copy content"><span class="material-symbols-outlined" style="font-size: 20px;">content_copy</span></button>
              <button id="delete-conversation-from-view-btn" class="header-action-btn" data-i18n-title="btnDelete" title="Delete"><span class="material-symbols-outlined" style="font-size: 20px;">delete</span></button>
              <button class="close-btn" data-i18n-title="btnClose" id="close-view-conversation"><span class="material-symbols-outlined">close</span></button>
            </div>
          </div>
          <div class="modal-body">
            <div id="view-conversation-content"></div>
            <!-- Metadata panel: provider, timestamp, tags, notes -->
            <div id="view-conversation-meta">
              <div id="view-conversation-provider"></div>
              <div id="view-conversation-timestamp"></div>
              <div id="view-conversation-tags"></div>
              <div id="view-conversation-notes"></div>
            </div>
          </div>
        </div>
      </div>
    </div>
    <!-- T045: Prompt Genie UI -->
    <div id="prompt-library" style="display: none;">
      <!-- Workspace for editing received text - Always visible at top -->
      <div id="workspace-header">
        <h2 data-i18n="sidebarEditingPrompt">Editing Prompt</h2>
      </div>
      <div id="prompt-workspace" style="display: block;">
        <textarea id="prompt-workspace-text" placeholder="Selected text will appear here..."></textarea>
        <div class="workspace-actions">
          <div class="workspace-actions-left">
            <button id="workspace-save-btn" data-i18n-title="tooltipSaveNew" title="Save as new prompt"><span class="material-symbols-outlined">add_2</span></button>
            <button id="workspace-copy-btn" data-i18n-title="tooltipCopyClipboard" title="Copy to clipboard"><span class="material-symbols-outlined">content_copy</span></button>
            <button id="workspace-clear-btn" data-i18n-title="tooltipClear" title="Clear"><span class="material-symbols-outlined">close</span></button>
          </div>
          <!-- Right side: pick a provider, then send workspace text to it -->
          <div class="workspace-actions-right">
            <div id="workspace-provider-wrapper">
              <button id="workspace-provider-btn" data-i18n-title="tooltipSelectProvider" title="Select provider">
                <img class="provider-icon-small" src="" alt="">
              </button>
              <div id="workspace-provider-popup" class="workspace-provider-popup" style="display: none;">
                <!-- Providers will be dynamically populated here -->
              </div>
            </div>
            <button id="workspace-send-btn" data-i18n-title="tooltipSendProvider" title="Send to selected provider"><span class="material-symbols-outlined">keyboard_return</span></button>
          </div>
        </div>
      </div>
      <div id="prompt-library-header">
        <h2 data-i18n="sidebarPromptGenie">Prompt Genie</h2>
      </div>
      <!-- Prompt search, category filter, sort buttons, favorites toggle -->
      <div id="prompt-filters">
        <div class="filter-row-top">
          <input type="text" id="prompt-search" data-i18n-placeholder="placeholderSearchPrompts" placeholder="Search prompts..." />
        </div>
        <div class="filter-row-bottom">
          <div id="category-filter-wrapper">
            <button id="category-filter-btn" data-i18n-title="tooltipFilterCategory" title="Filter by category"><span class="material-symbols-outlined">category</span></button>
            <div id="category-popup" class="category-popup" style="display: none;">
              <div class="category-popup-item" data-value="" data-i18n="labelAllCategories">All Categories</div>
              <!-- Categories will be dynamically populated here -->
            </div>
          </div>
          <div id="sort-buttons">
            <button class="sort-btn active" data-sort="recent" data-i18n-title="tooltipRecentlyUsed" title="Recently Used"><span class="material-symbols-outlined">schedule</span></button>
            <button class="sort-btn" data-sort="most-used" data-i18n-title="tooltipMostUsed" title="Most Used"><span class="material-symbols-outlined">numbers</span></button>
            <button class="sort-btn sort-toggle" data-sort="alphabetical" data-alt-sort="reverse-alphabetical" data-i18n-title="tooltipAlphabetical" title="A-Z"><span class="material-symbols-outlined">sort_by_alpha</span></button>
            <button class="sort-btn sort-toggle" data-sort="newest" data-alt-sort="oldest" data-i18n-title="tooltipNewestFirst" title="Newest First"><span class="material-symbols-outlined">new_releases</span></button>
          </div>
          <button id="show-favorites" data-i18n-title="tooltipShowFavorites" title="Show favorites only"><span class="material-symbols-outlined">star</span></button>
        </div>
      </div>
      <!-- T071: Quick Access Panel -->
      <div id="quick-access-panel" style="display: none;">
        <div class="quick-access-section">
          <div class="quick-access-header">
            <span class="quick-access-title" data-i18n="labelRecentlyUsed">Recently Used</span>
            <button class="quick-access-toggle" id="toggle-recent" data-i18n-title="tooltipToggleSection" title="Toggle section"><span class="material-symbols-outlined">remove</span></button>
          </div>
          <div class="quick-access-items" id="recent-prompts-list">
            <!-- Recently used prompts will be rendered here -->
          </div>
        </div>
        <div class="quick-access-section">
          <div class="quick-access-header">
            <span class="quick-access-title" data-i18n="labelTopFavorites">Top Favorites</span>
            <button class="quick-access-toggle" id="toggle-favorites" data-i18n-title="tooltipToggleSection" title="Toggle section"><span class="material-symbols-outlined">remove</span></button>
          </div>
          <div class="quick-access-items" id="top-favorites-list">
            <!-- Top favorites will be rendered here -->
          </div>
        </div>
      </div>
      <div id="prompt-list">
        <!-- Prompt items will be dynamically rendered here -->
      </div>
      <!-- T046: Prompt Editor Modal -->
      <div id="prompt-editor-modal" class="modal" style="display: none;">
        <div class="modal-content">
          <div class="modal-header">
            <h3 id="editor-title">New Prompt</h3>
            <button class="close-btn" data-i18n-title="btnClose" id="close-editor"><span class="material-symbols-outlined">close</span></button>
          </div>
          <div class="modal-body">
            <input type="text" id="prompt-title-input" placeholder="Prompt title" />
            <textarea id="prompt-content-input" placeholder="Enter your prompt here..."></textarea>
            <input type="text" id="prompt-category-input" placeholder="Category (e.g., Writing, Coding)" />
            <input type="text" id="prompt-tags-input" placeholder="Tags (comma-separated)" />
            <label>
              <input type="checkbox" id="prompt-favorite-input" />
              Mark as favorite
            </label>
          </div>
          <div class="modal-footer">
            <button id="cancel-edit-btn" class="secondary" data-i18n-title="btnCancel" title="Cancel"><span class="material-symbols-outlined">close</span></button>
            <button id="save-prompt-btn" class="primary" data-i18n-title="btnSave" title="Save"><span class="material-symbols-outlined">check</span></button>
          </div>
        </div>
      </div>
      <!-- T071: Insert Prompt Modal -->
      <div id="insert-prompt-modal" class="modal" style="display: none;">
        <div class="modal-content">
          <div class="modal-header">
            <h3>Insert text to Prompt Workspace Area</h3>
            <button class="close-btn" data-i18n-title="btnClose" id="close-insert-modal"><span class="material-symbols-outlined">close</span></button>
          </div>
          <div class="modal-body">
            <div class="insert-prompt-preview" id="insert-prompt-preview">
              <!-- Prompt preview will be shown here -->
            </div>
            <p class="modal-description">Choose how to insert this prompt: at the beginning, at the end, or replace the existing text entirely?</p>
          </div>
          <div class="modal-footer">
            <button id="insert-beginning-btn" class="primary" data-i18n-title="tooltipInsertBeginning" title="Insert at beginning"><span class="material-symbols-outlined">vertical_align_top</span></button>
            <button id="insert-end-btn" class="primary" data-i18n-title="tooltipInsertEnd" title="Insert at end"><span class="material-symbols-outlined">vertical_align_bottom</span></button>
            <button id="replace-workspace-btn" class="secondary" data-i18n-title="tooltipReplaceAll" title="Replace all"><span class="material-symbols-outlined">sync</span></button>
          </div>
        </div>
      </div>
    </div>
    <!-- Provider tabs (bottom navigation) -->
    <nav id="provider-tabs">
      <!-- Provider tabs will be dynamically generated here -->
    </nav>
  </div>
  <script src="marked.min.js"></script>
  <script type="module" src="sidebar.js"></script>
</body>
</html>
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
sidebar/sidebar.js | JavaScript | import { PROVIDERS, getProviderById, getProviderByIdWithSettings, getEnabledProviders } from '../modules/providers.js';
import { applyTheme } from '../modules/theme-manager.js';
import { t, translatePage, initializeLanguage } from '../modules/i18n.js';
import {
getAllPrompts,
savePrompt,
updatePrompt,
deletePrompt,
searchPrompts,
getPromptsByCategory,
getFavoritePrompts,
toggleFavorite,
recordPromptUsage,
getAllCategories,
getRecentlyUsedPrompts,
getTopFavorites
} from '../modules/prompt-manager.js';
import {
saveConversation,
getConversation,
getAllConversations,
updateConversation,
deleteConversation,
searchConversations,
getConversationsByProvider,
getFavoriteConversations,
toggleConversationFavorite,
getAllConversationTags,
generateAutoTitle,
findConversationByConversationId
} from '../modules/history-manager.js';
// --- Module-level UI state ---------------------------------------------------
let currentProvider = null; // providerId currently shown, or null before first switch
const loadedIframes = new Map(); // providerId -> iframe element
const loadedIframesState = new Map(); // providerId -> 'loading' | 'ready'
let currentView = 'providers'; // 'providers', 'prompt-library', or 'chat-history'
let currentEditingPromptId = null;
let currentInsertPromptId = null; // T071: For insert prompt modal
let isShowingFavorites = false;
let currentSortOrder = 'recent'; // T071: Current sort order
let isSwitchingProvider = false; // re-entrancy guard used by switchProvider()
let pendingProviderId = null; // latest switch request queued while a switch is in flight
const EDGE_SHORTCUT_STORAGE_KEY = 'edgeShortcutReminderDismissed';
// Chat History state
let isShowingHistoryFavorites = false;
let currentEditingConversationId = null;
let currentViewingConversationId = null;
// Helper function to detect if dark theme is currently active
function isDarkTheme() {
const theme = document.body.getAttribute('data-theme');
if (theme === 'dark') return true;
if (theme === 'light') return false;
// Auto mode: check system preference
return window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
}
// T013: Initialize sidebar
async function init() {
await applyTheme();
await initializeLanguage(); // Initialize i18n
translatePage(); // Translate all static text
await renderProviderTabs();
await loadDefaultProvider();
setupMessageListener();
setupPromptLibrary(); // T045: Initialize prompt library
setupChatHistory(); // Initialize chat history
// Re-render tabs when theme changes
setupThemeChangeListener();
// Show shortcut reminder for Edge users once
await checkEdgeShortcutReminder();
// T070: Notify background when sidebar closes
window.addEventListener('beforeunload', () => {
try {
chrome.runtime.sendMessage({ action: 'sidePanelClosed', payload: {} });
} catch (error) {
// Ignore errors during unload
}
});
}
// Listen for theme changes and re-render tabs with appropriate icons
function setupThemeChangeListener() {
// Watch for data-theme attribute changes on body
const observer = new MutationObserver((mutations) => {
mutations.forEach((mutation) => {
if (mutation.attributeName === 'data-theme') {
renderProviderTabs();
updateWorkspaceProviderSelector();
}
});
});
observer.observe(document.body, {
attributes: true,
attributeFilter: ['data-theme']
});
// Also listen for system theme changes when in auto mode
if (window.matchMedia) {
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', () => {
const theme = document.body.getAttribute('data-theme');
if (theme === 'auto') {
renderProviderTabs();
updateWorkspaceProviderSelector();
}
});
}
}
// T014: Render provider tabs with icons
async function renderProviderTabs() {
const enabledProviders = await getEnabledProviders();
const tabsContainer = document.getElementById('provider-tabs');
const useDarkIcons = isDarkTheme();
tabsContainer.innerHTML = '';
// Add provider tabs
enabledProviders.forEach(provider => {
const button = document.createElement('button');
button.dataset.providerId = provider.id;
button.title = provider.name; // Tooltip shows name on hover
// Create icon element - use dark icon if available and theme is dark
const icon = document.createElement('img');
icon.src = useDarkIcons && provider.iconDark ? provider.iconDark : provider.icon;
icon.alt = provider.name;
icon.className = 'provider-icon';
button.appendChild(icon);
button.addEventListener('click', () => switchProvider(provider.id));
tabsContainer.appendChild(button);
});
// Add separator between providers and UI tabs
const separator = document.createElement('div');
separator.className = 'tab-separator';
tabsContainer.appendChild(separator);
// Add chat history tab
const historyTab = document.createElement('button');
historyTab.id = 'chat-history-tab';
historyTab.dataset.view = 'chat-history';
historyTab.title = t('sidebarChatHistory');
const historyIcon = document.createElement('img');
historyIcon.src = useDarkIcons ? '/icons/ui/dark/chat-history.png' : '/icons/ui/chat-history.png';
historyIcon.alt = 'History';
historyIcon.className = 'provider-icon';
historyTab.appendChild(historyIcon);
historyTab.addEventListener('click', () => switchToView('chat-history'));
tabsContainer.appendChild(historyTab);
// Add prompt library tab
const promptLibraryTab = document.createElement('button');
promptLibraryTab.id = 'prompt-library-tab';
promptLibraryTab.dataset.view = 'prompt-library';
promptLibraryTab.title = t('sidebarPromptGenie');
const promptIcon = document.createElement('img');
promptIcon.src = useDarkIcons ? '/icons/ui/dark/prompts.png' : '/icons/ui/prompts.png';
promptIcon.alt = 'Prompts';
promptIcon.className = 'provider-icon';
promptLibraryTab.appendChild(promptIcon);
promptLibraryTab.addEventListener('click', () => switchToView('prompt-library'));
tabsContainer.appendChild(promptLibraryTab);
// Add settings tab at the very end (right side)
const settingsTab = document.createElement('button');
settingsTab.id = 'settings-tab';
settingsTab.title = t('sectionAbout');
const settingsIcon = document.createElement('img');
settingsIcon.src = useDarkIcons ? '/icons/ui/dark/settings.png' : '/icons/ui/settings.png';
settingsIcon.alt = 'Settings';
settingsIcon.className = 'provider-icon';
settingsTab.appendChild(settingsIcon);
settingsTab.addEventListener('click', () => {
chrome.runtime.openOptionsPage();
});
tabsContainer.appendChild(settingsTab);
}
// T015: Switch to a provider
async function switchProvider(providerId) {
if (isSwitchingProvider) {
pendingProviderId = providerId;
return;
}
isSwitchingProvider = true;
const provider = await getProviderByIdWithSettings(providerId);
if (!provider) {
showError(`Provider ${providerId} not found`);
isSwitchingProvider = false;
// Process any pending switch request
if (pendingProviderId && pendingProviderId !== providerId) {
const next = pendingProviderId;
pendingProviderId = null;
switchProvider(next);
}
return;
}
// Hide non-provider views if currently active
currentView = 'providers';
document.getElementById('prompt-library').style.display = 'none';
document.getElementById('chat-history').style.display = 'none';
// Show provider container
document.getElementById('provider-container').style.display = 'flex';
// Update active tab - deactivate all tabs first
document.querySelectorAll('#provider-tabs button').forEach(btn => {
btn.classList.remove('active');
});
// Deactivate prompt library tab
const promptLibraryTab = document.getElementById('prompt-library-tab');
if (promptLibraryTab) promptLibraryTab.classList.remove('active');
// Activate the selected provider tab
const activeProviderTab = document.querySelector(`#provider-tabs button[data-provider-id="${providerId}"]`);
if (activeProviderTab) activeProviderTab.classList.add('active');
// Hide current provider iframe
if (currentProvider && loadedIframes.has(currentProvider)) {
loadedIframes.get(currentProvider).style.display = 'none';
}
// Load or show provider iframe
if (!loadedIframes.has(providerId)) {
const iframe = createProviderIframe(provider);
loadedIframes.set(providerId, iframe);
} else {
loadedIframes.get(providerId).style.display = 'block';
}
currentProvider = providerId;
// Save last selected provider
await chrome.storage.sync.set({ lastSelectedProvider: providerId });
hideError();
isSwitchingProvider = false;
if (pendingProviderId && pendingProviderId !== providerId) {
const next = pendingProviderId;
pendingProviderId = null;
switchProvider(next);
} else {
pendingProviderId = null;
}
}
// T016: Create iframe for provider
function createProviderIframe(provider) {
const container = document.getElementById('provider-container');
const iframe = document.createElement('iframe');
iframe.src = provider.url;
// Sandbox must allow same-origin + scripts so provider UIs can function; popups are
// permitted to support OAuth flows within embedded sites. See README "Permissions"
// for the full security rationale.
iframe.sandbox = 'allow-same-origin allow-scripts allow-forms allow-popups allow-popups-to-escape-sandbox';
iframe.allow = 'clipboard-read; clipboard-write';
iframe.loading = 'eager'; // Hint to browser to load immediately
// Set initial state as loading
loadedIframesState.set(provider.id, 'loading');
iframe.addEventListener('load', () => {
hideLoading();
// Mark iframe as ready
loadedIframesState.set(provider.id, 'ready');
});
iframe.addEventListener('error', () => {
showError(`Failed to load ${provider.name}. Please try again or check your internet connection.`);
// Mark as ready even on error to prevent infinite waiting
loadedIframesState.set(provider.id, 'ready');
});
container.appendChild(iframe);
return iframe;
}
// T017: Load default or last selected provider
async function loadDefaultProvider() {
const settings = await chrome.storage.sync.get({
lastSelectedProvider: 'chatgpt',
defaultProvider: 'chatgpt',
rememberLastProvider: true
});
// If rememberLastProvider is enabled, use last selected; otherwise always use default
const providerId = settings.rememberLastProvider
? (settings.lastSelectedProvider || settings.defaultProvider)
: settings.defaultProvider;
await switchProvider(providerId);
}
// T018: Setup message listener
function setupMessageListener() {
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
// Handle async operations properly
(async () => {
try {
if (message.action === 'switchProvider') {
await switchProvider(message.payload.providerId);
// If there's selected text, inject it into the provider iframe
if (message.payload.selectedText) {
await injectTextIntoProvider(message.payload.providerId, message.payload.selectedText);
}
sendResponse({ success: true });
} else if (message.action === 'openPromptLibrary') {
// T048: Switch to Prompt Genie tab
switchToView('prompt-library');
// If there's selected text, show it in the workspace
if (message.payload?.selectedText) {
showWorkspaceWithText(message.payload.selectedText);
} else {
// T069: Check if auto-paste is enabled
const settings = await chrome.storage.sync.get({ autoPasteClipboard: false });
if (settings.autoPasteClipboard) {
try {
const clipboardText = await navigator.clipboard.readText();
if (clipboardText && clipboardText.trim()) {
showWorkspaceWithText(clipboardText);
}
} catch (error) {
console.warn('Could not read clipboard:', error);
// Silently fail - user may not have granted clipboard permission
}
}
}
sendResponse({ success: true });
} else if (message.action === 'closeSidePanel') {
// T070: Close side panel when requested
window.close();
sendResponse({ success: true });
} else if (message.action === 'saveExtractedConversation') {
// Handle extracted conversation from ChatGPT page
await handleExtractedConversation(message.payload);
sendResponse({ success: true });
} else if (message.action === 'checkDuplicateConversation') {
// Handle duplicate conversation check
const { conversationId } = message.payload;
const existingConversation = await findConversationByConversationId(conversationId);
if (existingConversation) {
sendResponse({
isDuplicate: true,
existingConversation: {
id: existingConversation.id,
title: existingConversation.title,
timestamp: existingConversation.timestamp
}
});
} else {
sendResponse({ isDuplicate: false });
}
} else if (message.action === 'switchToChatHistory') {
// Switch to chat history view after save
switchToView('chat-history');
sendResponse({ success: true });
} else if (message.action === 'refreshChatHistory') {
// Refresh chat history view if currently displayed
if (currentView === 'chat-history') {
await renderConversationList();
}
sendResponse({ success: true });
} else if (message.action === 'checkFocus') {
// Check if sidebar has focus
sendResponse({ hasFocus: document.hasFocus() });
} else if (message.action === 'takeFocus') {
// Focus the sidebar - try workspace textarea first, then current iframe
let focusSuccess = false;
const workspace = document.getElementById('prompt-workspace-text');
if (workspace && currentView === 'prompt-library') {
workspace.focus();
focusSuccess = true;
} else if (currentProvider && loadedIframes.has(currentProvider)) {
// Focus the current provider's iframe
const iframe = loadedIframes.get(currentProvider);
if (iframe) {
iframe.focus();
focusSuccess = true;
}
}
sendResponse({ success: focusSuccess });
}
} catch (error) {
console.error('Error handling message:', error);
sendResponse({ success: false, error: error.message });
}
})();
return true; // Keep channel open for async response
});
// T026: Listen for settings changes so the side panel stays in sync with the
// options page. Handles two keys:
//   - enabledProviders: re-render the tabs and bail out of a now-disabled provider
//   - language: re-initialize i18n and re-translate the page
chrome.storage.onChanged.addListener(async (changes, namespace) => {
  if (changes.enabledProviders) {
    // Guard: newValue is undefined when the key is removed from storage.
    const newEnabledProviders = changes.enabledProviders.newValue || [];
    // Await the re-render (was fire-and-forget here but awaited below — keep consistent).
    await renderProviderTabs();
    // If current provider was disabled, switch to first enabled provider.
    if (currentProvider && !newEnabledProviders.includes(currentProvider)) {
      // Only switch when there is actually a provider left to switch to.
      if (newEnabledProviders.length > 0) {
        switchProvider(newEnabledProviders[0]);
      }
    }
  }
  // Listen for language changes and re-translate
  if (changes.language) {
    await initializeLanguage(changes.language.newValue);
    translatePage();
    // Re-render provider tabs to update tooltips
    await renderProviderTabs();
  }
});
}
// Wait for iframe to be fully loaded and ready.
// Resolves once the iframe's load state becomes 'ready'. Rejects when the
// iframe does not exist, or when it fails to become ready within `timeoutMs`
// — previously the poll loop could pend forever, leaking the promise and the
// 100ms timer chain if a provider never finished loading.
// @param {string} providerId - Provider whose iframe to wait for
// @param {number} [timeoutMs=30000] - Maximum time to wait before rejecting
async function waitForIframeReady(providerId, timeoutMs = 30000) {
  const iframe = loadedIframes.get(providerId);
  if (!iframe) {
    throw new Error(`Iframe for provider ${providerId} not found`);
  }
  // Fast path: already ready, no polling needed.
  if (loadedIframesState.get(providerId) === 'ready') {
    return;
  }
  // Poll until ready or until the deadline passes (the iframe's load handler
  // elsewhere flips the state to 'ready').
  const deadline = Date.now() + timeoutMs;
  return new Promise((resolve, reject) => {
    const checkReady = () => {
      if (loadedIframesState.get(providerId) === 'ready') {
        resolve();
      } else if (Date.now() > deadline) {
        reject(new Error(`Iframe for provider ${providerId} did not become ready within ${timeoutMs}ms`));
      } else {
        // Check again after a short delay
        setTimeout(checkReady, 100);
      }
    };
    checkReady();
  });
}
// Inject selected text into a provider iframe via postMessage; a content
// script inside the iframe listens for INJECT_TEXT messages.
// @param {string} providerId - Target provider id
// @param {string} text - Text to inject
async function injectTextIntoProvider(providerId, text) {
  if (!text || !providerId) {
    return;
  }
  try {
    // Wait for iframe to be ready (event-driven, no fixed delay)
    await waitForIframeReady(providerId);
    const iframe = loadedIframes.get(providerId);
    if (!iframe || !iframe.contentWindow) {
      console.warn('Provider iframe not found or not ready:', providerId);
      return;
    }
    // Security fix: derive the target origin from the iframe's src instead of
    // posting with '*'. With '*' the message (possibly sensitive selected
    // text) could be delivered to any document the frame navigated to.
    let targetOrigin;
    try {
      targetOrigin = new URL(iframe.src).origin;
    } catch {
      console.warn('Cannot determine iframe origin, refusing to post text:', providerId);
      return;
    }
    // Send message to content script inside the iframe
    iframe.contentWindow.postMessage(
      {
        type: 'INJECT_TEXT',
        text: text
      },
      targetOrigin
    );
  } catch (error) {
    console.error('Error sending text injection message:', error);
  }
}
// T019/T020: Banner helpers for the panel-wide error and loading indicators.

/**
 * Display the error banner with the given message. The loading indicator is
 * hidden at the same time, since the two states are mutually exclusive.
 * @param {string} message - Error text shown to the user
 */
function showError(message) {
  const banner = document.getElementById('error');
  banner.textContent = message;
  banner.style.display = 'flex';
  hideLoading();
}

/** Hide the error banner. */
function hideError() {
  const banner = document.getElementById('error');
  banner.style.display = 'none';
}

/**
 * Display the loading indicator.
 * @param {string} [message] - Status text; defaults to a generic message.
 */
function showLoading(message = 'Loading AI provider...') {
  const indicator = document.getElementById('loading');
  indicator.textContent = message;
  indicator.style.display = 'flex';
}

/** Hide the loading indicator. */
function hideLoading() {
  const indicator = document.getElementById('loading');
  indicator.style.display = 'none';
}
// T045-T049: Prompt Library Implementation

/**
 * Wire up all event handlers for the prompt-library view: debounced search,
 * the category filter popup, sort buttons (including A-Z/Z-A and
 * newest/oldest toggle pairs), the favorites filter, the editor and
 * insert-prompt modals, the workspace action buttons, the workspace provider
 * selector popup, and the quick-access panel toggles.
 * Intended to run once during panel initialization.
 */
function setupPromptLibrary() {
  // Note: Prompt library tab is now created in renderProviderTabs()
  // No need to add event listener here as it's done during creation
  // Search functionality — debounced by 300ms so the list is not re-rendered
  // on every keystroke.
  const searchInput = document.getElementById('prompt-search');
  let searchTimeout;
  searchInput.addEventListener('input', (e) => {
    clearTimeout(searchTimeout);
    searchTimeout = setTimeout(() => {
      if (e.target.value.trim()) {
        filterPrompts('search', e.target.value);
      } else {
        // Empty query: fall back to the unfiltered list.
        renderPromptList();
      }
    }, 300);
  });
  // Category filter button — toggles the popup's visibility.
  const categoryBtn = document.getElementById('category-filter-btn');
  const categoryPopup = document.getElementById('category-popup');
  categoryBtn.addEventListener('click', (e) => {
    e.stopPropagation();
    const isVisible = categoryPopup.style.display === 'block';
    categoryPopup.style.display = isVisible ? 'none' : 'block';
    categoryBtn.classList.toggle('active', !isVisible);
  });
  // Close popup when clicking outside
  document.addEventListener('click', (e) => {
    if (!categoryBtn.contains(e.target) && !categoryPopup.contains(e.target)) {
      categoryPopup.style.display = 'none';
      categoryBtn.classList.remove('active');
    }
  });
  // Handle category selection from popup. Delegated to the popup container
  // because updateCategoryFilter() re-creates the item elements.
  categoryPopup.addEventListener('click', (e) => {
    if (e.target.classList.contains('category-popup-item')) {
      const value = e.target.dataset.value;
      // Update selected state
      categoryPopup.querySelectorAll('.category-popup-item').forEach(item => {
        item.classList.remove('selected');
      });
      e.target.classList.add('selected');
      // Filter prompts (an empty value means "All Categories")
      if (value) {
        filterPrompts('category', value);
      } else {
        renderPromptList();
      }
      // Close popup
      categoryPopup.style.display = 'none';
      categoryBtn.classList.remove('active');
    }
  });
  // T071: Sort buttons with toggle groups. A "sort-toggle" button flips
  // between two orderings (data-sort <-> data-alt-sort) on repeated clicks.
  document.querySelectorAll('.sort-btn').forEach(btn => {
    btn.addEventListener('click', (e) => {
      // Check if this is a toggle button
      if (btn.classList.contains('sort-toggle')) {
        const currentSort = btn.dataset.sort;
        const altSort = btn.dataset.altSort;
        // Define icon and title mappings (Material Symbols)
        const sortConfig = {
          'alphabetical': { icon: 'sort_by_alpha', title: 'A-Z' },
          'reverse-alphabetical': { icon: 'sort_by_alpha', title: 'Z-A' },
          'newest': { icon: 'new_releases', title: 'Newest First' },
          'oldest': { icon: 'history', title: 'Oldest First' }
        };
        // If button is already active, toggle it to its alternate ordering.
        if (btn.classList.contains('active')) {
          // Swap the sort orders
          btn.dataset.sort = altSort;
          btn.dataset.altSort = currentSort;
          // Update icon and title
          const iconSpan = btn.querySelector('.material-symbols-outlined');
          if (iconSpan) {
            iconSpan.textContent = sortConfig[altSort].icon;
          }
          btn.title = sortConfig[altSort].title;
          // Update current sort order
          currentSortOrder = altSort;
        } else {
          // Activate this button
          document.querySelectorAll('.sort-btn').forEach(b => b.classList.remove('active'));
          btn.classList.add('active');
          currentSortOrder = currentSort;
        }
      } else {
        // For non-toggle buttons (recent, most-used), behave normally
        const sortOrder = btn.dataset.sort;
        document.querySelectorAll('.sort-btn').forEach(b => b.classList.remove('active'));
        btn.classList.add('active');
        currentSortOrder = sortOrder;
      }
      renderPromptList();
    });
  });
  // Favorites filter — toggles between "all prompts" and "favorites only".
  const favoritesBtn = document.getElementById('show-favorites');
  if (favoritesBtn) {
    favoritesBtn.addEventListener('click', (e) => {
      e.stopPropagation();
      isShowingFavorites = !isShowingFavorites;
      // Toggle icon between filled and unfilled star (Material Symbols)
      const iconSpan = favoritesBtn.querySelector('.material-symbols-outlined');
      if (iconSpan) {
        if (isShowingFavorites) {
          iconSpan.classList.add('filled');
        } else {
          iconSpan.classList.remove('filled');
        }
      }
      favoritesBtn.title = isShowingFavorites ? 'Show all prompts' : 'Show favorites only';
      favoritesBtn.classList.toggle('active', isShowingFavorites);
      if (isShowingFavorites) {
        filterPrompts('favorites');
      } else {
        renderPromptList();
      }
    });
  }
  // Modal controls (prompt editor)
  document.getElementById('close-editor').addEventListener('click', closePromptEditor);
  document.getElementById('cancel-edit-btn').addEventListener('click', closePromptEditor);
  document.getElementById('save-prompt-btn').addEventListener('click', savePromptFromEditor);
  // Close modal on outside click (clicks on the backdrop element itself)
  document.getElementById('prompt-editor-modal').addEventListener('click', (e) => {
    if (e.target.id === 'prompt-editor-modal') {
      closePromptEditor();
    }
  });
  // T071: Insert Prompt Modal listeners
  document.getElementById('close-insert-modal').addEventListener('click', closeInsertPromptModal);
  document.getElementById('insert-beginning-btn').addEventListener('click', () => insertPromptToWorkspace('beginning'));
  document.getElementById('insert-end-btn').addEventListener('click', () => insertPromptToWorkspace('end'));
  document.getElementById('replace-workspace-btn').addEventListener('click', () => insertPromptToWorkspace('replace'));
  // Close insert modal on outside click
  document.getElementById('insert-prompt-modal').addEventListener('click', (e) => {
    if (e.target.id === 'insert-prompt-modal') {
      closeInsertPromptModal();
    }
  });
  // Workspace button listeners
  document.getElementById('workspace-send-btn').addEventListener('click', sendWorkspaceToProvider);
  document.getElementById('workspace-copy-btn').addEventListener('click', copyWorkspaceText);
  document.getElementById('workspace-save-btn').addEventListener('click', saveWorkspaceAsPrompt);
  document.getElementById('workspace-clear-btn').addEventListener('click', clearWorkspace);
  // Workspace provider selector — same open/close pattern as the category popup.
  const workspaceProviderBtn = document.getElementById('workspace-provider-btn');
  const workspaceProviderPopup = document.getElementById('workspace-provider-popup');
  workspaceProviderBtn.addEventListener('click', (e) => {
    e.stopPropagation();
    const isVisible = workspaceProviderPopup.style.display === 'block';
    workspaceProviderPopup.style.display = isVisible ? 'none' : 'block';
    workspaceProviderBtn.classList.toggle('active', !isVisible);
  });
  // Close popup when clicking outside
  document.addEventListener('click', (e) => {
    if (!workspaceProviderBtn.contains(e.target) && !workspaceProviderPopup.contains(e.target)) {
      workspaceProviderPopup.style.display = 'none';
      workspaceProviderBtn.classList.remove('active');
    }
  });
  // Handle provider selection from popup - delegate to dynamically added items
  workspaceProviderPopup.addEventListener('click', async (e) => {
    const item = e.target.closest('.workspace-provider-popup-item');
    if (item) {
      const providerId = item.dataset.providerId;
      // Update selected provider
      selectedWorkspaceProvider = providerId;
      // Update selected state in popup
      workspaceProviderPopup.querySelectorAll('.workspace-provider-popup-item').forEach(popupItem => {
        popupItem.classList.remove('selected');
      });
      item.classList.add('selected');
      // Update button icon to mirror the chosen provider's icon
      const icon = workspaceProviderBtn.querySelector('.provider-icon-small');
      const selectedIcon = item.querySelector('.provider-icon-small');
      if (icon && selectedIcon) {
        icon.src = selectedIcon.src;
        icon.alt = selectedIcon.alt;
      }
      // Close popup
      workspaceProviderPopup.style.display = 'none';
      workspaceProviderBtn.classList.remove('active');
    }
  });
  // T071: Quick Access Panel toggle listeners
  document.getElementById('toggle-recent').addEventListener('click', (e) => {
    e.stopPropagation();
    toggleQuickAccessSection('recent');
  });
  document.getElementById('toggle-favorites').addEventListener('click', (e) => {
    e.stopPropagation();
    toggleQuickAccessSection('favorites');
  });
}
/**
 * Switch the side panel between its top-level views: 'chat-history',
 * 'prompt-library', or the provider iframe view (any other value).
 * Hides every view, clears tab highlighting, then shows and initializes
 * the requested view.
 * @param {string} view - View identifier
 */
function switchToView(view) {
  currentView = view;
  // Hide every view; the selected one is re-shown below.
  for (const id of ['provider-container', 'prompt-library', 'chat-history']) {
    document.getElementById(id).style.display = 'none';
  }
  // Clear the active state on all tabs.
  document.querySelectorAll('#provider-tabs button').forEach((btn) => {
    btn.classList.remove('active');
  });
  if (view === 'chat-history') {
    document.getElementById('chat-history').style.display = 'flex';
    document.getElementById('chat-history-tab').classList.add('active');
    renderConversationList();
    updateProviderFilter();
  } else if (view === 'prompt-library') {
    document.getElementById('prompt-library').style.display = 'flex';
    document.getElementById('prompt-library-tab').classList.add('active');
    renderPromptList();
    renderQuickAccessPanel(); // T071: Render quick access panel
    updateCategoryFilter();
    updateWorkspaceProviderSelector(); // Initialize provider selector with icons
  } else {
    // Any other value falls back to the provider iframe view; re-highlight
    // the tab of the currently active provider, if any.
    document.getElementById('provider-container').style.display = 'flex';
    if (currentProvider) {
      const activeTab = document.querySelector(`#provider-tabs button[data-provider-id="${currentProvider}"]`);
      if (activeTab) activeTab.classList.add('active');
    }
  }
}
/**
 * Render the prompt list into #prompt-list.
 * @param {Array<Object>|null} [prompts=null] - Prompts to display. When null,
 *   the full list is fetched via getAllPrompts(). NOTE: the array is sorted
 *   in place according to the module-level currentSortOrder.
 */
async function renderPromptList(prompts = null) {
  const listContainer = document.getElementById('prompt-list');
  if (!prompts) {
    prompts = await getAllPrompts();
  }
  // Empty state: show a creation hint instead of an empty container.
  if (prompts.length === 0) {
    listContainer.innerHTML = `
      <div class="prompt-list-empty">
        <p><span class="material-symbols-outlined" style="font-size: 48px; opacity: 0.5;">description</span></p>
        <p>No prompts yet</p>
        <p>Click "+ New" to create your first prompt</p>
      </div>
    `;
    return;
  }
  // T071: Sort based on currentSortOrder
  prompts.sort((a, b) => {
    switch (currentSortOrder) {
      case 'most-used':
        return (b.useCount || 0) - (a.useCount || 0);
      case 'alphabetical':
        return a.title.localeCompare(b.title);
      case 'reverse-alphabetical':
        return b.title.localeCompare(a.title);
      case 'newest':
        return b.createdAt - a.createdAt;
      case 'oldest':
        return a.createdAt - b.createdAt;
      case 'recent':
      default:
        // Recently used first, then by created date; never-used prompts
        // sort after any used prompt.
        if (a.lastUsed && b.lastUsed) return b.lastUsed - a.lastUsed;
        if (a.lastUsed) return -1;
        if (b.lastUsed) return 1;
        return b.createdAt - a.createdAt;
    }
  });
  // Build the list markup. All user-controlled fields go through escapeHtml
  // before being interpolated into innerHTML.
  listContainer.innerHTML = prompts.map(prompt => `
    <div class="prompt-item" data-prompt-id="${prompt.id}">
      <div class="prompt-item-header">
        <h4 class="prompt-item-title">${escapeHtml(prompt.title)}</h4>
        <div class="prompt-item-actions">
          <button class="favorite-btn" data-id="${prompt.id}" title="Toggle favorite">
            <span class="material-symbols-outlined ${prompt.isFavorite ? 'filled' : ''}">star</span>
          </button>
          <button class="insert-btn" data-id="${prompt.id}" title="Insert to workspace"><span class="material-symbols-outlined">input_circle</span></button>
          <button class="copy-btn" data-id="${prompt.id}" title="Copy to clipboard"><span class="material-symbols-outlined">content_copy</span></button>
          <button class="edit-btn" data-id="${prompt.id}" title="Edit"><span class="material-symbols-outlined">edit</span></button>
          <button class="delete-btn" data-id="${prompt.id}" title="Delete"><span class="material-symbols-outlined">delete</span></button>
        </div>
      </div>
      <div class="prompt-item-content">${escapeHtml(prompt.content)}</div>
      <div class="prompt-item-meta">
        <span class="prompt-item-category">${escapeHtml(prompt.category)}</span>
        ${prompt.tags.length > 0 ? `
          <div class="prompt-item-tags">
            ${prompt.tags.map(tag => `<span class="prompt-item-tag">${escapeHtml(tag)}</span>`).join('')}
          </div>
        ` : ''}
        ${prompt.useCount > 0 ? `<span>Used ${prompt.useCount}×</span>` : ''}
      </div>
    </div>
  `).join('');
  // Add event listeners (innerHTML replacement dropped any previous ones).
  listContainer.querySelectorAll('.prompt-item').forEach(item => {
    const id = parseInt(item.dataset.promptId);
    // Click on item to use prompt (copy to clipboard + record usage)
    item.addEventListener('click', async (e) => {
      if (e.target.closest('button')) return; // Don't trigger on button clicks
      await usePrompt(id);
    });
  });
  listContainer.querySelectorAll('.favorite-btn').forEach(btn => {
    btn.addEventListener('click', async (e) => {
      e.stopPropagation();
      await togglePromptFavorite(parseInt(btn.dataset.id));
    });
  });
  listContainer.querySelectorAll('.copy-btn').forEach(btn => {
    btn.addEventListener('click', async (e) => {
      e.stopPropagation();
      await copyPromptToClipboard(parseInt(btn.dataset.id));
    });
  });
  listContainer.querySelectorAll('.insert-btn').forEach(btn => {
    btn.addEventListener('click', (e) => {
      e.stopPropagation();
      openInsertPromptModal(parseInt(btn.dataset.id));
    });
  });
  listContainer.querySelectorAll('.edit-btn').forEach(btn => {
    btn.addEventListener('click', (e) => {
      e.stopPropagation();
      openPromptEditor(parseInt(btn.dataset.id));
    });
  });
  listContainer.querySelectorAll('.delete-btn').forEach(btn => {
    btn.addEventListener('click', async (e) => {
      e.stopPropagation();
      await deletePromptWithConfirm(parseInt(btn.dataset.id));
    });
  });
}
/**
 * Fetch a filtered subset of prompts and render it.
 * @param {('search'|'category'|'favorites')} filterType - Which filter to apply
 * @param {string} [value] - Search query or category name (unused for favorites)
 */
async function filterPrompts(filterType, value) {
  let matching;
  switch (filterType) {
    case 'search':
      matching = await searchPrompts(value);
      break;
    case 'category':
      matching = await getPromptsByCategory(value);
      break;
    case 'favorites':
      matching = await getFavoritePrompts();
      break;
  }
  renderPromptList(matching);
}
/**
 * Rebuild the category filter popup from the current set of categories.
 * The first entry ("All Categories", empty value) is selected by default.
 */
async function updateCategoryFilter() {
  const categories = await getAllCategories();
  const popup = document.getElementById('category-popup');
  const items = ['<div class="category-popup-item selected" data-value="">All Categories</div>'];
  for (const cat of categories) {
    items.push(`<div class="category-popup-item" data-value="${escapeHtml(cat)}">${escapeHtml(cat)}</div>`);
  }
  popup.innerHTML = items.join('');
}
/**
 * Open the prompt editor modal, pre-filled when editing an existing prompt
 * or blank when creating a new one.
 * @param {number|null} [promptId=null] - Prompt to edit, or null for a new prompt
 */
function openPromptEditor(promptId = null) {
  const modal = document.getElementById('prompt-editor-modal');
  const heading = document.getElementById('editor-title');
  currentEditingPromptId = promptId;
  if (promptId) {
    // Editing: load the existing prompt into the form (async, fire-and-forget).
    heading.textContent = 'Edit Prompt';
    loadPromptIntoEditor(promptId);
  } else {
    // Creating: reset the form to its defaults.
    heading.textContent = 'New Prompt';
    clearEditorFields();
  }
  modal.style.display = 'flex';
}
/**
 * Populate the editor form fields from an existing prompt.
 * Looks the prompt up by id in the full prompt list; does nothing if absent.
 * @param {number} promptId - Id of the prompt to load
 */
async function loadPromptIntoEditor(promptId) {
  const allPrompts = await getAllPrompts();
  const target = allPrompts.find((p) => p.id === promptId);
  if (!target) return;
  const { title, content, category, tags, isFavorite } = target;
  document.getElementById('prompt-title-input').value = title;
  document.getElementById('prompt-content-input').value = content;
  document.getElementById('prompt-category-input').value = category;
  document.getElementById('prompt-tags-input').value = tags.join(', ');
  document.getElementById('prompt-favorite-input').checked = isFavorite;
}
/** Reset every editor form field to its default for a brand-new prompt. */
function clearEditorFields() {
  const defaults = {
    'prompt-title-input': '',
    'prompt-content-input': '',
    'prompt-category-input': 'General',
    'prompt-tags-input': ''
  };
  for (const [id, value] of Object.entries(defaults)) {
    document.getElementById(id).value = value;
  }
  document.getElementById('prompt-favorite-input').checked = false;
}
/** Close the prompt editor modal and discard any in-progress edit state. */
function closePromptEditor() {
  const modal = document.getElementById('prompt-editor-modal');
  modal.style.display = 'none';
  currentEditingPromptId = null;
  clearEditorFields();
}
/**
 * Validate the editor form and persist the prompt. Updates the prompt being
 * edited when currentEditingPromptId is set, otherwise creates a new one.
 * Content is required; title and category fall back to defaults; tags are
 * parsed from a comma-separated list.
 */
async function savePromptFromEditor() {
  const fieldValue = (id) => document.getElementById(id).value.trim();
  const title = fieldValue('prompt-title-input');
  const content = fieldValue('prompt-content-input');
  const category = fieldValue('prompt-category-input') || 'General';
  const tagsInput = fieldValue('prompt-tags-input');
  const isFavorite = document.getElementById('prompt-favorite-input').checked;
  if (!content) {
    alert('Please enter prompt content');
    return;
  }
  // Split on commas, trimming each tag and dropping empties.
  const tags = tagsInput
    ? tagsInput.split(',').map((t) => t.trim()).filter(Boolean)
    : [];
  const promptData = {
    title: title || 'Untitled Prompt',
    content,
    category,
    tags,
    isFavorite
  };
  try {
    if (currentEditingPromptId) {
      await updatePrompt(currentEditingPromptId, promptData);
    } else {
      await savePrompt(promptData);
    }
    closePromptEditor();
    renderPromptList();
    updateCategoryFilter();
  } catch (error) {
    console.error('Error saving prompt:', error);
    alert('Failed to save prompt. Please try again.');
  }
}
/**
 * "Use" a prompt: copy its content to the clipboard, record the usage for
 * the recent/most-used orderings, and refresh the affected lists. Shows a
 * toast on both success and failure.
 * @param {number} promptId - Id of the prompt to use
 */
async function usePrompt(promptId) {
  try {
    const allPrompts = await getAllPrompts();
    const target = allPrompts.find((p) => p.id === promptId);
    if (!target) return;
    // Copy first so a usage-recording failure still reports a copy error path.
    await navigator.clipboard.writeText(target.content);
    await recordPromptUsage(promptId);
    showToast('Prompt copied to clipboard!');
    // Refresh views that display use counts / recency.
    renderPromptList();
    renderQuickAccessPanel(); // T071: Update quick access panel
  } catch (error) {
    console.error('Error using prompt:', error);
    showToast('Failed to copy prompt');
  }
}
/**
 * Flip the favorite flag on a prompt and refresh whichever list is currently
 * displayed (the favorites filter or the full list).
 * @param {number} promptId - Id of the prompt to toggle
 */
async function togglePromptFavorite(promptId) {
  try {
    await toggleFavorite(promptId);
    // Keep the visible list consistent with the active filter.
    if (isShowingFavorites) {
      filterPrompts('favorites');
    } else {
      renderPromptList();
    }
  } catch (error) {
    console.error('Error toggling favorite:', error);
  }
}
/**
 * Copy a prompt's content to the clipboard without recording usage.
 * Shows a toast describing the outcome.
 * @param {number} promptId - Id of the prompt to copy
 */
async function copyPromptToClipboard(promptId) {
  try {
    const allPrompts = await getAllPrompts();
    const target = allPrompts.find((p) => p.id === promptId);
    if (!target) return;
    await navigator.clipboard.writeText(target.content);
    showToast('Copied to clipboard!');
  } catch (error) {
    console.error('Error copying to clipboard:', error);
    showToast('Failed to copy');
  }
}
/**
 * Delete a prompt after a native confirm() dialog, then refresh the list
 * and the category filter. Shows a toast describing the outcome.
 * @param {number} promptId - Id of the prompt to delete
 */
async function deletePromptWithConfirm(promptId) {
  const allPrompts = await getAllPrompts();
  const target = allPrompts.find((p) => p.id === promptId);
  if (!target) return;
  // Guard clause: bail out unless the user confirms the deletion.
  if (!confirm(`Delete prompt "${target.title}"?`)) return;
  try {
    await deletePrompt(promptId);
    renderPromptList();
    updateCategoryFilter();
    showToast('Prompt deleted');
  } catch (error) {
    console.error('Error deleting prompt:', error);
    showToast('Failed to delete prompt');
  }
}
// T071: Insert Prompt Modal functions

/**
 * Open the "insert prompt" modal for the given prompt, showing a preview of
 * its content and refreshing the workspace provider selector first.
 * @param {number} promptId - Id of the prompt to insert
 */
async function openInsertPromptModal(promptId) {
  const allPrompts = await getAllPrompts();
  const target = allPrompts.find((p) => p.id === promptId);
  if (!target) return;
  currentInsertPromptId = promptId;
  // Preview the prompt body inside the modal.
  document.getElementById('insert-prompt-preview').textContent = target.content;
  // Update workspace provider selector (workspace is always visible now)
  await updateWorkspaceProviderSelector();
  document.getElementById('insert-prompt-modal').style.display = 'flex';
}
/** Hide the insert-prompt modal and forget which prompt was pending. */
function closeInsertPromptModal() {
  const modal = document.getElementById('insert-prompt-modal');
  modal.style.display = 'none';
  currentInsertPromptId = null;
}
/**
 * Insert the pending prompt (currentInsertPromptId) into the workspace
 * textarea at the requested position, record the usage, close the modal,
 * and refresh the prompt list and quick-access panel.
 * @param {('beginning'|'end'|'replace')} position - Where to insert
 */
async function insertPromptToWorkspace(position) {
  if (!currentInsertPromptId) return;
  const prompts = await getAllPrompts();
  const prompt = prompts.find((p) => p.id === currentInsertPromptId);
  if (!prompt) return;
  const textarea = document.getElementById('prompt-workspace-text');
  const currentText = textarea.value.trim();
  const promptContent = prompt.content.trim();
  let newText;
  if (position === 'beginning') {
    newText = currentText ? `${promptContent}\n\n${currentText}` : promptContent;
  } else if (position === 'end') {
    newText = currentText ? `${currentText}\n\n${promptContent}` : promptContent;
  } else if (position === 'replace') {
    newText = promptContent;
  } else {
    // Bug fix: an unknown position previously left newText undefined, and
    // assigning undefined to .value renders the literal string "undefined".
    console.warn('Unknown insert position:', position);
    return;
  }
  textarea.value = newText;
  // Record usage so the recent/most-used orderings stay accurate.
  await recordPromptUsage(currentInsertPromptId);
  closeInsertPromptModal();
  showToast('Prompt inserted to workspace!');
  // Re-render the prompt list and quick access panel to update use counts.
  renderPromptList();
  renderQuickAccessPanel();
}
// T071: Quick Access Panel functions

/**
 * Render the quick-access panel (recently used + top favorites, 5 each).
 * The panel is hidden entirely when both sections would be empty.
 */
async function renderQuickAccessPanel() {
  const recent = await getRecentlyUsedPrompts(5);
  const favorites = await getTopFavorites(5);
  const panel = document.getElementById('quick-access-panel');
  // Nothing to show in either section: hide the whole panel.
  if (recent.length === 0 && favorites.length === 0) {
    panel.style.display = 'none';
    return;
  }
  panel.style.display = 'block';
  renderQuickAccessSection('recent-prompts-list', recent, 'recently used');
  renderQuickAccessSection('top-favorites-list', favorites, 'favorite');
}
/**
 * Render one quick-access section (recent or favorites) into its container.
 * @param {string} containerId - Id of the section's list container element
 * @param {Array<Object>} prompts - Prompts to display (already limited/sorted)
 * @param {string} emptyMessage - Noun phrase used in the empty-state text
 */
function renderQuickAccessSection(containerId, prompts, emptyMessage) {
  const container = document.getElementById(containerId);
  if (prompts.length === 0) {
    container.innerHTML = `<div class="quick-access-empty">No ${emptyMessage} prompts yet</div>`;
    return;
  }
  // User-controlled fields go through escapeHtml before innerHTML insertion.
  container.innerHTML = prompts.map(prompt => {
    const lastUsedText = prompt.lastUsed
      ? formatRelativeTime(prompt.lastUsed)
      : '';
    return `
      <div class="quick-access-item" data-prompt-id="${prompt.id}">
        <div class="quick-access-item-title">${escapeHtml(prompt.title)}</div>
        <div class="quick-access-item-content">${escapeHtml(prompt.content)}</div>
        <div class="quick-access-item-meta">
          ${prompt.useCount > 0 ? `<span>Used ${prompt.useCount}×</span>` : ''}
          ${lastUsedText ? `<span>${lastUsedText}</span>` : ''}
          ${prompt.isFavorite ? '<span class="material-symbols-outlined filled" style="font-size: 14px;">star</span>' : ''}
        </div>
      </div>
    `;
  }).join('');
  // Add click listeners: clicking an item opens the insert-prompt modal.
  container.querySelectorAll('.quick-access-item').forEach(item => {
    const promptId = parseInt(item.dataset.promptId);
    item.addEventListener('click', () => {
      openInsertPromptModal(promptId);
    });
  });
}
/**
 * Collapse or expand one quick-access section and swap the toggle icon
 * between 'add' (collapsed) and 'remove' (expanded) Material Symbols.
 * @param {('recent'|'favorites')} section - Which section to toggle
 */
function toggleQuickAccessSection(section) {
  const isRecent = section === 'recent';
  const list = document.getElementById(isRecent ? 'recent-prompts-list' : 'top-favorites-list');
  const toggleBtn = document.getElementById(isRecent ? 'toggle-recent' : 'toggle-favorites');
  list.classList.toggle('collapsed');
  // Update Material Symbols icon to reflect the new state.
  const iconSpan = toggleBtn.querySelector('.material-symbols-outlined');
  if (iconSpan) {
    iconSpan.textContent = list.classList.contains('collapsed') ? 'add' : 'remove';
  }
}
/**
 * Format a past timestamp as a coarse relative-time string:
 * "Nd ago", "Nh ago", "Nm ago", or "just now" when under a minute.
 * @param {number} timestamp - Epoch milliseconds in the past
 * @returns {string} Human-readable relative time
 */
function formatRelativeTime(timestamp) {
  const elapsedMs = Date.now() - timestamp;
  // Largest-unit-first lookup; first unit with a whole count >= 1 wins.
  const units = [
    { label: 'd', ms: 24 * 60 * 60 * 1000 },
    { label: 'h', ms: 60 * 60 * 1000 },
    { label: 'm', ms: 60 * 1000 }
  ];
  for (const { label, ms } of units) {
    const count = Math.floor(elapsedMs / ms);
    if (count > 0) return `${count}${label} ago`;
  }
  return 'just now';
}
/**
 * Show a transient toast notification near the bottom of the panel.
 * The element is created ad hoc, starts fading after 2s, and removes
 * itself 300ms later (once the opacity transition has finished).
 * @param {string} message - Text to display
 */
function showToast(message) {
  // Simple toast notification
  const toast = document.createElement('div');
  toast.textContent = message;
  toast.style.cssText = `
    position: fixed;
    bottom: 80px;
    left: 50%;
    transform: translateX(-50%);
    background: #333;
    color: white;
    padding: 12px 24px;
    border-radius: 4px;
    font-size: 14px;
    z-index: 10000;
    box-shadow: 0 2px 8px rgba(0,0,0,0.3);
  `;
  document.body.appendChild(toast);
  setTimeout(() => {
    // Fade out, then remove after the 0.3s transition completes.
    toast.style.opacity = '0';
    toast.style.transition = 'opacity 0.3s';
    setTimeout(() => toast.remove(), 300);
  }, 2000);
}
/**
 * Detect Microsoft Edge. Prefers the structured userAgentData brands list
 * (Chromium Client Hints) and falls back to sniffing the legacy userAgent
 * string for the "Edg/" token.
 * @returns {boolean} True when running in Edge
 */
function isEdgeBrowser() {
  const brands = navigator.userAgentData?.brands;
  if (Array.isArray(brands)) {
    return brands.some(({ brand }) => /Edge/i.test(brand));
  }
  return navigator.userAgent.includes('Edg/');
}
/**
 * On Edge, show a one-time reminder to configure the extension's keyboard
 * shortcut — unless the user already dismissed it or disabled the shortcut
 * feature. Storage read failures are logged and otherwise ignored
 * (the reminder is best-effort UX).
 */
async function checkEdgeShortcutReminder() {
  if (!isEdgeBrowser()) return;
  try {
    const settings = await chrome.storage.sync.get({
      [EDGE_SHORTCUT_STORAGE_KEY]: false,
      keyboardShortcutEnabled: true
    });
    // Respect an explicit opt-out of the keyboard shortcut feature.
    if (settings.keyboardShortcutEnabled === false) return;
    // Only show the banner until it has been dismissed once.
    if (!settings[EDGE_SHORTCUT_STORAGE_KEY]) {
      showEdgeShortcutReminder();
    }
  } catch (error) {
    console.warn('Unable to read shortcut reminder state:', error);
  }
}
/**
 * Build and show the Edge shortcut reminder banner with "Open settings" and
 * "Dismiss" actions. Both actions persist the dismissal (so the banner is
 * only ever shown once) and remove the banner; "Open settings" additionally
 * opens edge://extensions/shortcuts in a new tab.
 */
function showEdgeShortcutReminder() {
  const banner = document.createElement('div');
  banner.style.cssText = `
    position: fixed;
    top: 16px;
    left: 50%;
    transform: translateX(-50%);
    background: rgba(22, 22, 22, 0.92);
    color: #fff;
    padding: 16px 20px;
    border-radius: 8px;
    box-shadow: 0 4px 12px rgba(0,0,0,0.25);
    display: flex;
    align-items: center;
    gap: 12px;
    z-index: 10000;
    max-width: 420px;
    font-size: 14px;
  `;
  const message = document.createElement('div');
  message.textContent = 'Enable the insidebar.ai shortcut: confirm it in edge://extensions/shortcuts';
  const actions = document.createElement('div');
  actions.style.display = 'flex';
  actions.style.gap = '8px';
  // Primary action: open the browser's shortcut settings page.
  const openButton = document.createElement('button');
  openButton.textContent = 'Open settings';
  openButton.style.cssText = `
    background: #4c8bf5;
    border: none;
    color: #fff;
    padding: 6px 12px;
    border-radius: 4px;
    cursor: pointer;
  `;
  openButton.addEventListener('click', async () => {
    openBrowserShortcutSettings('edge');
    await dismissEdgeShortcutReminder();
    banner.remove();
  });
  // Secondary action: just dismiss (still persisted so it won't reappear).
  const dismissButton = document.createElement('button');
  dismissButton.textContent = 'Dismiss';
  dismissButton.style.cssText = `
    background: transparent;
    border: 1px solid rgba(255,255,255,0.6);
    color: #fff;
    padding: 6px 12px;
    border-radius: 4px;
    cursor: pointer;
  `;
  dismissButton.addEventListener('click', async () => {
    await dismissEdgeShortcutReminder();
    banner.remove();
  });
  actions.appendChild(openButton);
  actions.appendChild(dismissButton);
  banner.appendChild(message);
  banner.appendChild(actions);
  document.body.appendChild(banner);
}
/**
 * Persist that the Edge shortcut reminder has been dismissed so it will not
 * be shown on subsequent panel opens. Failures are logged and swallowed —
 * the reminder is purely best-effort UX.
 */
async function dismissEdgeShortcutReminder() {
  const update = { [EDGE_SHORTCUT_STORAGE_KEY]: true };
  try {
    await chrome.storage.sync.set(update);
  } catch (err) {
    console.warn('Unable to persist shortcut reminder state:', err);
  }
}
/**
 * Open the browser's extension-shortcut settings page in a new tab.
 * Falls back to window.open when chrome.tabs is unavailable or throws.
 * @param {('edge'|'chrome')} browser - Which browser's settings URL to use
 */
function openBrowserShortcutSettings(browser) {
  const urls = {
    edge: 'edge://extensions/shortcuts',
    chrome: 'chrome://extensions/shortcuts'
  };
  // Anything other than 'edge' maps to the Chrome URL, matching the
  // original ternary behavior.
  const url = browser === 'edge' ? urls.edge : urls.chrome;
  try {
    chrome.tabs.create({ url });
  } catch (error) {
    console.warn('Unable to open shortcut settings via chrome.tabs, falling back to window.open', error);
    window.open(url, '_blank');
  }
}
// Workspace helper functions
// Provider currently chosen in the workspace "send to" selector; null until
// updateWorkspaceProviderSelector() initializes it.
let selectedWorkspaceProvider = null;
/**
 * Rebuild the workspace "send to provider" selector: set the button icon to
 * the current selection and repopulate the popup with every enabled provider
 * (preferring dark-theme icon variants when available). The selection
 * defaults to the active provider, else the first enabled one, else ''.
 * No-op when the selector elements are not in the DOM.
 */
async function updateWorkspaceProviderSelector() {
  const btn = document.getElementById('workspace-provider-btn');
  const popup = document.getElementById('workspace-provider-popup');
  if (!btn || !popup) return;
  const enabledProviders = await getEnabledProviders();
  const useDarkIcons = isDarkTheme();
  // Set current provider as default if available
  selectedWorkspaceProvider = currentProvider || enabledProviders[0]?.id || '';
  // Update button icon
  const currentProviderData = enabledProviders.find(p => p.id === selectedWorkspaceProvider);
  if (currentProviderData) {
    const icon = btn.querySelector('.provider-icon-small');
    icon.src = useDarkIcons && currentProviderData.iconDark ? currentProviderData.iconDark : currentProviderData.icon;
    icon.alt = currentProviderData.name;
  }
  // Populate popup with providers; names are escaped before innerHTML use.
  popup.innerHTML = enabledProviders.map(provider => `
    <div class="workspace-provider-popup-item ${provider.id === selectedWorkspaceProvider ? 'selected' : ''}" data-provider-id="${provider.id}">
      <img class="provider-icon-small" src="${useDarkIcons && provider.iconDark ? provider.iconDark : provider.icon}" alt="${escapeHtml(provider.name)}">
      <span>${escapeHtml(provider.name)}</span>
    </div>
  `).join('');
}
/**
 * Replace the workspace textarea's contents with the given text and refresh
 * the provider selector. No-op when the textarea is not in the DOM.
 * @param {string} text - Text to show in the workspace
 */
function showWorkspaceWithText(text) {
  const workspaceArea = document.getElementById('prompt-workspace-text');
  if (!workspaceArea) return;
  workspaceArea.value = text;
  // Update provider selector (workspace is always visible now);
  // async, fire-and-forget.
  updateWorkspaceProviderSelector();
}
/**
 * Copy the trimmed workspace text to the clipboard, with toast feedback.
 * Warns (via toast) when the workspace is empty.
 */
async function copyWorkspaceText() {
  const text = document.getElementById('prompt-workspace-text').value.trim();
  if (!text) {
    showToast('Workspace is empty');
    return;
  }
  try {
    await navigator.clipboard.writeText(text);
    showToast('Copied to clipboard!');
  } catch (error) {
    console.error('Error copying to clipboard:', error);
    showToast('Failed to copy');
  }
}
/**
 * Open the prompt editor pre-filled with the current workspace text so the
 * user can save it as a reusable prompt. Warns (via toast) when the
 * workspace is empty.
 */
function saveWorkspaceAsPrompt() {
  const textarea = document.getElementById('prompt-workspace-text');
  const text = textarea.value.trim();
  if (!text) {
    showToast('Workspace is empty');
    return;
  }
  // Open a blank editor, then pre-fill the content field directly.
  // openPromptEditor(null) clears the fields synchronously (clearEditorFields
  // is sync), so the previous 50ms setTimeout was a race-prone workaround
  // and is no longer needed.
  openPromptEditor(null);
  document.getElementById('prompt-content-input').value = text;
}
/**
 * Empty the workspace textarea. The workspace panel itself stays visible —
 * it is a permanent part of the prompt-library view.
 */
function clearWorkspace() {
  document.getElementById('prompt-workspace-text').value = '';
}
/**
 * Send the workspace text to the provider chosen in the workspace selector:
 * switches the panel to that provider, then injects the text into its
 * iframe (waiting for it to be ready). Validates that a provider is
 * selected and the workspace is non-empty; reports outcomes via toasts.
 */
async function sendWorkspaceToProvider() {
  const providerId = selectedWorkspaceProvider;
  const text = document.getElementById('prompt-workspace-text').value.trim();
  if (!providerId) {
    showToast('Please select a provider');
    return;
  }
  if (!text) {
    showToast('Workspace is empty');
    return;
  }
  try {
    // Bring the target provider into view, then inject once its iframe is ready.
    await switchProvider(providerId);
    await injectTextIntoProvider(providerId, text);
    const provider = await getProviderByIdWithSettings(providerId);
    showToast(`Text sent to ${provider.name}!`);
    // Optionally clear workspace after sending
    // clearWorkspace();
  } catch (error) {
    console.error('Error sending workspace text to provider:', error);
    showToast('Failed to send text');
  }
}
/**
 * HTML-escape arbitrary text by round-tripping it through a detached
 * element's textContent/innerHTML, letting the browser do the escaping.
 * @param {string} text - Untrusted text
 * @returns {string} HTML-safe string
 */
function escapeHtml(text) {
  const scratch = document.createElement('div');
  scratch.textContent = text;
  return scratch.innerHTML;
}
// Sanitize HTML to prevent XSS attacks.
// Allows safe markdown-generated tags but strips active content: besides the
// original script-tag / on*-handler / scheme checks, this now also removes
// other script-capable elements (iframe, object, embed, style, base, form),
// the srcdoc attribute, and whitespace-obfuscated schemes such as
// "jav\tascript:" which browsers still execute.
// NOTE: a DOM denylist is best-effort; for stronger guarantees consider a
// vetted sanitizer (e.g. DOMPurify) or the browser Sanitizer API.
function sanitizeHtml(html) {
  const temp = document.createElement('div');
  temp.innerHTML = html;
  // Remove elements that can execute script or load active content.
  temp.querySelectorAll('script, iframe, object, embed, link, style, base, form')
    .forEach(el => el.remove());
  const allElements = temp.querySelectorAll('*');
  allElements.forEach(el => {
    // Remove event handler attributes and srcdoc (inline document vector).
    Array.from(el.attributes).forEach(attr => {
      const attrName = attr.name.toLowerCase();
      if (attrName.startsWith('on') || attrName === 'srcdoc') {
        el.removeAttribute(attr.name);
      }
    });
    // Remove URL attributes carrying dangerous schemes. Whitespace and
    // control characters are stripped before matching because browsers
    // ignore them when parsing URLs.
    ['href', 'src', 'action', 'formaction'].forEach(attrName => {
      const value = el.getAttribute(attrName);
      if (value) {
        const normalized = value.replace(/[\s\u0000-\u001f]/g, '').toLowerCase();
        if (['javascript:', 'data:', 'vbscript:'].some(protocol => normalized.includes(protocol))) {
          el.removeAttribute(attrName);
        }
      }
    });
  });
  return temp.innerHTML;
}
// Chat History state
// Most recent search-box query; kept so filtering can be re-applied later.
let currentSearchQuery = '';
// localStorage key and size cap for the persisted search-history datalist.
const SEARCH_HISTORY_KEY = 'insidebar_search_history';
const MAX_SEARCH_HISTORY = 10;
// Chat History Implementation
// Wire up all Chat History UI controls: debounced search box, search tips
// helper, provider filter popup, favorites toggle, and the save/view modals.
// Assumes the chat-history DOM (element ids referenced below) already exists.
function setupChatHistory() {
  // Search functionality
  const searchInput = document.getElementById('history-search');
  let searchTimeout;
  // Debounce by 300ms so the DB is not queried on every keystroke.
  searchInput.addEventListener('input', (e) => {
    clearTimeout(searchTimeout);
    searchTimeout = setTimeout(() => {
      const query = e.target.value.trim();
      currentSearchQuery = query;
      if (query) {
        filterConversations('search', query);
        // Save to search history
        saveSearchHistory(query);
      } else {
        // Empty query: show the full list and clear the result counter.
        renderConversationList();
        hideSearchResultCount();
      }
    }, 300);
  });
  // Load search history into datalist
  loadSearchHistory();
  // Search tips button
  const searchTipsBtn = document.getElementById('history-show-search-tips');
  if (searchTipsBtn) {
    searchTipsBtn.addEventListener('click', () => {
      // Toggle the tips helper panel.
      const helper = document.getElementById('search-helper');
      if (helper.style.display === 'none') {
        helper.style.display = 'flex';
      } else {
        helper.style.display = 'none';
      }
    });
  }
  // Close search helper button
  const closeHelperBtn = document.getElementById('close-search-helper');
  if (closeHelperBtn) {
    closeHelperBtn.addEventListener('click', () => {
      document.getElementById('search-helper').style.display = 'none';
    });
  }
  // Provider filter button
  const providerBtn = document.getElementById('provider-filter-btn');
  const providerPopup = document.getElementById('provider-popup');
  providerBtn.addEventListener('click', (e) => {
    // Stop propagation so the document-level "click outside" handler below
    // does not immediately re-close the popup.
    e.stopPropagation();
    const isVisible = providerPopup.style.display === 'block';
    providerPopup.style.display = isVisible ? 'none' : 'block';
    providerBtn.classList.toggle('active', !isVisible);
  });
  // Close popup when clicking outside
  document.addEventListener('click', (e) => {
    if (!providerBtn.contains(e.target) && !providerPopup.contains(e.target)) {
      providerPopup.style.display = 'none';
      providerBtn.classList.remove('active');
    }
  });
  // Handle provider selection from popup (event delegation on the popup).
  providerPopup.addEventListener('click', (e) => {
    if (e.target.classList.contains('provider-popup-item')) {
      // Empty data-value means "All Providers".
      const value = e.target.dataset.value;
      // Update selected state
      providerPopup.querySelectorAll('.provider-popup-item').forEach(item => {
        item.classList.remove('selected');
      });
      e.target.classList.add('selected');
      // Filter conversations
      if (value) {
        filterConversations('provider', value);
      } else {
        renderConversationList();
      }
      // Close popup
      providerPopup.style.display = 'none';
      providerBtn.classList.remove('active');
    }
  });
  // Favorites filter
  const favoritesBtn = document.getElementById('history-show-favorites');
  if (favoritesBtn) {
    favoritesBtn.addEventListener('click', (e) => {
      e.stopPropagation();
      isShowingHistoryFavorites = !isShowingHistoryFavorites;
      // Reflect the toggle in the star icon's filled state.
      const iconSpan = favoritesBtn.querySelector('.material-symbols-outlined');
      if (iconSpan) {
        if (isShowingHistoryFavorites) {
          iconSpan.classList.add('filled');
        } else {
          iconSpan.classList.remove('filled');
        }
      }
      favoritesBtn.title = isShowingHistoryFavorites ? 'Show all conversations' : 'Show favorites only';
      favoritesBtn.classList.toggle('active', isShowingHistoryFavorites);
      if (isShowingHistoryFavorites) {
        filterConversations('favorites');
      } else {
        renderConversationList();
      }
    });
  }
  // Save conversation modal controls
  document.getElementById('close-save-conversation').addEventListener('click', closeSaveConversationModal);
  document.getElementById('cancel-save-conversation-btn').addEventListener('click', closeSaveConversationModal);
  document.getElementById('save-conversation-submit-btn').addEventListener('click', saveConversationFromModal);
  // View conversation modal controls
  document.getElementById('close-view-conversation').addEventListener('click', closeViewConversationModal);
  document.getElementById('copy-conversation-btn').addEventListener('click', copyConversationContent);
  document.getElementById('delete-conversation-from-view-btn').addEventListener('click', deleteConversationFromView);
  // Close modals on outside click (only when the backdrop itself is hit).
  document.getElementById('save-conversation-modal').addEventListener('click', (e) => {
    if (e.target.id === 'save-conversation-modal') {
      closeSaveConversationModal();
    }
  });
  document.getElementById('view-conversation-modal').addEventListener('click', (e) => {
    if (e.target.id === 'view-conversation-modal') {
      closeViewConversationModal();
    }
  });
}
// Render the conversation list into #conversation-list.
// @param {Array|null} conversations - pre-filtered records; when null/omitted,
//   all conversations are loaded from storage. Shows an empty-state panel when
//   there is nothing to display. Also wires per-item click/favorite/delete
//   handlers after rendering.
async function renderConversationList(conversations = null) {
  const listContainer = document.getElementById('conversation-list');
  if (!conversations) {
    conversations = await getAllConversations();
  }
  if (conversations.length === 0) {
    listContainer.innerHTML = `
      <div class="conversation-list-empty">
        <p><span class="material-symbols-outlined" style="font-size: 48px; opacity: 0.5;">chat_bubble</span></p>
        <p>No conversations yet</p>
        <p>Click "+" to save a conversation</p>
      </div>
    `;
    return;
  }
  // Sort by modification time (most recently modified first), fallback to timestamp
  conversations.sort((a, b) => {
    const aTime = a.modifiedAt || a.timestamp;
    const bTime = b.modifiedAt || b.timestamp;
    return bTime - aTime;
  });
  // Get dark theme status
  const useDarkIcons = isDarkTheme();
  listContainer.innerHTML = conversations.map(conv => {
    // Truncate long bodies to a 150-char preview.
    const preview = conv.content.length > 150
      ? conv.content.substring(0, 150) + '...'
      : conv.content;
    const date = new Date(conv.timestamp).toLocaleDateString();
    const time = new Date(conv.timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
    // Use URL field directly if available
    const conversationUrl = conv.url;
    // Create date/time display - make it a link if URL is available
    const dateTimeDisplay = conversationUrl
      ? `<a href="${escapeHtml(conversationUrl)}" target="_blank" class="conversation-item-date conversation-item-link" title="Open original conversation">${date} ${time}</a>`
      : `<span class="conversation-item-date">${date} ${time}</span>`;
    // Get provider data for icon
    const provider = getProviderById(conv.provider.toLowerCase());
    const providerIconSrc = provider
      ? (useDarkIcons && provider.iconDark ? provider.iconDark : provider.icon)
      : '';
    // NOTE(review): providerIconSrc is interpolated unescaped — presumably it
    // always comes from the bundled provider config (trusted); verify.
    const providerIconHtml = providerIconSrc
      ? `<img class="provider-icon-small" src="${providerIconSrc}" alt="${escapeHtml(conv.provider)}">`
      : '';
    return `
      <div class="conversation-item" data-conversation-id="${conv.id}">
        <div class="conversation-item-header">
          <h4 class="conversation-item-title">${escapeHtml(conv.title)}</h4>
          <div class="conversation-item-actions">
            <button class="favorite-btn" data-id="${conv.id}" title="Toggle favorite">
              <span class="material-symbols-outlined ${conv.isFavorite ? 'filled' : ''}">star</span>
            </button>
            <button class="delete-conversation-btn" data-id="${conv.id}" title="Delete"><span class="material-symbols-outlined">delete</span></button>
          </div>
        </div>
        <div class="conversation-item-preview">${escapeHtml(preview)}</div>
        <div class="conversation-item-meta">
          <span class="conversation-item-provider">${providerIconHtml}${escapeHtml(conv.provider)}</span>
          ${dateTimeDisplay}
          ${conv.tags.length > 0 ? `
            <div class="conversation-item-tags">
              ${conv.tags.map(tag => `<span class="conversation-item-tag">${escapeHtml(tag)}</span>`).join('')}
            </div>
          ` : ''}
        </div>
      </div>
    `;
  }).join('');
  // Add event listeners
  listContainer.querySelectorAll('.conversation-item').forEach(item => {
    const id = parseInt(item.dataset.conversationId);
    // Click on item to view full conversation
    item.addEventListener('click', async (e) => {
      // Don't trigger on button clicks or link clicks
      if (e.target.closest('button') || e.target.closest('a.conversation-item-link')) return;
      await viewConversation(id);
    });
  });
  listContainer.querySelectorAll('.favorite-btn').forEach(btn => {
    btn.addEventListener('click', async (e) => {
      e.stopPropagation();
      await toggleConversationFavoriteStatus(parseInt(btn.dataset.id));
    });
  });
  listContainer.querySelectorAll('.delete-conversation-btn').forEach(btn => {
    btn.addEventListener('click', async (e) => {
      e.stopPropagation();
      await deleteConversationWithConfirm(parseInt(btn.dataset.id));
    });
  });
}
// Re-render the conversation list filtered by 'search', 'provider', or
// 'favorites'. An unknown filterType leaves `conversations` undefined, which
// renderConversationList treats as "load everything".
async function filterConversations(filterType, value) {
  let conversations;
  switch (filterType) {
    case 'search':
      conversations = await searchConversations(value);
      // Show search result count
      showSearchResultCount(conversations.length);
      break;
    case 'provider':
      conversations = await getConversationsByProvider(value);
      break;
    case 'favorites':
      conversations = await getFavoriteConversations();
      break;
  }
  renderConversationList(conversations);
}
// Rebuild the provider-filter popup from the enabled provider list,
// with an "All Providers" entry selected by default.
async function updateProviderFilter() {
  const popup = document.getElementById('provider-popup');
  const enabledProviders = await getEnabledProviders();
  // Filter out DeepSeek since we don't have a reliable extractor for it
  const providerOptions = enabledProviders
    .filter(p => p.id !== 'deepseek')
    .map(provider => `<div class="provider-popup-item" data-value="${escapeHtml(provider.id)}">${escapeHtml(provider.name)}</div>`);
  const allOption = '<div class="provider-popup-item selected" data-value="">All Providers</div>';
  popup.innerHTML = [allOption, ...providerOptions].join('');
}
// Hide the save-conversation dialog and drop any in-progress edit reference.
function closeSaveConversationModal() {
  const modal = document.getElementById('save-conversation-modal');
  modal.style.display = 'none';
  currentEditingConversationId = null;
}
// Persist the conversation described by the save-modal form fields: create a
// new record, or update the one referenced by currentEditingConversationId.
async function saveConversationFromModal() {
  // Small helper: read a trimmed form value by element id.
  const fieldValue = (id) => document.getElementById(id).value.trim();
  const title = fieldValue('conversation-title-input');
  const content = fieldValue('conversation-content-input');
  const provider = fieldValue('conversation-provider-input') || 'unknown';
  const tagsInput = fieldValue('conversation-tags-input');
  const notes = fieldValue('conversation-notes-input');
  const isFavorite = document.getElementById('conversation-favorite-input').checked;
  if (!content) {
    alert('Please enter conversation content');
    return;
  }
  // Comma-separated tags; blank entries are dropped.
  const tags = tagsInput
    ? tagsInput.split(',').map(t => t.trim()).filter(t => t)
    : [];
  const conversationData = {
    title: title || generateAutoTitle(content),
    content,
    provider,
    tags,
    notes,
    isFavorite
  };
  try {
    if (currentEditingConversationId) {
      await updateConversation(currentEditingConversationId, conversationData);
      showToast('Conversation updated!');
    } else {
      await saveConversation(conversationData);
      showToast('Conversation saved!');
    }
    closeSaveConversationModal();
    renderConversationList();
  } catch (error) {
    console.error('Error saving conversation:', error);
    alert('Failed to save conversation. ' + error.message);
  }
}
// Handle extracted conversation from ChatGPT content script
// Saves the extracted payload to the local database, switches the sidebar to
// the chat-history view, and refreshes the list. Rethrows on failure so the
// message sender can surface the error.
async function handleExtractedConversation(conversationData) {
  try {
    // Switch to chat history view
    switchToView('chat-history');
    const conversationToSave = {
      // Fall back to an auto-generated title when the extractor supplied none.
      title: conversationData.title || generateAutoTitle(conversationData.content),
      content: conversationData.content,
      // NOTE(review): defaults to 'ChatGPT' — verify whether other providers'
      // extractors route through here too.
      provider: conversationData.provider || 'ChatGPT',
      timestamp: conversationData.timestamp || Date.now(),
      tags: [],
      notes: conversationData.url ? `Extracted from: ${conversationData.url}` : '',
      isFavorite: false,
      conversationId: conversationData.conversationId,
      url: conversationData.url,
      overwriteId: conversationData.overwriteId
    };
    // Save conversation directly to database
    await saveConversation(conversationToSave);
    // Refresh conversation list and show success
    await renderConversationList();
    showToast('Conversation saved successfully!');
  } catch (error) {
    console.error('[Sidebar] Error in handleExtractedConversation:', error);
    showToast('Failed to save conversation: ' + error.message);
    throw error;
  }
}
// Populate and open the view-conversation modal for the given record id.
// Content is rendered as markdown via the global `marked` library when
// available (sanitized through sanitizeHtml), otherwise as plain text.
async function viewConversation(id) {
  const conversation = await getConversation(id);
  if (!conversation) return;
  currentViewingConversationId = id;
  // Update modal content
  document.getElementById('view-conversation-title').textContent = conversation.title;
  const contentEl = document.getElementById('view-conversation-content');
  // Render markdown content using marked library
  if (typeof marked !== 'undefined' && conversation.content) {
    try {
      // Configure marked for better security
      marked.setOptions({
        breaks: true, // Convert line breaks to <br>
        gfm: true, // GitHub Flavored Markdown
        headerIds: false, // Don't add IDs to headers
        mangle: false // Don't escape email addresses
      });
      // Parse and render markdown
      const htmlContent = marked.parse(conversation.content);
      contentEl.innerHTML = sanitizeHtml(htmlContent);
      contentEl.classList.add('markdown-content');
    } catch (error) {
      console.error('[Sidebar] Error rendering markdown:', error);
      // Fallback to plain text if markdown parsing fails
      contentEl.textContent = conversation.content;
      contentEl.classList.remove('markdown-content');
    }
  } else {
    // Fallback if marked is not available
    contentEl.textContent = conversation.content;
    contentEl.classList.remove('markdown-content');
  }
  const providerEl = document.getElementById('view-conversation-provider');
  // Get provider data for icon
  const useDarkIcons = isDarkTheme();
  const provider = getProviderById(conversation.provider.toLowerCase());
  const providerIconSrc = provider
    ? (useDarkIcons && provider.iconDark ? provider.iconDark : provider.icon)
    : '';
  // NOTE(review): providerIconSrc is interpolated unescaped — presumably it
  // comes from the bundled provider config (trusted); verify.
  const providerIconHtml = providerIconSrc
    ? `<img class="provider-icon-small" src="${providerIconSrc}" alt="${escapeHtml(conversation.provider)}">`
    : '';
  providerEl.innerHTML = `<strong>Provider:</strong> ${providerIconHtml}${escapeHtml(conversation.provider)}`;
  // Use URL field directly if available
  const conversationUrl = conversation.url;
  // Create timestamp display - make it a link if URL is available
  const timestampEl = document.getElementById('view-conversation-timestamp');
  const date = new Date(conversation.timestamp).toLocaleString();
  if (conversationUrl) {
    timestampEl.innerHTML = `<strong>Date:</strong> <a href="${escapeHtml(conversationUrl)}" target="_blank" class="conversation-link" title="Open original conversation">${date}</a>`;
  } else {
    timestampEl.innerHTML = `<strong>Date:</strong> ${date}`;
  }
  const tagsEl = document.getElementById('view-conversation-tags');
  if (conversation.tags.length > 0) {
    tagsEl.innerHTML = `<strong>Tags:</strong> ${conversation.tags.map(tag => `<span class="conversation-tag">${escapeHtml(tag)}</span>`).join(' ')}`;
  } else {
    tagsEl.innerHTML = '';
  }
  // Hide notes section if it only contains the "Extracted from:" URL
  const notesEl = document.getElementById('view-conversation-notes');
  if (conversation.notes && !conversation.notes.startsWith('Extracted from: ')) {
    notesEl.innerHTML = `<strong>Notes:</strong> ${escapeHtml(conversation.notes)}`;
    notesEl.style.display = 'block';
  } else {
    notesEl.innerHTML = '';
    notesEl.style.display = 'none';
  }
  // Show modal
  document.getElementById('view-conversation-modal').style.display = 'flex';
}
// Dismiss the viewer modal and clear the active-conversation reference.
function closeViewConversationModal() {
  const modal = document.getElementById('view-conversation-modal');
  modal.style.display = 'none';
  currentViewingConversationId = null;
}
// Copy the raw content of the currently viewed conversation to the clipboard.
async function copyConversationContent() {
  if (!currentViewingConversationId) return;
  const record = await getConversation(currentViewingConversationId);
  if (!record) return;
  try {
    await navigator.clipboard.writeText(record.content);
    showToast('Copied to clipboard!');
  } catch (error) {
    console.error('Error copying to clipboard:', error);
    showToast('Failed to copy');
  }
}
// Open the save/edit dialog pre-filled with the conversation currently shown
// in the view modal.
async function editConversationFromView() {
  if (!currentViewingConversationId) return;
  // Fix: capture the id BEFORE closing the view modal.
  // closeViewConversationModal() resets currentViewingConversationId to null,
  // so the previous code assigned null to currentEditingConversationId and
  // "Save" then created a duplicate record instead of updating this one.
  const conversationId = currentViewingConversationId;
  const conversation = await getConversation(conversationId);
  if (!conversation) return;
  // Close view modal
  closeViewConversationModal();
  // Open save modal with conversation data
  currentEditingConversationId = conversationId;
  document.getElementById('conversation-title-input').value = conversation.title;
  document.getElementById('conversation-content-input').value = conversation.content;
  document.getElementById('conversation-provider-input').value = conversation.provider;
  document.getElementById('conversation-tags-input').value = conversation.tags.join(', ');
  document.getElementById('conversation-notes-input').value = conversation.notes || '';
  document.getElementById('conversation-favorite-input').checked = conversation.isFavorite;
  document.getElementById('save-conversation-modal').style.display = 'flex';
}
// Delete the conversation currently open in the viewer, after confirmation,
// then close the modal and refresh the list.
async function deleteConversationFromView() {
  if (!currentViewingConversationId) return;
  const record = await getConversation(currentViewingConversationId);
  if (!record) return;
  const confirmed = confirm(`Delete conversation "${record.title}"?`);
  if (!confirmed) return;
  try {
    await deleteConversation(currentViewingConversationId);
    closeViewConversationModal();
    renderConversationList();
    showToast('Conversation deleted');
  } catch (error) {
    console.error('Error deleting conversation:', error);
    showToast('Failed to delete conversation');
  }
}
// Flip a conversation's favorite flag, then re-render whichever list view
// (favorites-only or full) is currently active.
async function toggleConversationFavoriteStatus(id) {
  try {
    await toggleConversationFavorite(id);
    if (isShowingHistoryFavorites) {
      filterConversations('favorites');
      return;
    }
    renderConversationList();
  } catch (error) {
    console.error('Error toggling favorite:', error);
  }
}
// Delete a conversation by id after a user confirmation, then refresh the list.
async function deleteConversationWithConfirm(id) {
  const conversation = await getConversation(id);
  if (!conversation) return;
  const confirmed = confirm(`Delete conversation "${conversation.title}"?`);
  if (!confirmed) return;
  try {
    await deleteConversation(id);
    renderConversationList();
    showToast('Conversation deleted');
  } catch (error) {
    console.error('Error deleting conversation:', error);
    showToast('Failed to delete conversation');
  }
}
// Search history management
// Persist a search query at the front of the MRU history in localStorage,
// de-duplicating and capping the list at MAX_SEARCH_HISTORY entries.
function saveSearchHistory(query) {
  if (!query || query.trim().length === 0) return;
  try {
    const stored = JSON.parse(localStorage.getItem(SEARCH_HISTORY_KEY) || '[]');
    // De-dupe, prepend the new query, and trim to the cap.
    const updated = [query, ...stored.filter(item => item !== query)]
      .slice(0, MAX_SEARCH_HISTORY);
    localStorage.setItem(SEARCH_HISTORY_KEY, JSON.stringify(updated));
    // Reload datalist
    loadSearchHistory();
  } catch (error) {
    console.error('Error saving search history:', error);
  }
}
// Populate the search <datalist> element from persisted history.
// Tolerates malformed stored JSON by logging and leaving the list unchanged.
function loadSearchHistory() {
  try {
    const history = JSON.parse(localStorage.getItem(SEARCH_HISTORY_KEY) || '[]');
    const datalist = document.getElementById('search-history-list');
    if (!datalist) return;
    const options = history.map(query => `<option value="${escapeHtml(query)}">`);
    datalist.innerHTML = options.join('');
  } catch (error) {
    console.error('Error loading search history:', error);
  }
}
// Search result count display
// Display "Found N conversation(s)" above the list; no-op if the element
// is missing.
function showSearchResultCount(count) {
  const countEl = document.getElementById('search-result-count');
  if (!countEl) return;
  const suffix = count !== 1 ? 's' : '';
  countEl.textContent = `Found ${count} conversation${suffix}`;
  countEl.style.display = 'block';
}
// Hide the search result counter (used when the search box is cleared).
function hideSearchResultCount() {
  const countEl = document.getElementById('search-result-count');
  if (!countEl) return;
  countEl.style.display = 'none';
}
// Initialize on load
// Kick off sidebar bootstrap as soon as this script is evaluated.
init();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/history-manager.test.js | JavaScript | import { describe, it, expect, beforeEach, vi } from 'vitest';
import { generateAutoTitle } from '../modules/history-manager.js';
/**
* Tests for modules/history-manager.js
*
* These tests cover critical conversation history functionality:
* - Auto-title generation
* - Input validation and sanitization
* - IndexedDB operations (save, retrieve, update, delete)
* - Search functionality
* - Tags and favorites management
*/
describe('history-manager', () => {
describe('generateAutoTitle', () => {
it('should generate title from first line', () => {
const content = 'This is the first line\nThis is the second line';
const title = generateAutoTitle(content);
expect(title).toBe('This is the first line');
});
it('should truncate long first lines', () => {
const longLine = 'a'.repeat(100);
const content = `${longLine}\nSecond line`;
const title = generateAutoTitle(content, 60);
expect(title.length).toBeLessThanOrEqual(63); // 60 + '...'
expect(title.endsWith('...')).toBe(true);
});
it('should handle single-line content', () => {
const content = 'Single line content';
const title = generateAutoTitle(content);
expect(title).toBe('Single line content');
});
it('should handle empty content', () => {
const title = generateAutoTitle('');
expect(title).toBe('Untitled Conversation');
});
it('should handle content with only whitespace', () => {
const title = generateAutoTitle(' \n \n ');
expect(title).toBe('Untitled Conversation');
});
it('should respect custom maxLength', () => {
const content = 'This is a very long title that should be truncated';
const title = generateAutoTitle(content, 20);
expect(title.length).toBeLessThanOrEqual(23); // 20 + '...'
});
it('should trim whitespace from first line', () => {
const content = ' Leading whitespace \nSecond line';
const title = generateAutoTitle(content);
expect(title).toBe('Leading whitespace');
});
});
describe('Input Validation', () => {
it('should validate required content field', () => {
const data = {
title: 'Test',
provider: 'chatgpt'
// content missing
};
// Validation would fail - content is required
expect(data.content).toBeUndefined();
});
it('should reject empty content', () => {
const data = {
title: 'Test',
content: ' ', // whitespace only
provider: 'chatgpt'
};
expect(data.content.trim().length).toBe(0);
});
it('should handle valid conversation data', () => {
const data = {
title: 'Test Conversation',
content: 'This is test content',
provider: 'chatgpt',
tags: ['test', 'example'],
notes: 'Test notes'
};
expect(data.content.trim().length).toBeGreaterThan(0);
expect(data.tags.length).toBeLessThanOrEqual(20);
});
it('should reject too many tags', () => {
const tooManyTags = Array(25).fill('tag');
expect(tooManyTags.length).toBeGreaterThan(20);
});
it('should handle missing optional fields', () => {
const data = {
content: 'Required content',
provider: 'claude'
// title, tags, notes optional
};
expect(data.content).toBeTruthy();
expect(data.title).toBeUndefined();
expect(data.tags).toBeUndefined();
expect(data.notes).toBeUndefined();
});
});
describe('Data Sanitization', () => {
it('should sanitize string input', () => {
const input = ' Test String ';
const sanitized = input.trim();
expect(sanitized).toBe('Test String');
});
it('should truncate strings exceeding max length', () => {
const longString = 'a'.repeat(300);
const maxLength = 200;
const truncated = longString.slice(0, maxLength);
expect(truncated.length).toBe(maxLength);
});
it('should handle non-string input', () => {
const input = 123;
const result = typeof input === 'string' ? input : '';
expect(result).toBe('');
});
it('should preserve valid string content', () => {
const input = 'Valid content with special chars: @#$%^&*()';
const sanitized = input.trim();
expect(sanitized).toBe(input.trim());
});
});
describe('Conversation Structure', () => {
it('should create conversation with required fields', () => {
const conversation = {
id: Date.now(),
title: 'Test',
content: 'Content',
provider: 'chatgpt',
timestamp: Date.now(),
tags: [],
favorite: false
};
expect(conversation.id).toBeTruthy();
expect(conversation.content).toBeTruthy();
expect(conversation.provider).toBeTruthy();
expect(conversation.timestamp).toBeTruthy();
expect(Array.isArray(conversation.tags)).toBe(true);
expect(typeof conversation.favorite).toBe('boolean');
});
it('should include optional fields', () => {
const conversation = {
id: Date.now(),
content: 'Content',
provider: 'claude',
timestamp: Date.now(),
tags: ['tag1', 'tag2'],
favorite: true,
notes: 'Some notes',
conversationId: 'conv-123',
url: 'https://example.com'
};
expect(conversation.notes).toBe('Some notes');
expect(conversation.conversationId).toBe('conv-123');
expect(conversation.url).toBe('https://example.com');
expect(conversation.tags.length).toBe(2);
});
it('should handle empty tags array', () => {
const conversation = {
id: Date.now(),
content: 'Content',
provider: 'gemini',
timestamp: Date.now(),
tags: []
};
expect(Array.isArray(conversation.tags)).toBe(true);
expect(conversation.tags.length).toBe(0);
});
});
describe('Search Functionality', () => {
it('should generate searchable text from conversation', () => {
const conversation = {
title: 'Test Title',
content: 'Test Content',
provider: 'chatgpt',
notes: 'Test Notes',
tags: ['tag1', 'tag2']
};
const searchText = [
conversation.title,
conversation.content,
conversation.provider,
conversation.notes,
...conversation.tags
].join(' ').toLowerCase();
expect(searchText).toContain('test title');
expect(searchText).toContain('test content');
expect(searchText).toContain('chatgpt');
expect(searchText).toContain('tag1');
expect(searchText).toContain('tag2');
});
it('should handle missing optional fields in search text', () => {
const conversation = {
title: '',
content: 'Content',
provider: 'claude',
notes: '',
tags: []
};
const searchText = [
conversation.title,
conversation.content,
conversation.provider,
conversation.notes || '',
...conversation.tags
].join(' ').toLowerCase();
expect(searchText).toContain('content');
expect(searchText).toContain('claude');
});
it('should be case-insensitive', () => {
const conversation = {
title: 'UPPERCASE Title',
content: 'MixedCase Content',
provider: 'ChatGPT',
notes: '',
tags: ['Tag1']
};
const searchText = [
conversation.title,
conversation.content,
conversation.provider,
conversation.notes || '',
...conversation.tags
].join(' ').toLowerCase();
expect(searchText).toBe(searchText.toLowerCase());
expect(searchText).toContain('uppercase title');
expect(searchText).toContain('mixedcase content');
});
});
describe('Quota and Error Handling', () => {
it('should detect quota exceeded error', () => {
const quotaError = { name: 'QuotaExceededError' };
expect(quotaError.name).toBe('QuotaExceededError');
});
it('should detect quota exceeded by code', () => {
const quotaError = { code: 22 };
expect(quotaError.code).toBe(22);
});
it('should handle non-quota errors', () => {
const otherError = { name: 'NetworkError' };
expect(otherError.name).not.toBe('QuotaExceededError');
expect(otherError.code).toBeUndefined();
});
it('should build appropriate quota error message', () => {
const errorMessage = 'Storage quota exceeded. Delete old conversations to free space.';
expect(errorMessage).toContain('quota exceeded');
expect(errorMessage).toContain('Delete old conversations');
});
});
describe('Favorites and Tags', () => {
it('should toggle favorite status', () => {
let favorite = false;
favorite = !favorite;
expect(favorite).toBe(true);
favorite = !favorite;
expect(favorite).toBe(false);
});
it('should handle tag arrays', () => {
const tags = ['javascript', 'testing', 'vitest'];
expect(Array.isArray(tags)).toBe(true);
expect(tags.length).toBe(3);
expect(tags.includes('testing')).toBe(true);
});
it('should add tag to conversation', () => {
const tags = ['existing'];
const newTag = 'new';
if (!tags.includes(newTag)) {
tags.push(newTag);
}
expect(tags).toContain('new');
expect(tags.length).toBe(2);
});
it('should remove tag from conversation', () => {
const tags = ['tag1', 'tag2', 'tag3'];
const filtered = tags.filter(t => t !== 'tag2');
expect(filtered).toEqual(['tag1', 'tag3']);
expect(filtered.length).toBe(2);
});
it('should prevent duplicate tags', () => {
const tags = ['existing'];
const newTag = 'existing';
if (!tags.includes(newTag)) {
tags.push(newTag);
}
expect(tags.length).toBe(1);
});
});
describe('Provider Filtering', () => {
it('should filter conversations by provider', () => {
const conversations = [
{ provider: 'chatgpt', content: 'Test 1' },
{ provider: 'claude', content: 'Test 2' },
{ provider: 'chatgpt', content: 'Test 3' }
];
const chatgptConvs = conversations.filter(c => c.provider === 'chatgpt');
expect(chatgptConvs.length).toBe(2);
expect(chatgptConvs.every(c => c.provider === 'chatgpt')).toBe(true);
});
it('should handle invalid provider filter', () => {
const conversations = [
{ provider: 'chatgpt', content: 'Test 1' },
{ provider: 'claude', content: 'Test 2' }
];
const filtered = conversations.filter(c => c.provider === 'nonexistent');
expect(filtered.length).toBe(0);
});
});
describe('Timestamp Handling', () => {
it('should create timestamp on save', () => {
const timestamp = Date.now();
const conversation = {
content: 'Test',
provider: 'chatgpt',
timestamp
};
expect(typeof conversation.timestamp).toBe('number');
expect(conversation.timestamp).toBeGreaterThan(0);
});
it('should sort conversations by timestamp', () => {
const conversations = [
{ timestamp: 3000, content: 'Third' },
{ timestamp: 1000, content: 'First' },
{ timestamp: 2000, content: 'Second' }
];
const sorted = conversations.sort((a, b) => b.timestamp - a.timestamp);
expect(sorted[0].content).toBe('Third');
expect(sorted[1].content).toBe('Second');
expect(sorted[2].content).toBe('First');
});
});
describe('Conversation ID Handling', () => {
it('should handle conversations with conversationId', () => {
const conversation = {
content: 'Test',
provider: 'chatgpt',
conversationId: 'chat-123456',
timestamp: Date.now()
};
expect(conversation.conversationId).toBe('chat-123456');
});
it('should find conversation by conversationId', () => {
const conversations = [
{ conversationId: 'chat-1', content: 'Test 1' },
{ conversationId: 'chat-2', content: 'Test 2' },
{ conversationId: 'chat-3', content: 'Test 3' }
];
const found = conversations.find(c => c.conversationId === 'chat-2');
expect(found).toBeDefined();
expect(found.content).toBe('Test 2');
});
it('should handle duplicate detection', () => {
const existing = { conversationId: 'chat-123' };
const newConv = { conversationId: 'chat-123' };
const isDuplicate = existing.conversationId === newConv.conversationId;
expect(isDuplicate).toBe(true);
});
});
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/html-utils.test.js | JavaScript | // Tests for HTML utility functions - Critical for XSS protection
import { describe, it, expect } from 'vitest';
import { escapeHtml, html, unsafeHtml, renderList } from '../modules/html-utils.js';
// Tests for HTML utility functions — critical for XSS protection.
//
// FIX: many expected values in this suite had their HTML entities
// (&lt; &gt; &amp;) decoded back into raw characters by a copy/extraction
// step. That made the assertions claim escapeHtml()/html`` return their
// input unchanged (contradicting their documented purpose) and made the
// "Real-world usage" test self-contradictory: `not.toContain('<script>')`
// immediately followed by `toContain('<script>')` with the same literal.
// The escaped expected forms are restored below. Per the suite's own notes,
// escapeHtml uses DOM textContent, so only `<`, `>`, `&` are escaped —
// quotes are intentionally left alone.
describe('html-utils module', () => {
  describe('escapeHtml', () => {
    it('should escape HTML special characters', () => {
      expect(escapeHtml('<script>alert("xss")</script>')).toBe(
        '&lt;script&gt;alert("xss")&lt;/script&gt;'
      );
    });

    it('should escape ampersands', () => {
      expect(escapeHtml('Tom & Jerry')).toBe('Tom &amp; Jerry');
    });

    it('should NOT escape quotes (DOM textContent approach)', () => {
      // Note: escapeHtml uses DOM textContent which doesn't escape quotes
      // This is safe because quotes only matter in attribute contexts
      expect(escapeHtml('"quoted"')).toBe('"quoted"');
      expect(escapeHtml("it's")).toBe("it's");
    });

    it('should handle null and undefined', () => {
      expect(escapeHtml(null)).toBe('');
      expect(escapeHtml(undefined)).toBe('');
    });

    it('should convert non-strings to strings', () => {
      expect(escapeHtml(123)).toBe('123');
      expect(escapeHtml(true)).toBe('true');
    });

    it('should escape HTML tags and ampersands', () => {
      // Quotes are NOT escaped by DOM textContent (only tags and ampersands)
      expect(escapeHtml('<div class="test" data-value=\'123\'>A & B</div>')).toBe(
        '&lt;div class="test" data-value=\'123\'&gt;A &amp; B&lt;/div&gt;'
      );
    });
  });

  describe('html tagged template', () => {
    it('should escape interpolated values by default', () => {
      const userInput = '<script>alert("xss")</script>';
      const result = html`<div>${userInput}</div>`;
      // Template text (the outer <div>) is trusted; only interpolations escape.
      expect(result).toBe('<div>&lt;script&gt;alert("xss")&lt;/script&gt;</div>');
    });

    it('should handle multiple interpolations', () => {
      const name = '<script>evil</script>';
      const message = 'Hello & goodbye';
      const result = html`<div>${name}: ${message}</div>`;
      expect(result).toBe(
        '<div>&lt;script&gt;evil&lt;/script&gt;: Hello &amp; goodbye</div>'
      );
    });

    it('should handle null and undefined values', () => {
      const result = html`<div>${null} ${undefined}</div>`;
      expect(result).toBe('<div> </div>');
    });

    it('should not escape safe HTML marked with unsafeHtml', () => {
      const safeContent = unsafeHtml('<strong>Bold</strong>');
      const result = html`<div>${safeContent}</div>`;
      expect(result).toBe('<div><strong>Bold</strong></div>');
    });

    it('should escape regular values but not safe HTML', () => {
      const unsafe = '<script>bad</script>';
      const safe = unsafeHtml('<em>emphasized</em>');
      const result = html`<div>${unsafe} ${safe}</div>`;
      expect(result).toBe(
        '<div>&lt;script&gt;bad&lt;/script&gt; <em>emphasized</em></div>'
      );
    });
  });

  describe('unsafeHtml', () => {
    it('should mark HTML as safe', () => {
      const safe = unsafeHtml('<strong>test</strong>');
      expect(safe).toHaveProperty('__isSafeHtml', true);
      expect(safe).toHaveProperty('html', '<strong>test</strong>');
    });

    it('should not escape dangerous content when used', () => {
      const dangerous = '<script>alert(1)</script>';
      const marked = unsafeHtml(dangerous);
      const result = html`${marked}`;
      expect(result).toBe(dangerous);
    });
  });

  describe('renderList', () => {
    it('should render array with template function', () => {
      const items = ['apple', 'banana', 'cherry'];
      const result = renderList(items, item => html`<li>${item}</li>`);
      expect(result).toBe('<li>apple</li><li>banana</li><li>cherry</li>');
    });

    it('should escape values in template function', () => {
      const items = ['<script>bad</script>', 'safe'];
      const result = renderList(items, item => html`<li>${item}</li>`);
      expect(result).toBe(
        '<li>&lt;script&gt;bad&lt;/script&gt;</li><li>safe</li>'
      );
    });

    it('should use custom separator', () => {
      const items = ['a', 'b', 'c'];
      const result = renderList(items, item => html`<span>${item}</span>`, ', ');
      expect(result).toBe('<span>a</span>, <span>b</span>, <span>c</span>');
    });

    it('should return empty string for non-array', () => {
      expect(renderList(null, item => item)).toBe('');
      expect(renderList(undefined, item => item)).toBe('');
      expect(renderList('not an array', item => item)).toBe('');
    });

    it('should handle empty array', () => {
      expect(renderList([], item => html`<li>${item}</li>`)).toBe('');
    });
  });

  describe('XSS attack prevention', () => {
    it('should NOT prevent attribute injection (by design - dont interpolate in attributes)', () => {
      // IMPORTANT: This test documents a LIMITATION, not a feature!
      // The escapeHtml function escapes angle brackets but NOT quotes.
      // This means you MUST NOT interpolate user input into HTML attributes!
      const malicious = '" onload="alert(1)"';
      const result = html`<img src="${malicious}">`;
      // The quotes are NOT escaped, so the malicious payload works
      expect(result).toBe('<img src="" onload="alert(1)"">');
      // SECURITY: Never do this in production! Use setAttribute() or data attributes
    });

    it('should escape event handler tags in content', () => {
      const malicious = '<div onclick="alert(1)">Click me</div>';
      const result = html`${malicious}`;
      // The < and > are escaped, making the onclick harmless text
      expect(result).toContain('&lt;div onclick='); // onclick is now just text
      expect(result).toContain('&lt;/div&gt;');
    });

    it('should NOT prevent javascript: URLs (limitation - dont interpolate URLs)', () => {
      // IMPORTANT: This documents a LIMITATION!
      // The function doesn't filter dangerous URLs
      const malicious = 'javascript:alert(1)';
      const result = html`<a href="${malicious}">Link</a>`;
      expect(result).toBe('<a href="javascript:alert(1)">Link</a>');
      // Note: The function escapes content but doesn't filter URLs
      // URL filtering should happen at a different layer
    });

    it('should escape tags in data: URLs but not prevent the URL', () => {
      // The function escapes < and > within the URL string, but doesn't block data: URLs
      const malicious = 'data:text/html,<script>alert(1)</script>';
      const result = html`<iframe src="${malicious}"></iframe>`;
      expect(result).toBe(
        '<iframe src="data:text/html,&lt;script&gt;alert(1)&lt;/script&gt;"></iframe>'
      );
      // Note: Tags are escaped but the data: URL itself is allowed
    });

    it('should handle nested XSS attempts', () => {
      const nested = '<<script>alert(1)</script>';
      const result = html`<div>${nested}</div>`;
      expect(result).not.toContain('<script>');
      expect(result).toContain('&lt;&lt;script&gt;');
    });
  });

  describe('Real-world usage patterns', () => {
    it('should safely render user-generated content', () => {
      const userComment = '<script>steal(document.cookie)</script>Nice post!';
      const username = 'Admin<script>alert(1)</script>';
      const result = html`
        <div class="comment">
          <strong>${username}</strong>: ${userComment}
        </div>
      `;
      // No raw <script> survives; the escaped text form is present instead.
      expect(result).not.toContain('<script>');
      expect(result).toContain('&lt;script&gt;');
    });

    it('should safely render search results with special chars', () => {
      const searchTerm = 'Tom & Jerry <3';
      const result = html`<div>Results for: "${searchTerm}"</div>`;
      expect(result).toBe(
        '<div>Results for: "Tom &amp; Jerry &lt;3"</div>'
      );
    });

    it('should handle markdown-like content', () => {
      const markdown = '**bold** <em>italic</em>';
      const result = html`<p>${markdown}</p>`;
      expect(result).toBe('<p>**bold** &lt;em&gt;italic&lt;/em&gt;</p>');
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/messaging.test.js | JavaScript | import { describe, it, expect, vi, beforeEach } from 'vitest';
import { sendMessageWithTimeout, notifyMessage } from '../modules/messaging.js';
// Unit tests for modules/messaging.js — a Promise-based wrapper around
// chrome.runtime.sendMessage that adds a configurable timeout and
// fire-and-forget notifications. chrome.* is the global mock from setup.js.
describe('messaging module', () => {
  beforeEach(() => {
    // Reset mock call history and clear any simulated runtime error left
    // over from a previous test (lastError is read by the wrapper).
    vi.clearAllMocks();
    chrome.runtime.lastError = null;
  });

  describe('sendMessageWithTimeout', () => {
    it('should send message and resolve with response', async () => {
      const mockResponse = { success: true, data: 'test' };
      // Simulate the background script answering immediately via callback.
      chrome.runtime.sendMessage.mockImplementation((msg, callback) => {
        callback(mockResponse);
      });
      const result = await sendMessageWithTimeout({ action: 'test' });
      expect(result).toEqual(mockResponse);
      expect(chrome.runtime.sendMessage).toHaveBeenCalledWith(
        { action: 'test' },
        expect.any(Function)
      );
    });

    it('should reject on timeout', async () => {
      chrome.runtime.sendMessage.mockImplementation(() => {
        // Never call callback - simulate timeout
      });
      // Short real timeout keeps this test fast; message includes the action.
      await expect(
        sendMessageWithTimeout({ action: 'test' }, { timeout: 100 })
      ).rejects.toThrow('Message timeout: test');
    });

    it('should reject on chrome.runtime.lastError', async () => {
      // lastError set before the callback fires, as Chrome does on failure.
      chrome.runtime.lastError = { message: 'Extension context invalidated' };
      chrome.runtime.sendMessage.mockImplementation((msg, callback) => {
        callback(null);
      });
      await expect(
        sendMessageWithTimeout({ action: 'test' })
      ).rejects.toThrow('Extension context invalidated');
    });

    it('should resolve immediately when expectResponse is false', async () => {
      // Fire-and-forget mode: no callback wait, resolves with undefined.
      const result = await sendMessageWithTimeout(
        { action: 'notify' },
        { expectResponse: false }
      );
      expect(result).toBeUndefined();
    });

    it('should use custom timeout value', async () => {
      // Fake timers let us prove the rejection fires at exactly `timeout` ms.
      vi.useFakeTimers();
      chrome.runtime.sendMessage.mockImplementation(() => {
        // Never respond
      });
      const promise = sendMessageWithTimeout(
        { action: 'test' },
        { timeout: 5000 }
      );
      vi.advanceTimersByTime(4999);
      await Promise.resolve(); // Let promises flush
      // Should not have timed out yet
      vi.advanceTimersByTime(1);
      await expect(promise).rejects.toThrow('Message timeout');
      vi.useRealTimers();
    });
  });

  describe('notifyMessage', () => {
    it('should send message without expecting response', async () => {
      await notifyMessage({ action: 'notify', data: 'test' });
      // A callback is still passed (to consume lastError), but not awaited.
      expect(chrome.runtime.sendMessage).toHaveBeenCalledWith(
        { action: 'notify', data: 'test' },
        expect.any(Function)
      );
    });

    it('should resolve immediately', async () => {
      const result = await notifyMessage({ action: 'notify' });
      expect(result).toBeUndefined();
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/prompt-manager.test.js | JavaScript | // Tests for Prompt Manager - Validation and Security
// NOTE: Full IndexedDB integration tests require fake-indexeddb package
// These tests focus on critical validation that prevents XSS and data corruption
import { describe, it, expect } from 'vitest';
import {
savePrompt,
importPrompts,
importDefaultLibrary
} from '../modules/prompt-manager.js';
// Validation tests for savePrompt(). IndexedDB is stubbed to always fail
// (see tests/setup.js), so the pattern `rejects.not.toThrow('<validation
// message>')` means: the call still rejects (DB failure) but NOT with a
// validation error — i.e. validation passed and execution reached the DB.
describe('prompt-manager input validation', () => {
  describe('Required field validation', () => {
    it('should reject prompt with empty content', async () => {
      await expect(savePrompt({ content: '' })).rejects.toThrow(
        'Prompt content is required'
      );
    });

    it('should reject prompt with only whitespace content', async () => {
      await expect(savePrompt({ content: ' ' })).rejects.toThrow(
        'Prompt content is required'
      );
    });

    it('should reject prompt with null content', async () => {
      await expect(savePrompt({ content: null })).rejects.toThrow(
        'Prompt content is required'
      );
    });

    it('should reject prompt with undefined content', async () => {
      await expect(savePrompt({})).rejects.toThrow(
        'Prompt content is required'
      );
    });
  });

  describe('Length validation', () => {
    it('should reject content exceeding 50000 characters', async () => {
      const longContent = 'a'.repeat(50001);
      await expect(savePrompt({ content: longContent })).rejects.toThrow(
        'Prompt content must be less than 50000 characters'
      );
    });

    it('should accept content at exactly 50000 characters', async () => {
      const maxContent = 'a'.repeat(50000);
      // This will fail if we can't connect to DB, but validates the length check happens first
      await expect(savePrompt({ content: maxContent })).rejects.not.toThrow(
        'Prompt content must be less than 50000 characters'
      );
    });

    it('should reject title exceeding 200 characters', async () => {
      const longTitle = 'a'.repeat(201);
      await expect(
        savePrompt({ content: 'valid', title: longTitle })
      ).rejects.toThrow('Title must be less than 200 characters');
    });

    it('should reject category exceeding 50 characters', async () => {
      const longCategory = 'a'.repeat(51);
      await expect(
        savePrompt({ content: 'valid', category: longCategory })
      ).rejects.toThrow('Category must be less than 50 characters');
    });

    it('should reject more than 20 tags', async () => {
      const tooManyTags = Array(21).fill('tag');
      await expect(
        savePrompt({ content: 'valid', tags: tooManyTags })
      ).rejects.toThrow('Maximum 20 tags allowed');
    });
  });

  describe('Security: XSS attempt validation', () => {
    it('should not reject HTML/script content (storage layer accepts all text)', async () => {
      // XSS protection happens at render time, not storage time
      // This ensures we don't accidentally reject legitimate content containing < or >
      const htmlContent = '<script>alert("xss")</script>';
      // Will fail due to DB connection, not validation
      await expect(savePrompt({ content: htmlContent })).rejects.not.toThrow(
        'Prompt content is required'
      );
      await expect(savePrompt({ content: htmlContent })).rejects.not.toThrow(
        'must be less than'
      );
    });

    it('should accept mathematical expressions with angle brackets', async () => {
      const mathContent = '5 < 10 and 20 > 15';
      await expect(savePrompt({ content: mathContent })).rejects.not.toThrow(
        'Prompt content is required'
      );
    });
  });
});
// Structure validation for importPrompts()/importDefaultLibrary().
// Both must reject anything that is not an object with a `prompts` array.
describe('prompt-manager import validation', () => {
  describe('Import data structure validation', () => {
    it('should reject invalid import data format', async () => {
      await expect(importPrompts(null)).rejects.toThrow('Invalid import data format');
      await expect(importPrompts({})).rejects.toThrow('Invalid import data format');
      await expect(importPrompts({ prompts: 'not-array' })).rejects.toThrow(
        'Invalid import data format'
      );
    });

    it('should reject invalid default library format', async () => {
      // Same shape check, different error message for the bundled library path.
      await expect(importDefaultLibrary(null)).rejects.toThrow(
        'Invalid library data format'
      );
      await expect(importDefaultLibrary({})).rejects.toThrow(
        'Invalid library data format'
      );
      await expect(importDefaultLibrary({ prompts: 'not-array' })).rejects.toThrow(
        'Invalid library data format'
      );
    });
  });

  describe('Import strategy validation', () => {
    it('should accept valid import data structure for skip strategy', async () => {
      const validData = {
        version: '1.0',
        prompts: []
      };
      // Will fail due to DB but validates structure is accepted
      // (an empty prompts array never touches the store, so a summary
      // object with counters is still returned).
      const result = await importPrompts(validData, 'skip');
      expect(result).toHaveProperty('imported');
      expect(result).toHaveProperty('skipped');
      expect(result).toHaveProperty('errors');
    });

    it('should accept valid import data structure for overwrite strategy', async () => {
      const validData = {
        version: '1.0',
        prompts: []
      };
      const result = await importPrompts(validData, 'overwrite');
      expect(result).toHaveProperty('imported');
      expect(result).toHaveProperty('skipped');
      expect(result).toHaveProperty('errors');
    });
  });
});
// Boundary and edge-case coverage for savePrompt() validation.
// As elsewhere in this file, `rejects.not.toThrow('<validation message>')`
// means validation passed and the rejection came from the stubbed DB layer.
describe('prompt-manager validation edge cases', () => {
  it('should handle empty string after trimming', async () => {
    await expect(savePrompt({ content: '   ' })).rejects.toThrow(
      'Prompt content is required'
    );
  });

  it('should validate multiple errors at once', async () => {
    // Every field is invalid on purpose; the thrown message must at least
    // mention the content error.
    const badInput = {
      content: '', // Empty - error
      title: 'a'.repeat(201), // Too long - error
      category: 'a'.repeat(51), // Too long - error
      tags: Array(21).fill('tag') // Too many - error
    };
    try {
      await savePrompt(badInput);
      // Should not reach here
      expect(true).toBe(false);
    } catch (error) {
      // Should contain at least the content error
      expect(error.message).toContain('Prompt content is required');
    }
  });

  it('should handle non-string content type', async () => {
    // Numbers, booleans and arrays are all rejected as missing content.
    for (const value of [12345, true, []]) {
      await expect(savePrompt({ content: value })).rejects.toThrow(
        'Prompt content is required'
      );
    }
  });

  it('should handle boundary conditions for tags array', async () => {
    // Exactly 20 tags should be OK ('tag0' .. 'tag19').
    const twentyTags = Array.from({ length: 20 }, (_, i) => `tag${i}`);
    await expect(
      savePrompt({ content: 'valid', tags: twentyTags })
    ).rejects.not.toThrow('Maximum 20 tags allowed');
  });

  it('should handle special characters in content', async () => {
    const punctuation = '!@#$%^&*()_+-=[]{}|;:",./<>?`~';
    // Should not reject special characters
    await expect(savePrompt({ content: punctuation })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });

  it('should handle unicode characters', async () => {
    const unicodeSample = '你好世界 🌍 مرحبا العالم';
    await expect(savePrompt({ content: unicodeSample })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });

  it('should handle newlines and tabs', async () => {
    const multilineText = 'Line 1\nLine 2\tTabbed\r\nWindows Line';
    await expect(savePrompt({ content: multilineText })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });
});
// Hostile-input smoke tests: injection-shaped payloads must pass validation
// unchanged (sanitization is a render-time concern, not a storage concern).
describe('prompt-manager validation prevents injection attacks', () => {
  it('should not break on SQL-like injection attempts', async () => {
    const payload = "'; DROP TABLE prompts; --";
    await expect(savePrompt({ content: payload })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });

  it('should not break on JavaScript injection in title', async () => {
    const payload = 'javascript:alert(1)';
    await expect(
      savePrompt({ content: 'valid', title: payload })
    ).rejects.not.toThrow('Title must be less than');
  });

  it('should handle data: URLs in content', async () => {
    const payload = 'data:text/html,<script>alert(1)</script>';
    await expect(savePrompt({ content: payload })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });

  it('should handle event handlers in content', async () => {
    const payload = '<img src=x onerror=alert(1)>';
    await expect(savePrompt({ content: payload })).rejects.not.toThrow(
      'Prompt content is required'
    );
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/providers.test.js | JavaScript | import { describe, it, expect, vi, beforeEach } from 'vitest';
import {
PROVIDERS,
getProviderById,
getProviderByIdWithSettings,
getEnabledProviders,
} from '../modules/providers.js';
// Unit tests for modules/providers.js: the static PROVIDERS registry plus
// the settings-aware lookup helpers (chrome.storage is mocked in setup.js).
describe('providers module', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe('PROVIDERS constant', () => {
    it('should contain all expected providers', () => {
      expect(PROVIDERS).toHaveLength(6);
      // Order matters: it drives the sidebar button order.
      expect(PROVIDERS.map(({ id }) => id)).toEqual([
        'chatgpt',
        'claude',
        'gemini',
        'google',
        'grok',
        'deepseek',
      ]);
    });

    it('should have required properties for each provider', () => {
      const requiredKeys = ['id', 'name', 'url', 'icon', 'iconDark', 'enabled'];
      for (const entry of PROVIDERS) {
        for (const key of requiredKeys) {
          expect(entry).toHaveProperty(key);
        }
      }
    });
  });

  describe('getProviderById', () => {
    it('should return provider by id', () => {
      const found = getProviderById('chatgpt');
      expect(found).toBeDefined();
      expect(found.id).toBe('chatgpt');
      expect(found.name).toBe('ChatGPT');
    });

    it('should return undefined for non-existent provider', () => {
      expect(getProviderById('nonexistent')).toBeUndefined();
    });
  });

  describe('getProviderByIdWithSettings', () => {
    it('should return provider with default URL', async () => {
      // Empty settings object -> built-in URL is used.
      chrome.storage.sync.get.mockResolvedValue({});
      const found = await getProviderByIdWithSettings('chatgpt');
      expect(found).toBeDefined();
      expect(found.url).toBe('https://chatgpt.com');
    });

    it('should return null for non-existent provider', async () => {
      const found = await getProviderByIdWithSettings('nonexistent');
      expect(found).toBeNull();
    });
  });

  describe('getEnabledProviders', () => {
    it('should return enabled providers from settings', async () => {
      chrome.storage.sync.get.mockResolvedValue({
        enabledProviders: ['chatgpt', 'claude'],
      });
      const enabled = await getEnabledProviders();
      expect(enabled).toHaveLength(2);
      expect(enabled[0].id).toBe('chatgpt');
      expect(enabled[1].id).toBe('claude');
    });

    it('should use default settings when not provided', async () => {
      // Echo the defaults back, mimicking chrome.storage.sync semantics.
      chrome.storage.sync.get.mockImplementation((defaults) =>
        Promise.resolve(defaults)
      );
      const enabled = await getEnabledProviders();
      expect(enabled.length).toBeGreaterThan(0);
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/service-worker.test.js | JavaScript | import { describe, it, expect, beforeEach, vi } from 'vitest';
/**
* Tests for background/service-worker.js
*
* These tests cover critical background script functionality:
* - Context menu creation and updates
* - Message handling (save conversation, check duplicates, version check)
* - Side panel state management
* - Keyboard shortcut handling
*/
// NOTE(review): this suite never imports background/service-worker.js —
// every test asserts on locally-built mocks and message-shaped literals,
// so it validates the Chrome API scaffolding and message contracts rather
// than the worker's actual logic. TODO: confirm whether importing the
// worker (or extracting its handlers) is feasible for real coverage.
describe('service-worker', () => {
  // Mock Chrome APIs
  beforeEach(() => {
    // Rebuild the full chrome.* surface the service worker touches, so
    // each test starts from pristine spies.
    global.chrome = {
      runtime: {
        onInstalled: { addListener: vi.fn() },
        onStartup: { addListener: vi.fn() },
        onMessage: { addListener: vi.fn() },
        getManifest: vi.fn(() => ({ version: '1.6.0' }))
      },
      storage: {
        sync: {
          get: vi.fn((defaults) => Promise.resolve(defaults)),
          set: vi.fn(() => Promise.resolve())
        },
        onChanged: { addListener: vi.fn() }
      },
      contextMenus: {
        create: vi.fn(),
        removeAll: vi.fn(() => Promise.resolve()),
        onClicked: { addListener: vi.fn() }
      },
      sidePanel: {
        open: vi.fn(() => Promise.resolve()),
        setPanelBehavior: vi.fn(() => Promise.resolve())
      },
      action: {
        onClicked: { addListener: vi.fn() }
      },
      commands: {
        onCommand: { addListener: vi.fn() }
      },
      windows: {
        onRemoved: { addListener: vi.fn() }
      },
      tabs: {
        sendMessage: vi.fn(() => Promise.resolve({ success: true }))
      }
    };
  });

  describe('Context Menu Creation', () => {
    it('should create context menus with enabled providers', async () => {
      const { chrome } = global;
      // Simulate enabled providers
      chrome.storage.sync.get = vi.fn(() => Promise.resolve({
        enabledProviders: ['chatgpt', 'claude', 'gemini']
      }));
      // Note: We would need to import and test createContextMenus function
      // For now, we verify the Chrome API setup
      expect(chrome.contextMenus).toBeDefined();
      expect(chrome.contextMenus.create).toBeDefined();
      expect(chrome.contextMenus.removeAll).toBeDefined();
    });

    it('should handle context menu API errors gracefully', async () => {
      const { chrome } = global;
      chrome.contextMenus.create = vi.fn(() => {
        throw new Error('Context menu error');
      });
      // Should not throw
      // NOTE(review): the assertion below expects a throw, contradicting
      // the comment above — it only proves the mock throws, not that the
      // worker handles it. Confirm intended behavior.
      expect(() => {
        chrome.contextMenus.create({ id: 'test', title: 'Test' });
      }).toThrow();
    });
  });

  describe('Message Handling', () => {
    it('should register message listener', () => {
      const { chrome } = global;
      expect(chrome.runtime.onMessage.addListener).toBeDefined();
    });

    it('should handle saveConversationFromPage message', async () => {
      const { chrome } = global;
      const mockSendResponse = vi.fn();
      const mockSender = { tab: { id: 1, windowId: 1 } };
      const message = {
        action: 'saveConversationFromPage',
        payload: {
          title: 'Test Conversation',
          content: 'Test content',
          provider: 'chatgpt'
        }
      };
      // Verify message structure (shape-only check; the handler itself
      // is not invoked here).
      expect(message.action).toBe('saveConversationFromPage');
      expect(message.payload).toBeDefined();
      expect(message.payload.provider).toBe('chatgpt');
    });

    it('should handle checkDuplicateConversation message', async () => {
      const message = {
        action: 'checkDuplicateConversation',
        payload: {
          conversationId: 'test-123'
        }
      };
      expect(message.action).toBe('checkDuplicateConversation');
      expect(message.payload.conversationId).toBe('test-123');
    });

    it('should handle fetchLatestCommit message', async () => {
      const message = {
        action: 'fetchLatestCommit'
      };
      expect(message.action).toBe('fetchLatestCommit');
    });

    it('should validate message payload structure', () => {
      const validMessage = {
        action: 'saveConversationFromPage',
        payload: {
          title: 'Test',
          content: 'Content',
          provider: 'chatgpt'
        }
      };
      expect(validMessage.action).toBeTruthy();
      expect(validMessage.payload).toBeTruthy();
      expect(typeof validMessage.payload).toBe('object');
    });
  });

  describe('Side Panel State Management', () => {
    it('should track side panel state per window', () => {
      // Mirrors the worker's windowId -> isOpen Map bookkeeping.
      const sidePanelState = new Map();
      sidePanelState.set(1, true);
      sidePanelState.set(2, false);
      expect(sidePanelState.get(1)).toBe(true);
      expect(sidePanelState.get(2)).toBe(false);
      expect(sidePanelState.get(3)).toBeUndefined();
    });

    it('should clean up state when window closes', () => {
      const sidePanelState = new Map();
      sidePanelState.set(1, true);
      sidePanelState.set(2, true);
      // Simulate window close
      sidePanelState.delete(1);
      expect(sidePanelState.has(1)).toBe(false);
      expect(sidePanelState.has(2)).toBe(true);
    });

    it('should open side panel for window', async () => {
      const { chrome } = global;
      const windowId = 1;
      await chrome.sidePanel.open({ windowId });
      expect(chrome.sidePanel.open).toHaveBeenCalledWith({ windowId: 1 });
    });
  });

  describe('Settings Management', () => {
    it('should load keyboard shortcut setting', async () => {
      const { chrome } = global;
      chrome.storage.sync.get = vi.fn(() => Promise.resolve({
        keyboardShortcutEnabled: true
      }));
      const result = await chrome.storage.sync.get({ keyboardShortcutEnabled: true });
      expect(result.keyboardShortcutEnabled).toBe(true);
    });

    it('should handle missing settings with defaults', async () => {
      const { chrome } = global;
      // chrome.storage.sync.get(defaults) resolves with the defaults when
      // nothing is stored; the mock reproduces that contract.
      chrome.storage.sync.get = vi.fn((defaults) => Promise.resolve(defaults));
      const result = await chrome.storage.sync.get({
        keyboardShortcutEnabled: true,
        enabledProviders: ['chatgpt', 'claude']
      });
      expect(result.keyboardShortcutEnabled).toBe(true);
      expect(result.enabledProviders).toEqual(['chatgpt', 'claude']);
    });
  });

  describe('Error Handling', () => {
    it('should handle storage errors gracefully', async () => {
      const { chrome } = global;
      chrome.storage.sync.get = vi.fn(() => Promise.reject(new Error('Storage error')));
      await expect(chrome.storage.sync.get({})).rejects.toThrow('Storage error');
    });

    it('should handle side panel open errors', async () => {
      const { chrome } = global;
      chrome.sidePanel.open = vi.fn(() => Promise.reject(new Error('Side panel not available')));
      await expect(chrome.sidePanel.open({ windowId: 1 })).rejects.toThrow();
    });

    it('should handle context menu creation errors', async () => {
      const { chrome } = global;
      chrome.contextMenus.removeAll = vi.fn(() => Promise.reject(new Error('Context menu error')));
      await expect(chrome.contextMenus.removeAll()).rejects.toThrow();
    });
  });

  describe('GitHub API Version Check', () => {
    it('should fetch latest commit information', async () => {
      // Stub global fetch with a GitHub commits API-shaped payload.
      global.fetch = vi.fn(() => Promise.resolve({
        ok: true,
        json: () => Promise.resolve({
          sha: 'abc123def456',
          commit: {
            committer: { date: '2025-10-19T00:00:00Z' },
            message: 'Update feature'
          }
        })
      }));
      const response = await fetch('https://api.github.com/repos/xiaolai/insidebar-ai/commits/main');
      const data = await response.json();
      expect(data.sha).toBe('abc123def456');
      expect(data.commit.message).toBe('Update feature');
    });

    it('should handle GitHub API errors', async () => {
      global.fetch = vi.fn(() => Promise.resolve({
        ok: false,
        status: 404
      }));
      const response = await fetch('https://api.github.com/repos/xiaolai/insidebar-ai/commits/main');
      expect(response.ok).toBe(false);
      expect(response.status).toBe(404);
    });
  });

  describe('Integration Tests', () => {
    it('should handle complete flow of opening sidebar and switching provider', async () => {
      const { chrome } = global;
      const windowId = 1;
      const providerId = 'chatgpt';
      // Open side panel
      await chrome.sidePanel.open({ windowId });
      expect(chrome.sidePanel.open).toHaveBeenCalledWith({ windowId });
      // Verify the flow can complete without errors
      // NOTE(review): trivially-true assertion — placeholder for a real
      // end-to-end check once the worker is importable.
      expect(true).toBe(true);
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/settings.test.js | JavaScript | import { describe, it, expect, vi, beforeEach } from 'vitest';
import {
getSettings,
getSetting,
saveSetting,
saveSettings,
resetSettings,
exportSettings,
importSettings,
} from '../modules/settings.js';
// Unit tests for modules/settings.js — chrome.storage.sync-backed settings
// with a chrome.storage.local fallback when sync is unavailable.
describe('settings module', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe('getSettings', () => {
    it('should return settings from chrome.storage.sync', async () => {
      const mockSettings = {
        enabledProviders: ['chatgpt', 'claude'],
        defaultProvider: 'chatgpt',
        theme: 'dark',
      };
      chrome.storage.sync.get.mockResolvedValue(mockSettings);
      const result = await getSettings();
      expect(result).toEqual(mockSettings);
      expect(chrome.storage.sync.get).toHaveBeenCalled();
    });

    it('should fallback to local storage if sync fails', async () => {
      // Simulate sync being unavailable (e.g. not signed in to Chrome).
      chrome.storage.sync.get.mockRejectedValue(new Error('Sync unavailable'));
      chrome.storage.local.get.mockResolvedValue({ theme: 'light' });
      const result = await getSettings();
      expect(chrome.storage.local.get).toHaveBeenCalled();
    });
  });

  describe('getSetting', () => {
    it('should return specific setting value', async () => {
      chrome.storage.sync.get.mockResolvedValue({
        theme: 'dark',
        defaultProvider: 'claude',
      });
      const result = await getSetting('theme');
      expect(result).toBe('dark');
    });
  });

  describe('saveSetting', () => {
    it('should save single setting to chrome.storage.sync', async () => {
      await saveSetting('theme', 'dark');
      expect(chrome.storage.sync.set).toHaveBeenCalledWith({ theme: 'dark' });
    });

    it('should fallback to local storage if sync fails', async () => {
      chrome.storage.sync.set.mockRejectedValue(new Error('Sync unavailable'));
      await saveSetting('theme', 'dark');
      expect(chrome.storage.local.set).toHaveBeenCalledWith({ theme: 'dark' });
    });
  });

  describe('saveSettings', () => {
    it('should save multiple settings', async () => {
      const settings = {
        theme: 'dark',
        enabledProviders: ['chatgpt'],
      };
      await saveSettings(settings);
      expect(chrome.storage.sync.set).toHaveBeenCalledWith(settings);
    });
  });

  describe('resetSettings', () => {
    it('should clear and restore default settings', async () => {
      await resetSettings();
      // Clears everything, then re-seeds the documented defaults.
      expect(chrome.storage.sync.clear).toHaveBeenCalled();
      expect(chrome.storage.sync.set).toHaveBeenCalledWith(
        expect.objectContaining({
          enabledProviders: expect.any(Array),
          defaultProvider: 'chatgpt',
          theme: 'auto',
        })
      );
    });
  });

  describe('importSettings', () => {
    it('should import valid settings', async () => {
      const settings = {
        theme: 'dark',
        enabledProviders: ['chatgpt', 'claude'],
      };
      const result = await importSettings(settings);
      expect(result.success).toBe(true);
      expect(result.imported).toEqual(['theme', 'enabledProviders']);
      expect(result.skipped).toEqual([]);
    });

    it('should skip invalid setting keys', async () => {
      // Unknown keys are skipped (with a per-key error), not imported.
      const settings = {
        theme: 'dark',
        invalidKey: 'value',
      };
      const result = await importSettings(settings);
      expect(result.imported).toEqual(['theme']);
      expect(result.skipped).toEqual(['invalidKey']);
      expect(result.errors).toHaveProperty('invalidKey');
    });
  });

  describe('exportSettings', () => {
    it('should export current settings', async () => {
      const mockSettings = {
        theme: 'dark',
        enabledProviders: ['chatgpt'],
      };
      chrome.storage.sync.get.mockResolvedValue(mockSettings);
      const result = await exportSettings();
      expect(result).toEqual(mockSettings);
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/setup.js | JavaScript | // Test setup file for Vitest
// Mock Chrome extension APIs
// Global chrome.* mock shared by every test file (vitest globals mode, so
// vi is available without an import). Only the APIs the extension modules
// touch are stubbed; storage getters echo back the defaults object, which
// matches chrome.storage's real "defaults when unset" contract.
global.chrome = {
  runtime: {
    // Calls the callback immediately AND returns a resolved Promise so both
    // callback-style and promise-style callers work in tests.
    sendMessage: vi.fn((message, callback) => {
      if (callback) callback({ success: true });
      return Promise.resolve({ success: true });
    }),
    onMessage: {
      addListener: vi.fn(),
    },
    // Tests set this to a truthy object to simulate a Chrome runtime error.
    lastError: null,
  },
  storage: {
    sync: {
      // Echo defaults back (object form) or resolve empty for key lists.
      get: vi.fn((keys) => Promise.resolve(typeof keys === 'object' ? keys : {})),
      set: vi.fn(() => Promise.resolve()),
      clear: vi.fn(() => Promise.resolve()),
    },
    local: {
      get: vi.fn((keys) => Promise.resolve(typeof keys === 'object' ? keys : {})),
      set: vi.fn(() => Promise.resolve()),
      clear: vi.fn(() => Promise.resolve()),
    },
    onChanged: {
      addListener: vi.fn(),
    },
  },
  sidePanel: {
    open: vi.fn(() => Promise.resolve()),
    setPanelBehavior: vi.fn(() => Promise.resolve()),
  },
  contextMenus: {
    create: vi.fn(),
    removeAll: vi.fn(() => Promise.resolve()),
    onClicked: {
      addListener: vi.fn(),
    },
  },
  action: {
    onClicked: {
      addListener: vi.fn(),
    },
  },
  tabs: {
    create: vi.fn(() => Promise.resolve({ id: 1 })),
  },
};
// Helper to create mock IndexedDB request that triggers callbacks
const createMockRequest = (shouldSucceed = false, result = null, error = null) => {
const request = {
onsuccess: null,
onerror: null,
result: result,
error: error || new Error('IndexedDB not available in test environment'),
};
// Trigger callbacks asynchronously to simulate real IndexedDB behavior
setTimeout(() => {
if (shouldSucceed && request.onsuccess) {
request.onsuccess({ target: request });
} else if (!shouldSucceed && request.onerror) {
request.onerror({ target: request });
}
}, 0);
return request;
};
// Mock indexedDB: open() always "succeeds" with a DB whose object-store
// operations all fail asynchronously (createMockRequest(false)), so storage
// modules exercise their error paths in tests.
// NOTE(review): objectStoreNames is a plain array, whereas the real
// DOMStringList exposes .contains() — confirm no module relies on that.
// NOTE(review): onupgradeneeded is never fired by this mock.
global.indexedDB = {
  open: vi.fn(() => {
    const request = {
      onsuccess: null,
      onerror: null,
      onupgradeneeded: null,
      result: {
        objectStoreNames: [],
        createObjectStore: vi.fn(() => ({
          createIndex: vi.fn(),
        })),
        transaction: vi.fn(() => ({
          objectStore: vi.fn(() => ({
            // Every store operation yields a failing async request.
            get: vi.fn(() => createMockRequest(false)),
            getAll: vi.fn(() => createMockRequest(false)),
            add: vi.fn(() => createMockRequest(false)),
            put: vi.fn(() => createMockRequest(false)),
            delete: vi.fn(() => createMockRequest(false)),
            clear: vi.fn(() => createMockRequest(false)),
            index: vi.fn(() => ({
              getAll: vi.fn(() => createMockRequest(false)),
            })),
          })),
        })),
        onclose: null,
      },
    };
    // Signal successful open on the next macrotask, after handlers attach.
    setTimeout(() => {
      if (request.onsuccess) request.onsuccess({ target: request });
    }, 0);
    return request;
  }),
};
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/text-injector.test.js | JavaScript | // Tests for text injection into provider textareas
import { describe, it, expect, beforeEach } from 'vitest';
import { findTextInputElement, injectTextIntoElement } from '../modules/text-injector.js';
describe('text-injector module', () => {
describe('findTextInputElement', () => {
    let container;

    beforeEach(() => {
      container = document.createElement('div');
      document.body.appendChild(container);
    });

    // Remove the per-test container so elements created by one test cannot
    // leak into the next and satisfy a later selector by accident. `afterEach`
    // is available without an explicit import because vitest runs with
    // `globals: true`.
    afterEach(() => {
      container.remove();
    });

    it('should find a simple textarea by selector', () => {
      const textarea = document.createElement('textarea');
      textarea.id = 'prompt-textarea';
      container.appendChild(textarea);
      const found = findTextInputElement('#prompt-textarea');
      expect(found).toBe(textarea);
    });

    it('should find a contenteditable element', () => {
      const div = document.createElement('div');
      div.className = 'ql-editor';
      div.setAttribute('contenteditable', 'true');
      container.appendChild(div);
      const found = findTextInputElement('.ql-editor');
      expect(found).toBe(div);
    });

    it('should return null if element not found', () => {
      const found = findTextInputElement('.non-existent');
      expect(found).toBeNull();
    });

    it('should find element with role attribute', () => {
      const div = document.createElement('div');
      div.className = 'ProseMirror';
      div.setAttribute('role', 'textbox');
      div.setAttribute('contenteditable', 'true');
      container.appendChild(div);
      const found = findTextInputElement('.ProseMirror[role="textbox"]');
      expect(found).toBe(div);
    });
  });
describe('injectTextIntoElement', () => {
    // Success path: text is written into an empty <textarea> value.
    it('should inject text into a textarea', () => {
      const textarea = document.createElement('textarea');
      textarea.value = '';
      document.body.appendChild(textarea);
      const result = injectTextIntoElement(textarea, 'Hello world');
      expect(result).toBe(true);
      expect(textarea.value).toBe('Hello world');
      textarea.remove();
    });
    // Success path: text lands in a contenteditable element's textContent.
    it('should inject text into contenteditable element', () => {
      const div = document.createElement('div');
      div.setAttribute('contenteditable', 'true');
      div.textContent = '';
      document.body.appendChild(div);
      const result = injectTextIntoElement(div, 'Test content');
      expect(result).toBe(true);
      expect(div.textContent).toBe('Test content');
      div.remove();
    });
    // Injection appends — it must not overwrite what is already typed.
    it('should append to existing content in textarea', () => {
      const textarea = document.createElement('textarea');
      textarea.value = 'Existing text\n';
      document.body.appendChild(textarea);
      const result = injectTextIntoElement(textarea, 'New text');
      expect(result).toBe(true);
      expect(textarea.value).toBe('Existing text\nNew text');
      textarea.remove();
    });
    it('should append to existing content in contenteditable', () => {
      const div = document.createElement('div');
      div.setAttribute('contenteditable', 'true');
      div.textContent = 'Existing';
      document.body.appendChild(div);
      const result = injectTextIntoElement(div, ' Added');
      expect(result).toBe(true);
      expect(div.textContent).toBe('Existing Added');
      div.remove();
    });
    // Guard clauses: invalid input must report failure, not throw.
    it('should return false for null element', () => {
      const result = injectTextIntoElement(null, 'text');
      expect(result).toBe(false);
    });
    it('should return false for empty text', () => {
      const textarea = document.createElement('textarea');
      const result = injectTextIntoElement(textarea, '');
      expect(result).toBe(false);
    });
    // Frameworks (React/Vue) listen for 'input'; injection must dispatch it.
    it('should trigger input event on textarea', () => {
      const textarea = document.createElement('textarea');
      document.body.appendChild(textarea);
      let inputFired = false;
      textarea.addEventListener('input', () => {
        inputFired = true;
      });
      injectTextIntoElement(textarea, 'Test');
      expect(inputFired).toBe(true);
      textarea.remove();
    });
    it('should trigger input event on contenteditable', () => {
      const div = document.createElement('div');
      div.setAttribute('contenteditable', 'true');
      document.body.appendChild(div);
      let inputFired = false;
      div.addEventListener('input', () => {
        inputFired = true;
      });
      injectTextIntoElement(div, 'Test');
      expect(inputFired).toBe(true);
      div.remove();
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
tests/url-validator.test.js | JavaScript | import { describe, it, expect } from 'vitest';
import { isValidUrl, sanitizeUrl } from '../modules/url-validator.js';
describe('url-validator module', () => {
  describe('isValidUrl', () => {
    // Only http/https URLs with a hostname are considered valid.
    it('should accept valid HTTP URLs', () => {
      expect(isValidUrl('http://localhost:3000')).toBe(true);
      expect(isValidUrl('http://127.0.0.1:8080')).toBe(true);
      expect(isValidUrl('http://example.com')).toBe(true);
    });
    it('should accept valid HTTPS URLs', () => {
      expect(isValidUrl('https://localhost:3000')).toBe(true);
      expect(isValidUrl('https://example.com')).toBe(true);
    });
    // Dangerous schemes must be rejected (XSS / local file access vectors).
    it('should reject javascript: URLs', () => {
      expect(isValidUrl('javascript:alert(1)')).toBe(false);
    });
    it('should reject data: URLs', () => {
      expect(isValidUrl('data:text/html,<script>alert(1)</script>')).toBe(false);
    });
    it('should reject file: URLs', () => {
      expect(isValidUrl('file:///etc/passwd')).toBe(false);
    });
    // Malformed or missing input should never validate.
    it('should reject invalid URL formats', () => {
      expect(isValidUrl('not-a-url')).toBe(false);
      expect(isValidUrl('')).toBe(false);
      expect(isValidUrl(null)).toBe(false);
      expect(isValidUrl(undefined)).toBe(false);
    });
    it('should reject URLs without hostname', () => {
      expect(isValidUrl('http://')).toBe(false);
    });
  });
  describe('sanitizeUrl', () => {
    // sanitizeUrl trims input and returns the normalized URL string
    // (note the trailing slash added by URL normalization).
    it('should return cleaned URL for valid inputs', () => {
      expect(sanitizeUrl('http://localhost:3000')).toBe('http://localhost:3000/');
      expect(sanitizeUrl(' http://example.com ')).toBe('http://example.com/');
    });
    it('should return null for invalid URLs', () => {
      expect(sanitizeUrl('javascript:alert(1)')).toBe(null);
      expect(sanitizeUrl('not-a-url')).toBe(null);
      expect(sanitizeUrl('')).toBe(null);
    });
    // Normalization drops the default port for the scheme.
    it('should normalize URLs', () => {
      const result = sanitizeUrl('http://example.com:80');
      expect(result).toBeTruthy();
      expect(result.startsWith('http://')).toBe(true);
    });
  });
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
vitest.config.js | JavaScript | import { defineConfig } from 'vitest/config';
// Vitest configuration for the extension's unit tests.
export default defineConfig({
  test: {
    environment: 'happy-dom', // DOM APIs (document, events) without a real browser
    globals: true, // expose describe/it/expect/afterEach without imports
    coverage: {
      provider: 'v8',
      reporter: ['text', 'json', 'html'],
      // Exclude non-source paths from coverage reports.
      exclude: [
        'node_modules/',
        'tests/',
        '**/*.config.js',
        '**/dist/',
        '**/.{idea,git,cache,output,temp}/',
      ],
    },
    setupFiles: ['./tests/setup.js'], // installs chrome + indexedDB mocks
  },
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
install.sh | Shell | #!/usr/bin/env bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
PYTHON_VERSION="3.11.13"
VENV_DIR=".venv"
# Helper functions
# Colored one-line status messages: info (blue), success (green),
# warning (yellow), error (red).
print_info() {
    echo -e "${BLUE}ℹ${NC} $1"
}
print_success() {
    echo -e "${GREEN}✓${NC} $1"
}
print_warning() {
    echo -e "${YELLOW}⚠${NC} $1"
}
print_error() {
    echo -e "${RED}✗${NC} $1"
}
# Detect OS
# Detect the host OS and record it in $OS ("macos" or "linux").
# Exits with an error for anything else.
detect_os() {
    case "$OSTYPE" in
        darwin*)
            OS="macos"
            ;;
        linux-gnu*)
            OS="linux"
            ;;
        *)
            print_error "Unsupported OS: $OSTYPE"
            exit 1
            ;;
    esac
    print_info "Detected OS: $OS"
}
# Install pyenv
# Install pyenv if it is not already available: Homebrew on macOS, the
# official pyenv.run installer on Linux (plus shell-config persistence).
install_pyenv() {
    if command -v pyenv &> /dev/null; then
        print_success "pyenv is already installed"
        return 0
    fi

    print_info "Installing pyenv..."
    if [[ "$OS" == "macos" ]]; then
        if ! command -v brew &> /dev/null; then
            print_error "Homebrew is not installed. Please install it from https://brew.sh"
            exit 1
        fi
        brew update
        brew install pyenv
    elif [[ "$OS" == "linux" ]]; then
        # -f makes curl fail on HTTP errors so a server error page is never
        # piped into bash; -sS stays quiet but still reports failures; -L
        # follows redirects.
        curl -fsSL https://pyenv.run | bash

        # Add pyenv to PATH for current session
        export PYENV_ROOT="$HOME/.pyenv"
        export PATH="$PYENV_ROOT/bin:$PATH"
        eval "$(pyenv init -)"

        # Persist pyenv setup in the user's shell config (bashrc preferred,
        # zshrc as fallback); skip if a PYENV_ROOT line is already present.
        SHELL_CONFIG=""
        if [[ -f "$HOME/.bashrc" ]]; then
            SHELL_CONFIG="$HOME/.bashrc"
        elif [[ -f "$HOME/.zshrc" ]]; then
            SHELL_CONFIG="$HOME/.zshrc"
        fi
        if [[ -n "$SHELL_CONFIG" ]]; then
            if ! grep -q 'PYENV_ROOT' "$SHELL_CONFIG"; then
                echo '' >> "$SHELL_CONFIG"
                echo 'export PYENV_ROOT="$HOME/.pyenv"' >> "$SHELL_CONFIG"
                echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> "$SHELL_CONFIG"
                echo 'eval "$(pyenv init -)"' >> "$SHELL_CONFIG"
                print_warning "Added pyenv to $SHELL_CONFIG. Please restart your shell or run: source $SHELL_CONFIG"
            fi
        fi
    fi
    print_success "pyenv installed successfully"
}
# Install system dependencies
# Install native dependencies: XML libraries and ffmpeg everywhere, plus the
# build headers pyenv needs to compile CPython on Linux.
install_system_deps() {
    print_info "Installing system dependencies..."
    if [[ "$OS" == "macos" ]]; then
        if ! command -v brew &> /dev/null; then
            print_error "Homebrew is required. Install it from https://brew.sh"
            exit 1
        fi
        brew install libxml2 libxslt ffmpeg
        print_success "Installed libxml2, libxslt, and ffmpeg via Homebrew"
    elif [[ "$OS" == "linux" ]]; then
        # Debian/Ubuntu first, then RHEL/CentOS; fail if neither is present.
        if command -v apt-get &> /dev/null; then
            sudo apt-get update
            sudo apt-get install -y libxml2-dev libxslt-dev ffmpeg build-essential libssl-dev zlib1g-dev \
                libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
                libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
            print_success "Installed dependencies via apt-get"
        elif command -v yum &> /dev/null; then
            sudo yum install -y libxml2-devel libxslt-devel ffmpeg gcc zlib-devel bzip2 bzip2-devel \
                readline-devel sqlite sqlite-devel openssl-devel tk-devel libffi-devel xz-devel
            print_success "Installed dependencies via yum"
        else
            print_error "No supported package manager found (apt-get or yum)"
            exit 1
        fi
    fi
}
# Install Python version
install_python() {
    # Build the pinned CPython version via pyenv, skipping if already present.
    # Initialize pyenv for current session
    export PYENV_ROOT="$HOME/.pyenv"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)" 2>/dev/null || true

    # Anchored grep avoids matching e.g. 3.11.1 against 3.11.13.
    if pyenv versions --bare | grep -q "^${PYTHON_VERSION}$"; then
        print_success "Python $PYTHON_VERSION is already installed"
        return 0
    fi

    print_info "Installing Python $PYTHON_VERSION via pyenv..."
    pyenv install "$PYTHON_VERSION"
    print_success "Python $PYTHON_VERSION installed successfully"
}
# Create virtual environment
# Create (or optionally recreate) the project virtualenv using the
# pyenv-managed interpreter.
create_venv() {
    # Initialize pyenv for the current session. Respect a pre-existing
    # PYENV_ROOT (custom pyenv location) instead of clobbering it.
    export PYENV_ROOT="${PYENV_ROOT:-$HOME/.pyenv}"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)" 2>/dev/null || true

    if [[ -d "$VENV_DIR" ]]; then
        print_warning "Virtual environment already exists at $VENV_DIR"
        read -p "Do you want to recreate it? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            rm -rf "$VENV_DIR"
        else
            print_info "Using existing virtual environment"
            return 0
        fi
    fi

    print_info "Creating virtual environment with Python $PYTHON_VERSION..."
    # Use pyenv's Python to create the venv; derive the path from PYENV_ROOT
    # so a non-default pyenv location still works.
    PYTHON_PATH="$PYENV_ROOT/versions/$PYTHON_VERSION/bin/python"
    if [[ ! -f "$PYTHON_PATH" ]]; then
        print_error "Python $PYTHON_VERSION not found at $PYTHON_PATH"
        exit 1
    fi
    "$PYTHON_PATH" -m venv "$VENV_DIR"
    print_success "Virtual environment created at $VENV_DIR"
}
# Install tepub
install_tepub() {
    # Install the package in editable mode with dev extras into the venv.
    print_info "Installing TEPUB in development mode..."
    source "$VENV_DIR/bin/activate"
    # Upgrade pip
    pip install --upgrade pip
    # Install tepub with dev dependencies
    pip install -e ".[dev]"
    print_success "TEPUB installed successfully"
}
# Verify installation
verify_installation() {
    # Sanity check: the `tepub` entry point must be on PATH inside the venv.
    print_info "Verifying installation..."
    source "$VENV_DIR/bin/activate"
    if command -v tepub &> /dev/null; then
        print_success "tepub command is available"
        # --version may exit non-zero on some setups; `|| true` keeps the
        # script alive under `set -e`.
        tepub --version || true
    else
        print_error "tepub command not found"
        exit 1
    fi
}
# Print next steps
print_next_steps() {
    # Post-install usage guide: activation, API keys, config, quick start.
    echo ""
    echo -e "${GREEN}═══════════════════════════════════════════════════════════${NC}"
    echo -e "${GREEN} TEPUB Installation Complete!${NC}"
    echo -e "${GREEN}═══════════════════════════════════════════════════════════${NC}"
    echo ""
    echo -e "${YELLOW}Next Steps:${NC}"
    echo ""
    echo "1. Activate the virtual environment:"
    echo -e " ${BLUE}source $VENV_DIR/bin/activate${NC}"
    echo ""
    echo "2. Set up your API key (choose one):"
    echo -e " ${BLUE}export OPENAI_API_KEY=\"sk-...\"${NC}"
    echo -e " ${BLUE}export ANTHROPIC_API_KEY=\"sk-ant-...\"${NC}"
    echo -e " ${BLUE}export GEMINI_API_KEY=\"...\"${NC}"
    echo ""
    echo " Or create a .env file:"
    echo -e " ${BLUE}echo 'OPENAI_API_KEY=sk-...' > .env${NC}"
    echo ""
    echo "3. (Optional) Create global config:"
    echo -e " ${BLUE}mkdir -p ~/.tepub${NC}"
    echo -e " ${BLUE}cp config.example.yaml ~/.tepub/config.yaml${NC}"
    echo ""
    echo "4. Test TEPUB:"
    echo -e " ${BLUE}tepub --help${NC}"
    echo ""
    echo "5. Quick start:"
    echo -e " ${BLUE}tepub extract book.epub${NC}"
    echo -e " ${BLUE}tepub translate book.epub --to \"Simplified Chinese\"${NC}"
    echo -e " ${BLUE}tepub export book.epub --epub${NC}"
    echo ""
    echo -e "${GREEN}Documentation:${NC} README.md"
    echo ""
}
# Main installation flow
main() {
    # Full install pipeline: OS detection, system deps, pyenv, Python build,
    # venv creation, package install, verification, then usage instructions.
    echo ""
    echo -e "${BLUE}═══════════════════════════════════════════════════════════${NC}"
    echo -e "${BLUE} TEPUB Installation Script${NC}"
    echo -e "${BLUE}═══════════════════════════════════════════════════════════${NC}"
    echo ""
    detect_os
    install_system_deps
    install_pyenv
    install_python
    create_venv
    install_tepub
    verify_installation
    print_next_steps
}
# Run main function
main
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/__init__.py | Python | """Project package root with compatibility aliases."""
import sys
from importlib import import_module

# Legacy top-level module names mapped to submodules of this package.
# Registering them in sys.modules keeps old `import config`-style imports
# working after the code moved under the package.
_aliases = {
    "config": "config",
    "debug_tools": "debug_tools",
    "epub_io": "epub_io",
    "extraction": "extraction",
    "injection": "injection",
    "logging_utils": "logging_utils",
    "state": "state",
    "translation": "translation",
    "translation.providers": "translation.providers",
    "webbuilder": "webbuilder",
    "web_templates": "web_templates",
}

for alias, target in _aliases.items():
    module = import_module(f"{__name__}.{target}")
    # Only claim the alias if nothing else has registered that name.
    if alias not in sys.modules:
        sys.modules[alias] = module
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/__init__.py | Python | from .controller import run_audiobook
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/assembly.py | Python | from __future__ import annotations
import asyncio
import logging
import random
import re
import shutil
import subprocess
from io import BytesIO
from pathlib import Path
from ebooklib import ITEM_IMAGE
try:
from mutagen.mp4 import MP4, MP4Chapter, MP4Cover
except ImportError:
from mutagen.mp4 import MP4, MP4Cover
MP4Chapter = None
from PIL import Image
from pydub import AudioSegment
from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
from epub_io.resources import get_item_by_href
from epub_io.toc_utils import parse_toc_to_dict
from state.models import Segment
from state.store import load_segments
from .cover import find_spine_cover_candidate
from .models import AudioSegmentStatus, AudioSessionConfig
from .mp4chapters import write_chapter_markers
from .state import load_state
logger = logging.getLogger(__name__)
console = get_console()
def _slugify(value: str) -> str:
value = value.strip()
value = re.sub(r"\s+", "_", value)
value = re.sub(r"[^A-Za-z0-9_\-]", "", value)
return value or "audiobook"
def _get_audio_duration(audio_path: Path) -> float:
    """Return an audio file's duration in seconds, measured with ffprobe.

    ffprobe is used instead of pydub's ``duration_seconds`` because pydub
    underreports the length of VBR-encoded M4A/AAC files.

    Args:
        audio_path: Audio file to probe (M4A, MP3, etc.).

    Returns:
        Duration in seconds as float.
    """
    command = [
        "ffprobe",
        "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        str(audio_path),
    ]
    probe = subprocess.run(command, capture_output=True, text=True, check=True)
    return float(probe.stdout.strip())
def _extract_narrator_name(voice_id: str) -> str:
"""Extract friendly narrator name from voice ID.
Examples:
en-US-GuyNeural -> Guy
en-US-JennyNeural -> Jenny
alloy -> alloy
"""
# Remove language prefix (e.g., "en-US-")
parts = voice_id.split("-")
if len(parts) >= 3:
name_part = "-".join(parts[2:])
else:
name_part = voice_id
# Remove common suffixes
for suffix in ["Neural", "Multilingual", "Turbo"]:
if name_part.endswith(suffix):
name_part = name_part[:-len(suffix)]
# Clean up any remaining hyphens or underscores
name = name_part.strip("-_")
return name if name else voice_id
def _book_title(reader: EpubReader) -> str:
    """Return the book's DC title metadata, or the EPUB filename stem."""
    meta = reader.book.get_metadata("DC", "title")
    return meta[0][0] if meta else reader.epub_path.stem
def _book_authors(reader: EpubReader) -> list[str]:
    """Collect all non-empty DC creator (author) entries from the metadata."""
    return [
        author
        for author, _attrs in reader.book.get_metadata("DC", "creator")
        if author
    ]
def _document_titles(reader: EpubReader) -> dict[str, str]:
    """Map each document's POSIX path to a display title.

    Preference order per document: first <h1> text, then <title> text,
    then the file stem.
    """
    titles: dict[str, str] = {}
    for document in reader.iter_documents():
        tree = document.tree
        if tree is None:
            continue
        title = ""
        h1_nodes = tree.xpath("//h1")
        if h1_nodes:
            title = (h1_nodes[0].text_content() or "").strip()
        if not title:
            title_nodes = tree.xpath("//title")
            if title_nodes:
                title = (title_nodes[0].text_content() or "").strip()
        titles[document.path.as_posix()] = title or document.path.stem
    return titles
def _find_cover_item(reader: EpubReader):
    """Locate the book's cover image item, or return None.

    Search order:
      1. Item referenced by an OPF ``<meta name="cover">`` entry.
      2. Spine-based cover candidate (``find_spine_cover_candidate``).
      3. Any image item whose name contains "cover".
      4. The first image item of any kind, as a last resort.
    """
    # 1) Explicit OPF metadata pointing at the cover item id.
    for meta, attrs in reader.book.get_metadata("OPF", "meta"):
        if isinstance(attrs, dict) and attrs.get("name") == "cover":
            cover_id = attrs.get("content")
            if cover_id:
                item = reader.book.get_item_with_id(cover_id)
                if item:
                    return item
    # 2) Cover-like document found in the spine; resolve its href.
    spine_candidate = find_spine_cover_candidate(reader)
    if spine_candidate:
        try:
            return get_item_by_href(reader.book, spine_candidate.href)
        except KeyError:
            pass  # href missing from the manifest; fall through to heuristics
    # 3) Any image whose filename mentions "cover".
    for item in reader.book.get_items():
        if item.get_type() == ITEM_IMAGE and "cover" in item.get_name().lower():
            return item
    # 4) Fall back to the first image in the book.
    for item in reader.book.get_items():
        if item.get_type() == ITEM_IMAGE:
            return item
    return None
def _generate_statement_audio(
    text: str,
    session: AudioSessionConfig,
    output_path: Path,
) -> Path | None:
    """Generate audio for opening/closing statement using the configured TTS engine.

    Matches the renderer.py workflow: generate TTS output, then convert to M4A.

    Args:
        text: Statement text to synthesize
        session: Audio session config with TTS provider and voice settings
        output_path: Where to save the M4A audio file

    Returns:
        Path to generated M4A audio file, or None if generation failed
    """
    if not text or not text.strip():
        return None
    try:
        # Use the same TTS engine as the main audiobook
        from .tts import create_tts_engine

        engine = create_tts_engine(
            provider=session.tts_provider,
            voice=session.voice,
            rate=None,  # Edge TTS only
            volume=None,  # Edge TTS only
            model=session.tts_model,
            speed=session.tts_speed,
        )
        # Determine temp file extension based on provider:
        # OpenAI outputs AAC, Edge outputs MP3.
        temp_ext = ".aac" if session.tts_provider == "openai" else ".mp3"
        temp_file = output_path.with_suffix(temp_ext)
        try:
            # Generate TTS output
            engine.synthesize(text.strip(), temp_file)
            # Always convert to M4A (matching renderer.py approach)
            audio = AudioSegment.from_file(temp_file)
            audio.export(
                output_path,
                format="mp4",
                codec="aac",
                parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
            )
        finally:
            # Remove the intermediate file even when synthesis or conversion
            # fails, so aborted runs don't leave stray .aac/.mp3 files behind.
            temp_file.unlink(missing_ok=True)
        return output_path
    except Exception as exc:
        logger.warning("Failed to generate statement audio: %s", exc)
        return None
def _prepare_cover(
    output_root: Path,
    reader: EpubReader,
    explicit_cover: Path | None = None,
) -> Path | None:
    """Extract and normalize the cover image into output_root.

    Uses explicit_cover when given, otherwise searches the EPUB. Writes
    cover.png (RGBA PNG, to keep transparency) or cover.jpg (everything
    else). Returns the written path, or None when no usable cover exists.
    """
    try:
        if explicit_cover:
            image = Image.open(explicit_cover)
            # Preserve original format if PNG
            original_format = image.format  # 'PNG', 'JPEG', etc.
        else:
            cover_item = _find_cover_item(reader)
            if not cover_item:
                return None
            image = Image.open(BytesIO(cover_item.get_content()))
            original_format = image.format
    except Exception:
        # Unreadable or corrupt image data — treat as "no cover".
        return None
    with image:
        # Only convert if necessary
        if image.mode not in ("RGB", "RGBA"):
            image = image.convert("RGB")
        width, height = image.size
        if width == 0 or height == 0:
            return None
        output_root.mkdir(parents=True, exist_ok=True)
        # Preserve PNG format for transparency, otherwise use JPEG
        if original_format == "PNG" and image.mode == "RGBA":
            cover_path = output_root / "cover.png"
            image.save(cover_path, format="PNG")
        else:
            # Convert RGBA to RGB for JPEG (no transparency support)
            if image.mode == "RGBA":
                image = image.convert("RGB")
            cover_path = output_root / "cover.jpg"
            image.save(cover_path, format="JPEG", quality=95)
        return cover_path
def _chapter_title(
file_path: str,
toc_map: dict[str, str],
doc_titles: dict[str, str],
custom_map: dict[str, str] | None = None,
) -> str:
# Priority: custom > TOC > document title > filename
if custom_map and file_path in custom_map:
return custom_map[file_path]
title = toc_map.get(file_path)
if title:
return title
return doc_titles.get(file_path, Path(file_path).stem)
def _build_spine_to_toc_map(
    reader: EpubReader, toc_map: dict[str, str]
) -> dict[int, tuple[str, str]]:
    """Build mapping from spine index to (toc_file, toc_title).

    Maps each spine item to its governing TOC entry. Files between TOC entries
    are mapped to the previous TOC entry. Files before the first TOC entry or
    after the last TOC entry are not included in the map.

    Returns:
        Dict mapping spine_index -> (toc_file_path, toc_title)
    """
    # Build spine index lookup: item name -> position in the spine.
    spine_items = reader.book.spine
    spine_lookup: dict[str, int] = {}
    for idx, (item_id, _linear) in enumerate(spine_items):
        item = reader.book.get_item_with_id(item_id)
        if item:
            spine_lookup[item.get_name()] = idx
    # Find spine indices for TOC entries
    toc_entries: list[tuple[int, str, str]] = []  # (spine_index, file_path, title)
    for file_path, title in toc_map.items():
        spine_idx = spine_lookup.get(file_path)
        if spine_idx is not None:
            toc_entries.append((spine_idx, file_path, title))
    if not toc_entries:
        # No TOC entries found, return empty map
        return {}
    # Sort by spine index
    toc_entries.sort(key=lambda x: x[0])
    # Build the mapping: spine_index -> (toc_file, toc_title)
    result: dict[int, tuple[str, str]] = {}
    # Get first and last TOC spine indices
    first_toc_idx = toc_entries[0][0]
    last_toc_idx = toc_entries[-1][0]
    # Map spine indices to their governing TOC entry. current_toc_idx only
    # moves forward because both spine indices and toc_entries are sorted.
    current_toc_idx = 0
    for spine_idx in range(len(spine_items)):
        # Skip files before first TOC entry
        if spine_idx < first_toc_idx:
            continue
        # Skip files after last TOC entry
        if spine_idx > last_toc_idx:
            continue
        # Advance to the latest TOC entry at or before this spine index.
        while (
            current_toc_idx < len(toc_entries) - 1
            and spine_idx >= toc_entries[current_toc_idx + 1][0]
        ):
            current_toc_idx += 1
        toc_spine_idx, toc_file, toc_title = toc_entries[current_toc_idx]
        result[spine_idx] = (toc_file, toc_title)
    return result
def assemble_audiobook(
settings: AppSettings,
input_epub: Path,
session: AudioSessionConfig,
state_path: Path,
output_root: Path,
) -> Path | None:
audio_state = load_state(state_path)
segments_doc = load_segments(settings.segments_file)
reader = EpubReader(input_epub, settings)
toc_map = parse_toc_to_dict(reader)
doc_titles = _document_titles(reader)
book_title = _book_title(reader)
authors = _book_authors(reader)
author_str = ", ".join(authors) if authors else "Unknown"
# Check for custom chapter titles from chapters.yaml
custom_chapters_map: dict[str, str] = {} # file_path -> custom_title
chapters_yaml_path = settings.work_dir / "chapters.yaml"
if chapters_yaml_path.exists():
try:
from .chapters import read_chapters_yaml
chapters, metadata = read_chapters_yaml(chapters_yaml_path)
console.print(f"[cyan]Loading custom chapter titles from chapters.yaml[/cyan]")
# Build map from segment files to custom titles
# Each chapter has a list of segment files
for chapter in chapters:
if chapter.segments:
# All segments in this chapter get the same title
for seg_file in chapter.segments:
custom_chapters_map[seg_file] = chapter.title
except Exception as exc:
logger.warning(f"Failed to load chapters.yaml: {exc}")
console.print(f"[yellow]Warning: Could not load chapters.yaml: {exc}[/yellow]")
# Generate opening and closing statement audio
opening_audio_path: Path | None = None
closing_audio_path: Path | None = None
if settings.audiobook_opening_statement or settings.audiobook_closing_statement:
narrator_name = _extract_narrator_name(session.voice)
# Generate opening statement
if settings.audiobook_opening_statement:
opening_text = settings.audiobook_opening_statement.format(
book_name=book_title,
author=author_str,
narrator_name=narrator_name,
)
opening_audio_path = output_root / "opening_statement.m4a"
try:
result = _generate_statement_audio(
opening_text,
session,
opening_audio_path,
)
if result:
console.print("[cyan]Generated opening statement audio[/cyan]")
except Exception as exc:
logger.warning("Failed to generate opening statement: %s", exc)
# Generate closing statement
if settings.audiobook_closing_statement:
closing_text = settings.audiobook_closing_statement.format(
book_name=book_title,
author=author_str,
narrator_name=narrator_name,
)
closing_audio_path = output_root / "closing_statement.m4a"
try:
result = _generate_statement_audio(
closing_text,
session,
closing_audio_path,
)
if result:
console.print("[cyan]Generated closing statement audio[/cyan]")
except Exception as exc:
logger.warning("Failed to generate closing statement: %s", exc)
# Build spine-to-TOC mapping for chapter grouping
spine_to_toc = _build_spine_to_toc_map(reader, toc_map)
# Group segments by TOC chapter (or by file if no TOC)
chapter_map: dict[str, list[Segment]] = {}
for segment in segments_doc.segments:
seg_state = audio_state.segments.get(segment.segment_id)
if not seg_state:
continue
if seg_state.status != AudioSegmentStatus.COMPLETED:
continue
if not seg_state.audio_path:
continue
audio_path = Path(seg_state.audio_path)
if not audio_path.exists():
continue
# Determine chapter key based on TOC mapping
if spine_to_toc:
# Use TOC-driven grouping
spine_idx = segment.metadata.spine_index
toc_entry = spine_to_toc.get(spine_idx)
if toc_entry is None:
# File is before first TOC or after last TOC - skip
continue
toc_file, toc_title = toc_entry
# Use TOC file as chapter key (groups all files under same TOC entry)
key = toc_file
else:
# Fallback to file-based grouping for EPUBs without TOC
key = segment.file_path.as_posix()
chapter_map.setdefault(key, []).append(segment)
if not chapter_map:
return None
sorted_chapters = sorted(
chapter_map.items(),
key=lambda item: (
min(seg.metadata.spine_index for seg in item[1]),
min(seg.metadata.order_in_file for seg in item[1]),
),
)
chapters_dir = output_root / "chapters"
chapters_dir.mkdir(parents=True, exist_ok=True)
chapter_audios: list[tuple[str, Path, float]] = []
segment_pause_range = getattr(session, "segment_pause_range", (2.0, 4.0))
# Prepare cover image (needed for both chapter files and final audiobook)
explicit_cover = session.cover_path
cover_path = _prepare_cover(output_root, reader, explicit_cover=explicit_cover)
cover_data = None
cover_format = MP4Cover.FORMAT_JPEG # Default format
if cover_path and cover_path.exists():
cover_data = cover_path.read_bytes()
# Detect format from file extension
if cover_path.suffix.lower() == ".png":
cover_format = MP4Cover.FORMAT_PNG
# Check if chapter files already exist
expected_chapters = []
for index, (file_path, segments) in enumerate(sorted_chapters, start=1):
title = _chapter_title(file_path, toc_map, doc_titles, custom_chapters_map)
chapter_path = chapters_dir / f"{index:03d}-{_slugify(title)}.m4a"
expected_chapters.append((title, chapter_path, file_path, segments))
all_chapters_exist = all(path.exists() for _, path, _, _ in expected_chapters)
if all_chapters_exist:
console.print(f"[cyan]Found existing {len(expected_chapters)} chapter files, skipping recombination…[/cyan]")
# Load existing chapter files and calculate durations
for title, chapter_path, _, _ in expected_chapters:
try:
duration = _get_audio_duration(chapter_path)
chapter_audios.append((title, chapter_path, duration))
except Exception:
# If any file is invalid, we'll need to regenerate all
chapter_audios = []
all_chapters_exist = False
break
if not all_chapters_exist:
console.print(f"[cyan]Combining {len(sorted_chapters)} chapters into final audiobook…[/cyan]")
progress = Progress(
TextColumn("Combining"),
BarColumn(),
TextColumn("{task.completed}/{task.total}"),
TimeElapsedColumn(),
console=console,
)
with progress:
combine_task = progress.add_task("chapter-mix", total=len(sorted_chapters))
for title, chapter_path, file_path, segments in expected_chapters:
rng = random.Random(hash(file_path))
# Get available segment files
available_segment_paths = []
for segment in sorted(segments, key=lambda s: s.metadata.order_in_file):
seg_state = audio_state.segments.get(segment.segment_id)
if not seg_state or not seg_state.audio_path:
continue
audio_path = Path(seg_state.audio_path)
if not audio_path.exists():
continue
available_segment_paths.append(audio_path)
if not available_segment_paths:
progress.advance(combine_task)
continue
# Create silence files for pauses between segments
chapter_temp_dir = chapters_dir / f"temp_{title[:20]}"
chapter_temp_dir.mkdir(parents=True, exist_ok=True)
# Build concat list with segments and silence
concat_list_path = chapter_temp_dir / "concat_list.txt"
with open(concat_list_path, "w", encoding="utf-8") as f:
for idx, segment_path in enumerate(available_segment_paths):
f.write(f"file '{segment_path.absolute()}'\n")
# Add pause between segments (except after last segment)
if idx < len(available_segment_paths) - 1:
pause_seconds = rng.uniform(*segment_pause_range)
pause_path = chapter_temp_dir / f"pause_{idx}.m4a"
pause_silence = AudioSegment.silent(duration=int(pause_seconds * 1000))
pause_silence.export(
pause_path,
format="mp4",
codec="aac",
parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
)
f.write(f"file '{pause_path.absolute()}'\n")
# Add chapter gap at the end (except for last chapter)
index = expected_chapters.index((title, chapter_path, file_path, segments)) + 1
if index < len(sorted_chapters):
chapter_gap_rng = random.Random(0xA10D10 + index)
chapter_gap_seconds = chapter_gap_rng.uniform(2.0, 4.0)
gap_path = chapter_temp_dir / "chapter_gap.m4a"
gap_silence = AudioSegment.silent(duration=int(chapter_gap_seconds * 1000))
gap_silence.export(
gap_path,
format="mp4",
codec="aac",
parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
)
with open(concat_list_path, "a", encoding="utf-8") as f:
f.write(f"file '{gap_path.absolute()}'\n")
# Use ffmpeg to concatenate M4A files without re-encoding
subprocess.run(
[
"ffmpeg",
"-f", "concat",
"-safe", "0",
"-i", str(concat_list_path),
"-vn", # Ignore video streams (cover art)
"-c:a", "copy",
"-y",
str(chapter_path),
],
check=True,
capture_output=True,
)
# Clean up temp directory
import shutil as shutil_module
shutil_module.rmtree(chapter_temp_dir, ignore_errors=True)
# Add cover art to chapter M4A file
if cover_data:
try:
audio = MP4(chapter_path)
audio["covr"] = [MP4Cover(cover_data, imageformat=cover_format)]
audio.save()
except Exception:
pass # Silently ignore cover art failures
# Get actual duration from the created file (using ffprobe for accuracy)
try:
actual_duration = _get_audio_duration(chapter_path)
except Exception:
# Fallback: estimate based on file size (very rough)
actual_duration = 0.0
logger.warning(f"Could not determine duration for chapter: {title}")
chapter_audios.append((title, chapter_path, actual_duration))
progress.advance(combine_task)
if not chapter_audios:
return None
# Use ffmpeg concat to avoid 4GB WAV limit and memory issues
audiobook_dir = output_root
audiobook_dir.mkdir(parents=True, exist_ok=True)
provider_suffix = "edgetts" if session.tts_provider == "edge" else "openaitts"
final_name = f"{_slugify(book_title)}@{provider_suffix}.m4a"
workspace_path = audiobook_dir / final_name
# Create concat file list for ffmpeg
concat_file = audiobook_dir / "concat_list.txt"
chapter_markers: list[tuple[int, str]] = []
current_position_seconds = 0.0 # Use float for precision
with open(concat_file, "w", encoding="utf-8") as f:
# Add opening statement if available
if opening_audio_path and opening_audio_path.exists():
f.write(f"file '{opening_audio_path.absolute()}'\n")
# Add silence after opening
opening_silence_path = audiobook_dir / "opening_silence.m4a"
planned_silence_duration = random.Random(0xDEADBEEF).uniform(2.0, 4.0)
opening_silence = AudioSegment.silent(duration=int(planned_silence_duration * 1000))
opening_silence.export(
opening_silence_path,
format="mp4",
codec="aac",
parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
)
f.write(f"file '{opening_silence_path.absolute()}'\n")
# Read actual durations from files using ffprobe (accurate for M4A)
try:
opening_duration_seconds = _get_audio_duration(opening_audio_path)
actual_silence_duration = _get_audio_duration(opening_silence_path)
current_position_seconds += opening_duration_seconds + actual_silence_duration
except Exception as exc:
logger.warning(f"Could not load opening statement duration, chapter timestamps may be offset: {exc}")
for idx, (title, chapter_path, duration_seconds) in enumerate(chapter_audios):
safe_title = title.strip() if isinstance(title, str) else ""
if not safe_title:
safe_title = f"Chapter {idx + 1}"
# Record chapter marker at current position (convert to ms only here)
chapter_markers.append((int(current_position_seconds * 1000), safe_title))
# Add chapter file to concat list
f.write(f"file '{chapter_path.absolute()}'\n")
# Update position with chapter duration (already includes silence at end)
current_position_seconds += duration_seconds
# Add closing statement if available
if closing_audio_path and closing_audio_path.exists():
# Add 2-4 seconds silence before closing
# (This silence is added as a separate silent M4A)
silence_path = audiobook_dir / "closing_silence.m4a"
silence_duration = random.Random(0xC105ED).uniform(2.0, 4.0)
silence_segment = AudioSegment.silent(duration=int(silence_duration * 1000))
silence_segment.export(
silence_path,
format="mp4",
codec="aac",
parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
)
f.write(f"file '{silence_path.absolute()}'\n")
f.write(f"file '{closing_audio_path.absolute()}'\n")
# Use ffmpeg to concatenate M4A files without re-encoding
# All input files are already M4A/AAC, so we can use -c:a copy for instant concat
console.print("[cyan]Creating final M4A audiobook…[/cyan]")
subprocess.run(
[
"ffmpeg",
"-f", "concat",
"-safe", "0",
"-i", str(concat_file),
"-vn", # Ignore video streams (cover art)
"-c:a", "copy", # Copy audio stream without re-encoding (instant!)
"-y", # Overwrite output file
str(workspace_path),
],
check=True,
capture_output=True,
)
# Clean up concat file
concat_file.unlink()
# Clean up statement audio files (already included in final audiobook)
if opening_audio_path and opening_audio_path.exists():
opening_audio_path.unlink()
opening_silence_path = audiobook_dir / "opening_silence.m4a"
if opening_silence_path.exists():
opening_silence_path.unlink()
if closing_audio_path and closing_audio_path.exists():
closing_audio_path.unlink()
closing_silence_path = audiobook_dir / "closing_silence.m4a"
if closing_silence_path.exists():
closing_silence_path.unlink()
# Cover was already prepared earlier for chapter files
mp4 = MP4(workspace_path)
mp4["©nam"] = [book_title]
mp4["©alb"] = [book_title]
if authors:
mp4["©ART"] = [", ".join(authors)]
if cover_path and cover_path.exists():
cover_bytes = cover_path.read_bytes()
# Detect cover format from extension
cover_img_format = MP4Cover.FORMAT_PNG if cover_path.suffix.lower() == ".png" else MP4Cover.FORMAT_JPEG
mp4["covr"] = [MP4Cover(cover_bytes, imageformat=cover_img_format)]
native_chapters = False
if MP4Chapter:
chapters = []
for start_ms, title in chapter_markers:
chapters.append(MP4Chapter(start_ms, title=title))
if chapters and hasattr(mp4, "chapters"):
mp4.chapters = chapters
native_chapters = True
mp4.save()
if not native_chapters and chapter_markers:
try:
write_chapter_markers(workspace_path, chapter_markers)
except Exception as exc: # noqa: BLE001
logger.warning("Chapter marker injection failed: %s", exc)
# Keep audiobook in work_dir structure instead of moving to EPUB parent
return workspace_path
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/chapters.py | Python | """Chapter management utilities for audiobooks."""
from __future__ import annotations
from datetime import datetime, timezone
from pathlib import Path
import yaml
from mutagen.mp4 import MP4
from config import AppSettings
from epub_io.reader import EpubReader
from epub_io.toc_utils import parse_toc_to_dict
from state.models import Segment
from state.store import load_segments
from .mp4chapters import write_chapter_markers
def _parse_timestamp(value: str | int | float) -> float:
"""Parse timestamp to seconds.
Accepts:
- "1:23:45" or "01:23:45" -> 5025.0 seconds (h:mm:ss)
- "1:9:2" -> 4142.0 seconds (flexible, no zero-padding required)
- "23:45" -> 1425.0 seconds (mm:ss, assumes no hours)
- "45" -> 45.0 seconds (ss only)
- 5025.0 -> 5025.0 (numeric seconds, backward compat)
Returns:
float: Timestamp in seconds
"""
if isinstance(value, (int, float)):
return float(value)
parts = str(value).split(":")
if len(parts) == 3: # h:mm:ss
return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
elif len(parts) == 2: # mm:ss
return int(parts[0]) * 60 + int(parts[1])
else: # ss
return float(value)
def _format_timestamp(seconds: float) -> str:
"""Format seconds as h:mm:ss string with zero-padding.
Examples:
- 5025.0 -> "1:23:45"
- 1425.0 -> "0:23:45"
- 45.5 -> "0:00:45"
Args:
seconds: Timestamp in seconds
Returns:
str: Formatted timestamp as "h:mm:ss"
"""
h = int(seconds // 3600)
m = int((seconds % 3600) // 60)
s = int(seconds % 60)
return f"{h}:{m:02d}:{s:02d}"
class ChapterInfo:
    """Chapter information with optional timestamp."""

    def __init__(self, title: str, start: float | None = None, segments: list[str] | None = None):
        self.title = title
        # Start offset in seconds; None until the audiobook has been generated.
        self.start = start
        # Source file paths contributing to this chapter (preview mode).
        self.segments = segments if segments else []

    def to_dict(self) -> dict:
        """Serialise to a plain dict suitable for YAML export."""
        payload: dict = {"title": self.title}
        if self.start is not None:
            payload["start"] = _format_timestamp(self.start)
        if self.segments:
            payload["segments"] = self.segments
        return payload

    @staticmethod
    def from_dict(data: dict) -> ChapterInfo:
        """Rehydrate a ChapterInfo from a YAML-loaded dict."""
        start = _parse_timestamp(data["start"]) if "start" in data else None
        return ChapterInfo(title=data["title"], start=start, segments=data.get("segments", []))
def extract_chapters_from_epub(
    input_epub: Path, settings: AppSettings
) -> tuple[list[ChapterInfo], dict]:
    """Extract chapter structure from EPUB (preview mode, no timestamps).

    Groups extracted segments into chapters using the TOC mapping when one
    is available, otherwise falls back to grouping by source file. Chapter
    ``start`` times are left as None; metadata notes that timestamps will be
    filled in during audiobook generation.

    Returns:
        Tuple of (chapters list, metadata dict)
    """
    reader = EpubReader(input_epub, settings)
    toc_map = parse_toc_to_dict(reader)
    segments_doc = load_segments(settings.segments_file)
    # Build spine to TOC mapping (reuse logic from assembly.py)
    from .assembly import _build_spine_to_toc_map, _document_titles

    spine_to_toc = _build_spine_to_toc_map(reader, toc_map)
    doc_titles = _document_titles(reader)
    # Group segments by chapter
    chapter_map: dict[str, list[Segment]] = {}
    for segment in segments_doc.segments:
        if spine_to_toc:
            spine_idx = segment.metadata.spine_index
            toc_entry = spine_to_toc.get(spine_idx)
            if toc_entry is None:
                # Segment's document is not reachable from the TOC; exclude it.
                continue
            toc_file, toc_title = toc_entry
            key = toc_file
        else:
            # Fallback to file-based grouping
            key = segment.file_path.as_posix()
        if key not in chapter_map:
            chapter_map[key] = []
        chapter_map[key].append(segment)
    # Sort chapters by spine order (earliest segment position wins per chapter)
    sorted_chapters = sorted(
        chapter_map.items(),
        key=lambda item: (
            min(seg.metadata.spine_index for seg in item[1]),
            min(seg.metadata.order_in_file for seg in item[1]),
        ),
    )
    # Build chapter info list
    chapters = []
    for file_path, segments in sorted_chapters:
        # Determine title: TOC label first, then document title, then file stem
        if toc_map and file_path in toc_map:
            title = toc_map[file_path]
        else:
            title = doc_titles.get(file_path, Path(file_path).stem)
        # Collect segment file paths for this chapter, ordered by spine position
        segment_files = sorted(
            set(seg.file_path.as_posix() for seg in segments),
            key=lambda f: min(
                seg.metadata.spine_index
                for seg in segments
                if seg.file_path.as_posix() == f
            ),
        )
        chapters.append(ChapterInfo(title=title, start=None, segments=segment_files))
    # Build metadata
    metadata = {
        "source": str(input_epub),
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "mode": "preview",
        "note": "Timestamps will be determined during audiobook generation",
    }
    return chapters, metadata
def extract_chapters_from_mp4(mp4_path: Path) -> tuple[list[ChapterInfo], dict]:
    """Extract chapter information from an existing M4A audiobook.

    Args:
        mp4_path: Path to the M4A file to inspect.

    Returns:
        Tuple of (chapters list, metadata dict).

    Raises:
        ValueError: If the file carries no chapter information.
    """
    container = MP4(mp4_path)
    if not container.chapters:
        raise ValueError(f"No chapter information found in {mp4_path}")
    # mutagen.mp4.Chapter.start is already in seconds (not milliseconds).
    found = [
        ChapterInfo(title=chapter.title, start=chapter.start)
        for chapter in container.chapters
    ]
    # Total runtime, when mutagen exposes it.
    duration = container.info.length if hasattr(container.info, "length") else None
    meta = {
        "source": str(mp4_path),
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "mode": "from_audiobook",
        "duration": round(duration, 3) if duration else None,
    }
    return found, meta
def write_chapters_yaml(
    chapters: list[ChapterInfo], metadata: dict, output_path: Path
) -> None:
    """Write chapters to a YAML config file.

    The file is written by hand (rather than via yaml.dump) so that header
    and per-chapter comments can be emitted. The previously built ``data``
    dict was dead code — it was never serialized — and has been removed.

    Args:
        chapters: Chapter entries; ``start`` may be None in preview mode.
        metadata: Metadata mapping written under the ``metadata:`` key
            (None values are omitted, strings are quoted).
        output_path: Destination file; parent directories are created.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        # Write header comment
        f.write("# Chapter information for audiobook\n")
        f.write(f"# Source: {metadata.get('source', 'unknown')}\n")
        f.write(f"# Generated: {metadata.get('generated_at', 'unknown')}\n")
        if metadata.get("mode") == "preview":
            f.write(f"# {metadata.get('note', '')}\n")
        f.write("\n")
        # Write metadata: quote strings, emit other scalars verbatim
        f.write("metadata:\n")
        for key, value in metadata.items():
            if value is not None:
                if isinstance(value, str):
                    f.write(f'  {key}: "{value}"\n')
                else:
                    f.write(f"  {key}: {value}\n")
        f.write("\n")
        # Write chapters
        f.write("chapters:\n")
        for ch in chapters:
            f.write(f'  - title: "{ch.title}"\n')
            if ch.start is not None:
                f.write(f'    start: "{_format_timestamp(ch.start)}"\n')
            if ch.segments:
                # Show at most three segment paths as a comment, for readability
                f.write(f"    # Segments: {', '.join(ch.segments[:3])}")
                if len(ch.segments) > 3:
                    f.write(f" (and {len(ch.segments) - 3} more)")
                f.write("\n")
def read_chapters_yaml(yaml_path: Path) -> tuple[list[ChapterInfo], dict]:
    """Read chapters from a YAML config file.

    Args:
        yaml_path: Path to a file previously written by write_chapters_yaml
            (or hand-edited by the user).

    Returns:
        Tuple of (chapters list, metadata dict).

    Raises:
        ValueError: If the file is empty or lacks a ``chapters`` key.
    """
    with open(yaml_path, encoding="utf-8") as f:
        loaded = yaml.safe_load(f)
    if not loaded or "chapters" not in loaded:
        raise ValueError(f"Invalid chapters YAML file: {yaml_path}")
    parsed = [ChapterInfo.from_dict(entry) for entry in loaded["chapters"]]
    return parsed, loaded.get("metadata", {})
def validate_chapters(
    chapters: list[ChapterInfo], duration: float | None = None
) -> list[str]:
    """Validate chapter data.

    Args:
        chapters: List of chapter info
        duration: Optional audiobook duration to validate against

    Returns:
        List of validation error messages (empty if valid)
    """
    problems: list[str] = []
    if not chapters:
        problems.append("No chapters defined")
        return problems
    # Every chapter needs a non-blank title.
    for idx, chapter in enumerate(chapters):
        if not chapter.title or not chapter.title.strip():
            problems.append(f"Chapter {idx + 1}: Empty title")
    # Timestamp checks only apply once any chapter carries one.
    if any(chapter.start is not None for chapter in chapters):
        # All chapters must then carry timestamps.
        missing = [idx + 1 for idx, chapter in enumerate(chapters) if chapter.start is None]
        if missing:
            problems.append(f"Chapters {missing} missing timestamps")
        # Timestamps must be non-negative and strictly increasing.
        for idx, chapter in enumerate(chapters):
            if chapter.start is None:
                continue
            if chapter.start < 0:
                problems.append(f"Chapter {idx + 1}: Negative timestamp {chapter.start}")
            if idx > 0 and chapters[idx - 1].start is not None:
                if chapter.start <= chapters[idx - 1].start:
                    problems.append(
                        f"Chapter {idx + 1}: Timestamp {chapter.start} not after previous chapter"
                    )
        # No timestamp may lie beyond the audiobook's total runtime.
        if duration is not None:
            for idx, chapter in enumerate(chapters):
                if chapter.start is not None and chapter.start > duration:
                    problems.append(
                        f"Chapter {idx + 1}: Timestamp {chapter.start}s exceeds audiobook duration {duration}s"
                    )
    return problems
def update_mp4_chapters(mp4_path: Path, chapters: list[ChapterInfo]) -> None:
    """Update M4A audiobook with new chapter markers.

    Args:
        mp4_path: Path to M4A audiobook file
        chapters: List of chapter info with timestamps

    Raises:
        ValueError: If any chapter lacks a timestamp or validation fails.
    """
    # Every chapter must carry a timestamp before we can write markers.
    if any(ch.start is None for ch in chapters):
        raise ValueError("All chapters must have timestamps to update audiobook")
    # Read the audiobook length so timestamps can be range-checked.
    container = MP4(mp4_path)
    duration = container.info.length if hasattr(container.info, "length") else None
    issues = validate_chapters(chapters, duration)
    if issues:
        raise ValueError("Chapter validation failed:\n" + "\n".join(issues))
    # write_chapter_markers expects (milliseconds, title) pairs.
    markers = [(int(ch.start * 1000), ch.title) for ch in chapters]
    write_chapter_markers(mp4_path, markers)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/controller.py | Python | from __future__ import annotations
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from pathlib import Path
from rich.console import Group
from rich.live import Live
from rich.panel import Panel
from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn
from rich.table import Table
from config import AppSettings
from console_singleton import get_console
from state.models import Segment, SegmentStatus
from state.store import load_segments
from state.store import load_state as load_translation_state
from .assembly import assemble_audiobook
from .models import AudioSegmentStatus
from .preprocess import segment_to_text, split_sentences
from .renderer import SegmentRenderer
from .state import (
ensure_state,
get_or_create_segment,
mark_status,
reset_error_segments,
save_state,
set_consecutive_failures,
set_cooldown,
)
from .state import (
load_state as load_audio_state,
)
from .tts import create_tts_engine
console = get_console()
def _build_audiobook_dashboard(
    *,
    total_segments: int,
    completed_segments: int,
    skipped_segments: int,
    error_segments: int,
    pending_segments: int,
    preview_lines: list[str],
    active_workers: int = 0,
    max_workers: int = 1,
    in_cooldown: bool = False,
    cooldown_remaining: str = "",
) -> Panel:
    """Build dashboard panel for audiobook synthesis.

    The panel always renders the same number of rows (stats, workers,
    cooldown, plus one preview row per worker slot) so the Rich Live display
    keeps a fixed height between refreshes.

    Args:
        total_segments: Total number of segments scheduled for synthesis.
        completed_segments: Segments finished successfully.
        skipped_segments: Segments skipped (empty text, translation skips, ...).
        error_segments: Segments currently in error state.
        pending_segments: Segments still to be synthesized.
        preview_lines: Most recent per-worker preview strings; slots beyond
            its length render empty.
        active_workers: Currently running worker count.
        max_workers: Worker slots; controls the number of preview rows.
        in_cooldown: True while synthesis is paused after repeated failures.
        cooldown_remaining: Human-readable time left in the cooldown.

    Returns:
        Panel: Rich panel ready for Live rendering.
    """
    stats = Table.grid(padding=(1, 1))
    stats.add_column(style="bold cyan", justify="left", no_wrap=True)
    stats.add_column(justify="left", no_wrap=True)
    stats.add_row(
        "segments",
        f"total {total_segments}, completed {completed_segments}, skipped {skipped_segments}",
    )
    stats.add_row(
        "",
        f"errors {error_segments}, pending {pending_segments}",
    )
    # Always show workers row (fixed height)
    if max_workers > 1:
        stats.add_row("workers", f"active {active_workers}/{max_workers}")
    else:
        # Plain string: the previous f-string had no placeholders.
        stats.add_row("workers", "single worker")
    # Always show cooldown row (fixed height)
    if in_cooldown:
        stats.add_row("cooldown", f"⏸ waiting {cooldown_remaining} (3 consecutive fails)")
    else:
        stats.add_row("cooldown", "")  # Empty row to maintain height
    # ALWAYS show exactly max_workers preview lines (fixed height)
    for i in range(max_workers):
        label = f"w{i+1}" if max_workers > 1 else "current"
        # Truncate preview text to 60 chars to prevent wrapping
        text = preview_lines[i] if i < len(preview_lines) else ""
        if text and len(text) > 60:
            text = text[:59] + "…"
        stats.add_row(label, text or "")
    return Panel(stats, border_style="magenta", title="Dashboard", padding=(1, 1))
def _truncate_text(text: str, max_length: int = 80) -> str:
"""Truncate text to max_length, adding ellipsis if needed."""
if len(text) <= max_length:
return text
return text[: max_length - 1] + "…"
class SynthesisResult:
    """Result of synthesizing audio for a single segment."""

    # Fixed attribute set keeps per-result memory small across many segments.
    __slots__ = ("segment_id", "audio_path", "duration", "error", "attempts", "text_preview")

    def __init__(
        self,
        segment_id: str,
        audio_path: Path | None = None,
        duration: float | None = None,
        error: Exception | None = None,
        attempts: int = 0,
        text_preview: str = "",
    ):
        self.segment_id = segment_id  # id of the segment this result describes
        self.audio_path = audio_path  # rendered audio file; None on failure
        self.duration = duration  # rendered audio length; None on failure
        self.error = error  # last exception raised; None on success
        self.attempts = attempts  # attempts actually consumed
        self.text_preview = text_preview  # first-sentence excerpt for the dashboard
def _synthesize_segment(
    work: "SegmentWork",
    renderer: SegmentRenderer,
    output_dir: Path,
    max_attempts: int = 3,
) -> SynthesisResult:
    """Synthesize audio for a single segment. Thread-safe worker function.

    Args:
        work: SegmentWork containing segment and sentences
        renderer: SegmentRenderer instance
        output_dir: Output directory for audio files
        max_attempts: Maximum number of retry attempts

    Returns:
        SynthesisResult with audio_path/duration or error
    """
    seg_id = work.segment.segment_id
    # Dashboard preview text comes from the first sentence, if any.
    preview = " ".join(work.sentences[:1]) if work.sentences else ""
    failure: Exception | None = None
    attempt = 0
    while attempt < max_attempts:
        attempt += 1
        try:
            audio_path, duration = renderer.render_segment(seg_id, work.sentences, output_dir)
        except Exception as exc:  # noqa: BLE001
            failure = exc
            if attempt < max_attempts:
                time.sleep(60)  # Wait before retry
            continue
        return SynthesisResult(
            segment_id=seg_id,
            audio_path=audio_path,
            duration=duration,
            attempts=attempt,
            text_preview=preview,
        )
    # All attempts failed: report the last error.
    return SynthesisResult(
        segment_id=seg_id,
        error=failure,
        attempts=max_attempts,
        text_preview=preview,
    )
class SegmentWork:
    """Unit of synthesis work: a source segment plus its sentence-split text."""

    __slots__ = ("segment", "sentences")

    def __init__(self, segment: Segment, sentences: list[str]) -> None:
        self.segment = segment  # source EPUB segment
        self.sentences = sentences  # sentence-split text fed to the TTS engine
class AudiobookRunner:
    """Coordinates TTS synthesis of EPUB segments and final audiobook assembly.

    Persists per-segment progress under a provider-specific work directory
    so interrupted runs can resume.
    """

    def __init__(
        self,
        settings: AppSettings,
        input_epub: Path,
        voice: str,
        language: str | None = None,
        rate: str | None = None,
        volume: str | None = None,
        cover_path: Path | None = None,
        cover_only: bool = False,
        tts_provider: str | None = None,
        tts_model: str | None = None,
        tts_speed: float | None = None,
    ) -> None:
        self.settings = settings
        self.input_epub = input_epub
        self.voice = voice
        self.language = language
        self.rate = rate
        self.volume = volume
        self.cover_path = cover_path
        self.cover_only = cover_only
        # TTS provider settings (from config or parameters); explicit
        # arguments win over the settings defaults.
        self.tts_provider = tts_provider or settings.audiobook_tts_provider
        self.tts_model = tts_model or settings.audiobook_tts_model
        self.tts_speed = tts_speed if tts_speed is not None else settings.audiobook_tts_speed
        # Provider-specific output directories, e.g. work_dir/audiobook@edgetts
        provider_suffix = "edgetts" if self.tts_provider == "edge" else "openaitts"
        self.output_root = settings.work_dir / f"audiobook@{provider_suffix}"
        self.output_root.mkdir(parents=True, exist_ok=True)
        self.segment_audio_dir = self.output_root / "segments"
        self.segment_audio_dir.mkdir(parents=True, exist_ok=True)
        # JSON file tracking per-segment synthesis status for resumability.
        self.state_path = self.output_root / "audio_state.json"
        # Open EPUB reader for footnote filtering during re-extraction
        from epub_io.reader import EpubReader

        self.epub_reader = EpubReader(input_epub, settings)
    def _load_translation_state(self):
        """Load prior translation state, or None when no translation ran."""
        if self.settings.state_file.exists():
            return load_translation_state(self.settings.state_file)
        return None
    def _segments_to_process(self) -> list[SegmentWork]:
        """Build the ordered work list of segments needing audio synthesis.

        Applies the configured inclusion list (or skip metadata), drops
        segments skipped during translation, and marks empty/unsplittable
        segments as SKIPPED in the audio state as a side effect.
        """
        segments_doc = load_segments(self.settings.segments_file)
        # Filter segments based on audiobook_files inclusion list or skip metadata
        original_count = len(segments_doc.segments)
        if self.settings.audiobook_files is not None:
            # Explicit inclusion list takes precedence
            allowed_files = set(self.settings.audiobook_files)
            segments_doc.segments = [
                seg
                for seg in segments_doc.segments
                if seg.file_path.as_posix() in allowed_files
            ]
        else:
            # No inclusion list: filter out segments with skip metadata
            segments_doc.segments = [
                seg
                for seg in segments_doc.segments
                if seg.skip_reason is None
            ]
        filtered_count = original_count - len(segments_doc.segments)
        if filtered_count > 0:
            filter_type = "inclusion list" if self.settings.audiobook_files is not None else "skip rules"
            console.print(
                f"[cyan]Filtered {filtered_count} segments from {original_count} "
                f"based on {filter_type}[/cyan]"
            )
        # Segments skipped during translation are skipped for audio too.
        translation_state = self._load_translation_state()
        translation_skips = set()
        if translation_state:
            translation_skips = {
                seg_id
                for seg_id, record in translation_state.segments.items()
                if record.status == SegmentStatus.SKIPPED
            }
        # Process in reading order: spine position, then position within file.
        ordered_segments = sorted(
            segments_doc.segments,
            key=lambda seg: (seg.metadata.spine_index, seg.metadata.order_in_file),
        )
        works: list[SegmentWork] = []
        for segment in ordered_segments:
            if segment.segment_id in translation_skips:
                mark_status(
                    self.state_path,
                    segment.segment_id,
                    AudioSegmentStatus.SKIPPED,
                    last_error="translation skip",
                )
                continue
            text = segment_to_text(segment, reader=self.epub_reader)
            if not text:
                mark_status(
                    self.state_path,
                    segment.segment_id,
                    AudioSegmentStatus.SKIPPED,
                    last_error="empty or non-text content",
                )
                continue
            sentences = split_sentences(text)
            if not sentences:
                mark_status(
                    self.state_path,
                    segment.segment_id,
                    AudioSegmentStatus.SKIPPED,
                    last_error="could not split sentences",
                )
                continue
            works.append(SegmentWork(segment, sentences))
        return works
    def run(self) -> None:
        """Synthesize all pending segments, then assemble the final audiobook.

        Resumable: per-segment progress is persisted after every result, so
        interrupting (Ctrl-C) and re-running continues where it left off.
        In --cover-only mode, synthesis is skipped and assembly runs only if
        every segment already has audio on disk.
        """
        # Load or create persistent synthesis state for this provider/voice.
        state = ensure_state(
            self.state_path,
            self.segment_audio_dir,
            self.voice,
            language=self.language,
            cover_path=self.cover_path,
            tts_provider=self.tts_provider,
            tts_model=self.tts_model,
            tts_speed=self.tts_speed,
        )
        # Fall back to the cover remembered from a previous run.
        if self.cover_path is None and state.session.cover_path:
            self.cover_path = state.session.cover_path
        if self.cover_path:
            cover_fs = Path(self.cover_path)
            if cover_fs.exists():
                self.cover_path = cover_fs
            else:
                console.print(
                    f"[yellow]Cover path {cover_fs} is missing; falling back to automatic selection.[/yellow]"
                )
                self.cover_path = None
                state.session.cover_path = None
                save_state(state, self.state_path)
        if self.cover_only:
            # Assemble-only mode: every segment must already have audio on disk.
            segments_doc = load_segments(self.settings.segments_file)
            audio_state = load_audio_state(self.state_path)
            missing: list[str] = []
            for segment in segments_doc.segments:
                seg_state = audio_state.segments.get(segment.segment_id)
                if not seg_state or seg_state.status != AudioSegmentStatus.COMPLETED:
                    missing.append(segment.segment_id)
                    continue
                if not seg_state.audio_path or not Path(seg_state.audio_path).exists():
                    missing.append(segment.segment_id)
            if missing:
                console.print(
                    f"[red]Cannot assemble cover-only audiobook; {len(missing)} segments are incomplete or missing audio files.[/red]"
                )
                console.print(
                    "[yellow]Re-run without --cover-only to resynthesise missing segments.[/yellow]"
                )
                return
            final_path = assemble_audiobook(
                settings=self.settings,
                input_epub=self.input_epub,
                session=state.session,
                state_path=self.state_path,
                output_root=self.output_root,
            )
            if final_path:
                console.print(f"[green]Final audiobook written to {final_path}[/green]")
            else:
                console.print("[yellow]No completed segments found; nothing to assemble.[/yellow]")
            return
        # Create TTS engine based on provider
        engine = create_tts_engine(
            provider=self.tts_provider,
            voice=self.voice,
            rate=self.rate,
            volume=self.volume,
            model=self.tts_model,
            speed=self.tts_speed,
        )
        renderer = SegmentRenderer(engine, state.session.sentence_pause_range, epub_reader=self.epub_reader)
        works = self._segments_to_process()
        work_map = {work.segment.segment_id: work for work in works}
        total_segments = len(work_map)
        if total_segments == 0:
            console.print("[green]No segments require audio synthesis.[/green]")
            return
        # Segments left in ERROR state from a previous run get retried.
        reset_count = len(reset_error_segments(self.state_path))
        if reset_count:
            console.print(
                f"[yellow]Retrying {reset_count} segments left in error state from a previous run.[/yellow]"
            )
        # Track statistics for dashboard
        audio_state = load_audio_state(self.state_path)
        completed_segments = sum(
            1
            for seg_id in work_map
            if (
                seg_id in audio_state.segments
                and audio_state.segments[seg_id].status == AudioSegmentStatus.COMPLETED
            )
        )
        skipped_segments = sum(
            1
            for seg_id in work_map
            if (
                seg_id in audio_state.segments
                and audio_state.segments[seg_id].status == AudioSegmentStatus.SKIPPED
            )
        )
        error_segments = sum(
            1
            for seg_id in work_map
            if (
                seg_id in audio_state.segments
                and audio_state.segments[seg_id].status == AudioSegmentStatus.ERROR
            )
        )
        pending_segments = total_segments - completed_segments - skipped_segments
        max_workers = self.settings.audiobook_workers
        # Fixed-size array to prevent height changes (always max_workers slots)
        preview_lines = [""] * max_workers
        preview_index = 0
        active_workers = 0
        in_cooldown = False
        cooldown_remaining = ""
        progress = Progress(
            TextColumn("{task.description}"),
            BarColumn(),
            TextColumn("{task.completed}/{task.total}"),
            TimeElapsedColumn(),
            auto_refresh=False,
        )
        task_id = progress.add_task("Synthesis", total=total_segments, completed=completed_segments)
        # NOTE(review): cooldown_task appears unused below — confirm before removing.
        cooldown_task: TaskID | None = None

        def render_panel() -> Panel:
            # Snapshot of the counters above, rendered as the dashboard panel.
            return _build_audiobook_dashboard(
                total_segments=total_segments,
                completed_segments=completed_segments,
                skipped_segments=skipped_segments,
                error_segments=error_segments,
                pending_segments=pending_segments,
                preview_lines=preview_lines,
                active_workers=active_workers,
                max_workers=max_workers,
                in_cooldown=in_cooldown,
                cooldown_remaining=cooldown_remaining,
            )

        try:
            with Live(
                Group(render_panel(), progress),
                console=console,
                refresh_per_second=5,
                vertical_overflow="crop",  # Prevent height expansion causing scrolling
            ) as live:
                # Each pass retries everything not yet COMPLETED/SKIPPED.
                while True:
                    audio_state = load_audio_state(self.state_path)
                    pending_queue: list[str] = []
                    for seg_id in work_map:
                        seg_state = audio_state.segments.get(seg_id)
                        if seg_state and seg_state.status in (
                            AudioSegmentStatus.COMPLETED,
                            AudioSegmentStatus.SKIPPED,
                        ):
                            continue
                        pending_queue.append(seg_id)
                    if not pending_queue:
                        break
                    pass_successes = 0
                    pass_failures: list[str] = []
                    interrupted = False
                    # Use parallel synthesis with ThreadPoolExecutor
                    executor = ThreadPoolExecutor(max_workers=max_workers)
                    try:
                        # Submit all pending segments
                        future_to_seg_id = {}
                        for seg_id in pending_queue:
                            work = work_map[seg_id]
                            seg_state = get_or_create_segment(self.state_path, seg_id)
                            if (
                                seg_state.status == AudioSegmentStatus.COMPLETED
                                and seg_state.audio_path
                                and Path(seg_state.audio_path).exists()
                            ):
                                continue
                            future = executor.submit(
                                _synthesize_segment,
                                work,
                                renderer,
                                self.segment_audio_dir,
                                max_attempts=3,
                            )
                            future_to_seg_id[future] = seg_id
                            active_workers += 1
                        # Process results as they complete
                        try:
                            for future in as_completed(future_to_seg_id):
                                active_workers -= 1
                                seg_id = future_to_seg_id[future]
                                result = future.result()
                                if result.error:
                                    mark_status(
                                        self.state_path,
                                        seg_id,
                                        AudioSegmentStatus.ERROR,
                                        attempts=result.attempts,
                                        last_error=str(result.error),
                                    )
                                    error_msg = _truncate_text(str(result.error))
                                    preview_lines[preview_index] = f"[red]{error_msg}[/red]"
                                    error_segments += 1
                                    pending_segments -= 1
                                    pass_failures.append(seg_id)
                                    # Three consecutive failures trigger a 30-minute cooldown.
                                    audio_state = load_audio_state(self.state_path)
                                    consecutive = audio_state.consecutive_failures + 1
                                    set_consecutive_failures(self.state_path, consecutive)
                                    if consecutive >= 3:
                                        in_cooldown = True
                                        # NOTE(review): datetime.utcnow() is deprecated in
                                        # Python 3.12; datetime.now(timezone.utc) is the
                                        # modern equivalent — confirm before changing.
                                        cooldown_until = datetime.utcnow() + timedelta(minutes=30)
                                        set_cooldown(self.state_path, cooldown_until)
                                        duration = 30 * 60
                                        remaining = (cooldown_until - datetime.utcnow()).total_seconds()
                                        # Tick the dashboard every <=5s until the cooldown ends.
                                        while remaining > 0:
                                            mins = int(remaining // 60)
                                            secs = int(remaining % 60)
                                            cooldown_remaining = f"{mins}m {secs}s"
                                            live.update(Group(render_panel(), progress))
                                            sleep_for = min(5, remaining)
                                            time.sleep(sleep_for)
                                            remaining = (cooldown_until - datetime.utcnow()).total_seconds()
                                        set_cooldown(self.state_path, None)
                                        set_consecutive_failures(self.state_path, 0)
                                        in_cooldown = False
                                        cooldown_remaining = ""
                                else:
                                    mark_status(
                                        self.state_path,
                                        seg_id,
                                        AudioSegmentStatus.COMPLETED,
                                        audio_path=result.audio_path,
                                        duration_seconds=result.duration,
                                        attempts=result.attempts,
                                        last_error=None,
                                    )
                                    set_consecutive_failures(self.state_path, 0)
                                    text = _truncate_text(result.text_preview)
                                    preview_lines[preview_index] = f"[green]{text}[/green]"
                                    completed_segments += 1
                                    pending_segments -= 1
                                    pass_successes += 1
                                # Round-robin through preview slots
                                preview_index = (preview_index + 1) % max_workers
                                # Update live dashboard
                                progress.advance(task_id)
                                live.update(Group(render_panel(), progress))
                        except KeyboardInterrupt:
                            interrupted = True
                            console.print("\n[yellow]Interrupted by user. Saving progress...[/yellow]")
                            # Force immediate shutdown without waiting
                            executor.shutdown(wait=False, cancel_futures=True)
                            raise
                    finally:
                        # Clean shutdown for normal completion only
                        if not interrupted:
                            executor.shutdown(wait=True)
                    if pass_failures:
                        # If nothing succeeded this pass, stop retrying.
                        if pass_successes == 0:
                            break
                        # Reset preview lines to "waiting…" for next pass
                        preview_lines = ["waiting…"] * max_workers
                        preview_index = 0
                        reset_error_segments(self.state_path, pass_failures)
                        continue
            # Synthesis finished (or gave up): report segments still in error.
            final_state = load_audio_state(self.state_path)
            outstanding = [
                seg_id
                for seg_id in work_map
                if (
                    seg_id in final_state.segments
                    and final_state.segments[seg_id].status == AudioSegmentStatus.ERROR
                )
            ]
            if outstanding:
                console.print(
                    f"[red]{len(outstanding)} segments remain in error state; rerun the command to retry them.[/red]"
                )
                return
            final_path = assemble_audiobook(
                settings=self.settings,
                input_epub=self.input_epub,
                session=state.session,
                state_path=self.state_path,
                output_root=self.output_root,
            )
            if final_path:
                console.print(f"[green]Final audiobook written to {final_path}[/green]")
        except KeyboardInterrupt:
            console.print(
                "[yellow]Progress saved; resume later with tepub audiobook.[/yellow]"
            )
            sys.exit(0)
def run_audiobook(
    settings: AppSettings,
    input_epub: Path,
    voice: str,
    language: str | None = None,
    rate: str | None = None,
    volume: str | None = None,
    cover_path: Path | None = None,
    cover_only: bool = False,
    tts_provider: str | None = None,
    tts_model: str | None = None,
    tts_speed: float | None = None,
) -> None:
    """Convenience entry point: build an AudiobookRunner and execute it.

    All arguments are forwarded verbatim to AudiobookRunner.
    """
    AudiobookRunner(
        settings=settings,
        input_epub=input_epub,
        voice=voice,
        language=language,
        rate=rate,
        volume=volume,
        cover_path=cover_path,
        cover_only=cover_only,
        tts_provider=tts_provider,
        tts_model=tts_model,
        tts_speed=tts_speed,
    ).run()
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/cover.py | Python | from __future__ import annotations
import posixpath
from dataclasses import dataclass
from pathlib import Path, PurePosixPath
from lxml import etree
from epub_io.reader import EpubReader
from epub_io.resources import get_item_by_href
from epub_io.path_utils import normalize_epub_href
@dataclass
class SpineCoverCandidate:
    # First usable in-spine image: the image href plus the XHTML document
    # it was found in (both as package-relative paths).
    href: Path
    document_href: Path


# Attributes that may carry an image reference on <img>/<image> elements;
# the Clark-notation entry is SVG's xlink:href.
_IMAGE_ATTRS = (
    "src",
    "href",
    "{http://www.w3.org/1999/xlink}href",
)
def find_spine_cover_candidate(reader: EpubReader) -> SpineCoverCandidate | None:
    """Scan spine documents in order and return the first <img>/<image>
    element whose target resolves to a real item in the EPUB package.

    Returns None when no document contains a resolvable image reference.
    """

    def _image_href(element) -> str | None:
        # First non-empty src/href/xlink:href attribute, if any.
        return next((element.get(attr) for attr in _IMAGE_ATTRS if element.get(attr)), None)

    for document in reader.iter_documents():
        if document.tree is None:
            continue
        for node in document.tree.iter():
            try:
                local_name = etree.QName(node.tag).localname
            except (ValueError, AttributeError):
                # Comments / processing instructions have non-QName tags.
                continue
            if local_name not in ("img", "image"):
                continue
            resolved = normalize_epub_href(document.path, _image_href(node) or "")
            if not resolved:
                continue
            try:
                get_item_by_href(reader.book, Path(resolved))
            except KeyError:
                # Reference points at nothing in the package; keep looking.
                continue
            return SpineCoverCandidate(href=Path(resolved), document_href=document.path)
    return None
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/language.py | Python | from __future__ import annotations
from collections import Counter
from collections.abc import Iterable
from langdetect import DetectorFactory, LangDetectException, detect
DetectorFactory.seed = 0
def detect_language(text_samples: Iterable[str]) -> str | None:
    """Majority-vote language detection over the given text samples.

    Each non-empty sample casts one vote via langdetect; voting stops after
    20 successful detections. Returns the winning language code, or None
    when no sample could be detected.
    """
    ballots: Counter = Counter()
    for raw in text_samples:
        sample = (raw or "").strip()
        if not sample:
            continue
        try:
            ballots[detect(sample)] += 1
        except LangDetectException:
            continue
        if sum(ballots.values()) >= 20:
            break
    if not ballots:
        return None
    (winner, _votes), = ballots.most_common(1)
    return winner
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/models.py | Python | from __future__ import annotations
from datetime import datetime
from enum import Enum
from pathlib import Path
from pydantic import BaseModel, Field
class AudioSegmentStatus(str, Enum):
    """Lifecycle states of one audiobook segment's TTS rendering."""

    PENDING = "pending"          # not yet attempted
    IN_PROGRESS = "in_progress"  # currently being synthesized
    COMPLETED = "completed"      # audio rendered successfully
    ERROR = "error"              # last attempt failed; eligible for retry
    SKIPPED = "skipped"          # intentionally excluded from rendering
class AudioSegmentState(BaseModel):
    """Per-segment rendering progress persisted in the audio state file."""

    segment_id: str
    status: AudioSegmentStatus = AudioSegmentStatus.PENDING
    attempts: int = 0                      # synthesis attempts so far
    audio_path: Path | None = None         # rendered audio file, once completed
    duration_seconds: float | None = None  # duration of the rendered audio
    last_error: str | None = None          # message from the most recent failure
    updated_at: datetime = Field(default_factory=datetime.utcnow)  # naive UTC timestamp
class AudioSessionConfig(BaseModel):
    """Session-wide audiobook settings (voice, pacing, provider)."""

    voice: str                      # provider-specific voice name
    language: str | None = None     # detected or user-supplied language code
    output_dir: Path                # where rendered segment audio is written
    # Random pause ranges in seconds, used between sentences / segments.
    sentence_pause_range: tuple[float, float] = (1.0, 2.0)
    segment_pause_range: tuple[float, float] = (2.0, 4.0)
    cover_path: Path | None = None  # optional cover image override
    # TTS provider settings
    tts_provider: str = "edge"      # "edge" (free) or "openai" (paid)
    tts_model: str | None = None  # For OpenAI: tts-1 or tts-1-hd
    tts_speed: float = 1.0  # For OpenAI: 0.25-4.0
class AudioStateDocument(BaseModel):
    """Root document persisted between runs: session config plus all segment states."""

    session: AudioSessionConfig
    segments: dict[str, AudioSegmentState]  # keyed by segment_id
    consecutive_failures: int = 0           # failure streak tracked across segments
    cooldown_until: datetime | None = None  # if set, hold off synthesis until this time
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/mp4chapters.py | Python | from __future__ import annotations
import io
import struct
from collections.abc import Iterable, Sequence
from pathlib import Path
from mutagen._util import insert_bytes, resize_bytes
from mutagen.mp4 import MP4, Atom, Atoms, MP4Chapters, MP4Tags
ChapterTuple = tuple[float, str]
def _build_chpl_payload(chapters: Sequence[ChapterTuple], timescale: int) -> bytes:
if timescale <= 0:
timescale = 1000
body = bytearray()
body.append(len(chapters))
for seconds, title in chapters:
safe_title = (title or "").strip()
if not safe_title:
safe_title = "Chapter"
encoded = safe_title.encode("utf-8")[:255]
start = int(round(seconds * timescale * 10000))
body.extend(struct.pack(">Q", start))
body.append(len(encoded))
body.extend(encoded)
header = struct.pack(">I", 0x01000000) + b"\x00\x00\x00\x00"
return header + body
def _movie_timescale(fileobj: io.BufferedRandom, atoms: Atoms) -> int:
    """Read the movie timescale from the mvhd atom, defaulting to 1000.

    Relies on mutagen's private MP4Chapters mvhd parser; when the atom is
    missing, or it reports a zero timescale, the common 1000-units-per-second
    default is returned instead.
    """
    try:
        mvhd_atom = atoms.path(b"moov", b"mvhd")[-1]
    except KeyError:
        return 1000
    chapters = MP4Chapters()
    chapters._parse_mvhd(mvhd_atom, fileobj)
    return chapters._timescale or 1000
def _apply_delta(
    helper: MP4Tags,
    fileobj: io.BufferedRandom,
    parents: Iterable[Atom],
    atoms: Atoms,
    delta: int,
    offset: int,
) -> None:
    """Fix up atom sizes and sample-chunk offsets after a resize at `offset`.

    Delegates to mutagen's private name-mangled helpers: parent atom lengths
    are adjusted by `delta`, and chunk-offset tables pointing past `offset`
    are shifted accordingly. No-op when nothing changed size.
    """
    if delta == 0:
        return
    helper._MP4Tags__update_parents(fileobj, list(parents), delta)
    helper._MP4Tags__update_offsets(fileobj, atoms, delta, offset)
def _replace_existing_chpl(
    helper: MP4Tags,
    fileobj: io.BufferedRandom,
    atoms: Atoms,
    chpl_atom: bytes,
    path: list[Atom],
) -> None:
    """Overwrite an existing moov/udta/chpl atom in place.

    The file region holding the old atom is resized to fit the new bytes,
    the new atom is written, and then ancestor sizes / chunk offsets are
    corrected by the size difference.
    """
    target = path[-1]
    offset = target.offset
    original_length = target.length
    resize_bytes(fileobj, original_length, len(chpl_atom), offset)
    fileobj.seek(offset)
    fileobj.write(chpl_atom)
    delta = len(chpl_atom) - original_length
    # path[:-1]: only the ancestors grow; the chpl atom itself was rewritten.
    _apply_delta(helper, fileobj, path[:-1], atoms, delta, offset)
def _append_to_udta(
    helper: MP4Tags,
    fileobj: io.BufferedRandom,
    atoms: Atoms,
    chpl_atom: bytes,
    udta_path: list[Atom],
) -> None:
    """Insert a new chpl atom at the end of an existing udta atom.

    Bytes are inserted just past udta's current extent; because udta itself
    is included in `udta_path`, _apply_delta grows it (and its ancestors)
    so the new atom becomes udta's last child.
    """
    udta_atom = udta_path[-1]
    insert_offset = udta_atom.offset + udta_atom.length
    insert_bytes(fileobj, len(chpl_atom), insert_offset)
    fileobj.seek(insert_offset)
    fileobj.write(chpl_atom)
    _apply_delta(helper, fileobj, udta_path, atoms, len(chpl_atom), insert_offset)
def _create_udta_with_chpl(
    helper: MP4Tags,
    fileobj: io.BufferedRandom,
    atoms: Atoms,
    chpl_atom: bytes,
    moov_path: list[Atom],
) -> None:
    """Create a brand-new udta atom containing chpl and attach it to moov.

    The udta atom is written just past moov's current end; moov (included in
    `moov_path`) is then enlarged by _apply_delta so it adopts the new atom.
    """
    udta_atom = Atom.render(b"udta", chpl_atom)
    insert_offset = moov_path[-1].offset + moov_path[-1].length
    insert_bytes(fileobj, len(udta_atom), insert_offset)
    fileobj.seek(insert_offset)
    fileobj.write(udta_atom)
    _apply_delta(helper, fileobj, moov_path, atoms, len(udta_atom), insert_offset)
def write_chapter_markers(mp4_path: Path, markers: Sequence[tuple[int, str]]) -> None:
    """Embed Nero-style chapter markers (a 'chpl' atom) into an MP4/M4A file.

    Args:
        mp4_path: File to modify in place.
        markers: (start_ms, title) pairs; no-op when empty.

    The chpl atom is replaced when one already exists, appended to an
    existing udta otherwise, or wrapped in a brand-new udta under moov as a
    last resort.
    """
    if not markers:
        return
    # chpl stores seconds; markers arrive in milliseconds.
    seconds_markers: list[ChapterTuple] = [
        (start_ms / 1000.0, title) for start_ms, title in markers
    ]
    with open(mp4_path, "r+b") as fh:
        atoms = Atoms(fh)
        timescale = _movie_timescale(fh, atoms)
        payload = _build_chpl_payload(seconds_markers, timescale)
        chpl_atom = Atom.render(b"chpl", payload)
        helper = MP4Tags()
        try:
            path = atoms.path(b"moov", b"udta", b"chpl")
        except KeyError:
            try:
                udta_path = atoms.path(b"moov", b"udta")
            except KeyError:
                moov_path = atoms.path(b"moov")
                _create_udta_with_chpl(helper, fh, atoms, chpl_atom, moov_path)
            else:
                _append_to_udta(helper, fh, atoms, chpl_atom, udta_path)
        else:
            _replace_existing_chpl(helper, fh, atoms, chpl_atom, path)
    # Optional load to ensure chapters read back; ignore failures silently
    try:
        mp4 = MP4(mp4_path)
        _ = mp4.chapters
    except Exception:
        pass
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/preprocess.py | Python | from __future__ import annotations
import html
import random
import re
import nltk
from lxml import etree
from lxml import html as lxml_html
from state.models import ExtractMode, Segment
# Matches text that already ends in terminal punctuation.
BLOCK_PUNCTUATION = re.compile(r"[.!?…]$")
# A string made up entirely of non-word characters (not worth speaking).
NON_WORD_RE = re.compile(r"^[^\w]+$")
# Splits on whitespace following Western or CJK sentence-ending punctuation.
SENTENCE_SPLIT_RE = re.compile(r"(?<=[.!?。?!])\s+")
# HTML list containers whose items get punctuation appended for TTS pauses.
LIST_TAGS = {"ul", "ol"}
# Roman numeral pattern and conversion
# Optional Chapter/Part/Book/Section prefix, the numeral itself, and an
# optional trailing period/colon/hyphen/em dash.
ROMAN_NUMERAL_PATTERN = re.compile(
    r'^(?:(Chapter|Part|Book|Section)\s+)?([IVXLCDM]+)([.:\-—]?)$',
    re.IGNORECASE
)
# Lookup table for supported Roman numerals. Deliberately not exhaustive:
# 1-50 plus selected round values; unmapped numerals are left unconverted.
ROMAN_TO_INT = {
    'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5,
    'VI': 6, 'VII': 7, 'VIII': 8, 'IX': 9, 'X': 10,
    'XI': 11, 'XII': 12, 'XIII': 13, 'XIV': 14, 'XV': 15,
    'XVI': 16, 'XVII': 17, 'XVIII': 18, 'XIX': 19, 'XX': 20,
    'XXI': 21, 'XXII': 22, 'XXIII': 23, 'XXIV': 24, 'XXV': 25,
    'XXVI': 26, 'XXVII': 27, 'XXVIII': 28, 'XXIX': 29, 'XXX': 30,
    'XXXI': 31, 'XXXII': 32, 'XXXIII': 33, 'XXXIV': 34, 'XXXV': 35,
    'XXXVI': 36, 'XXXVII': 37, 'XXXVIII': 38, 'XXXIX': 39, 'XL': 40,
    'XLI': 41, 'XLII': 42, 'XLIII': 43, 'XLIV': 44, 'XLV': 45,
    'XLVI': 46, 'XLVII': 47, 'XLVIII': 48, 'XLIX': 49, 'L': 50,
    'LX': 60, 'LXX': 70, 'LXXX': 80, 'XC': 90, 'C': 100,
}
# English words for the numbers above; keys must cover ROMAN_TO_INT's values.
INT_TO_WORDS = {
    1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five',
    6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten',
    11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', 15: 'Fifteen',
    16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', 19: 'Nineteen', 20: 'Twenty',
    21: 'Twenty-one', 22: 'Twenty-two', 23: 'Twenty-three', 24: 'Twenty-four', 25: 'Twenty-five',
    26: 'Twenty-six', 27: 'Twenty-seven', 28: 'Twenty-eight', 29: 'Twenty-nine', 30: 'Thirty',
    31: 'Thirty-one', 32: 'Thirty-two', 33: 'Thirty-three', 34: 'Thirty-four', 35: 'Thirty-five',
    36: 'Thirty-six', 37: 'Thirty-seven', 38: 'Thirty-eight', 39: 'Thirty-nine', 40: 'Forty',
    41: 'Forty-one', 42: 'Forty-two', 43: 'Forty-three', 44: 'Forty-four', 45: 'Forty-five',
    46: 'Forty-six', 47: 'Forty-seven', 48: 'Forty-eight', 49: 'Forty-nine', 50: 'Fifty',
    60: 'Sixty', 70: 'Seventy', 80: 'Eighty', 90: 'Ninety', 100: 'One hundred',
}
ELLIPSIS_PATTERN = re.compile(r"(\.\s+){2,}\.")
def _normalize_ellipsis(text: str) -> str:
def replace(match: re.Match[str]) -> str:
raw = match.group()
return "..." if raw else raw
new_text = text
while True:
updated = ELLIPSIS_PATTERN.sub(replace, new_text)
if updated == new_text:
break
new_text = updated
return new_text
def ensure_punkt() -> None:
    """Download NLTK's punkt sentence tokenizer on first use (idempotent)."""
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        nltk.download("punkt")
def _ensure_list_punctuation(root: lxml_html.HtmlElement) -> None:
    """Append a period to <li> items that lack terminal punctuation.

    Gives TTS a sentence boundary after each list item so a list is not
    read as one run-on sentence. Mutates the tree in place.
    """
    for li in root.xpath("//li"):
        if not li.text_content():
            continue
        text = li.text or ""
        if text and BLOCK_PUNCTUATION.search(text.strip()):
            continue
        if li.text:
            li.text = li.text.rstrip() + ". "
        else:
            # Text lives in child elements: flatten it, terminate it, and set
            # it as the item's leading text. NOTE(review): the inserted empty
            # <span> appears to be a placeholder and the original children are
            # left in place, which may duplicate their text — confirm intent.
            child_text = li.text_content().rstrip()
            if child_text and not BLOCK_PUNCTUATION.search(child_text):
                li.insert(0, etree.Element("span"))
                li.text = child_text + ". "
def _html_to_text(raw_html: str, element_type: str) -> str:
    """Flatten an HTML fragment to whitespace-normalized plain text.

    For list containers (ul/ol), items are first given terminal punctuation
    so the TTS engine pauses between them.
    """
    root = lxml_html.fromstring(raw_html)
    if element_type in LIST_TAGS:
        _ensure_list_punctuation(root)
    text = root.text_content()
    return html.unescape(" ".join(text.split()))
def _normalize_text(text: str) -> str:
return " ".join(text.strip().split())
def _convert_roman_numeral_to_spoken(text: str, segment: Segment) -> str:
    """Spell out Roman-numeral titles, e.g. "Chapter IV" -> "Chapter Four".

    Applies only when the segment is a heading (h1-h6) or the first segment
    of its file, and the whole text is a Roman numeral with an optional
    Chapter/Part/Book/Section prefix and optional trailing punctuation.
    Numerals missing from the lookup tables are returned unchanged.

    Args:
        text: The text content to check.
        segment: The segment metadata.

    Returns:
        Text with the numeral converted to its spoken form, or unchanged.
    """
    meta = segment.metadata
    is_heading = meta.element_type in {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
    if not (is_heading or meta.order_in_file == 1):
        return text

    match = ROMAN_NUMERAL_PATTERN.match(text.strip())
    if match is None:
        return text

    prefix = match.group(1) or ""       # Chapter/Part/Book/Section
    numeral = match.group(2).upper()    # the Roman numeral itself
    trailing = match.group(3) or ""     # trailing punctuation

    value = ROMAN_TO_INT.get(numeral)
    if value is None:
        return text                     # unsupported numeral: leave as-is
    spoken = INT_TO_WORDS.get(value)
    if spoken is None:
        return text                     # number outside the word table

    return f"{prefix} {spoken}{trailing}" if prefix else f"{spoken}{trailing}"
def _reextract_filtered(segment: Segment, reader) -> str:
    """Re-extract element from EPUB with footnote filtering.

    Args:
        segment: Segment to re-extract
        reader: EpubReader instance

    Returns:
        Filtered text content without footnote references

    Raises:
        ValueError: If the segment's xpath no longer matches the document.
    """
    # Load the document
    doc = reader.read_document_by_path(segment.file_path)
    # Find element by xpath
    elements = doc.tree.xpath(segment.xpath)
    if not elements:
        raise ValueError(f"Element not found at xpath: {segment.xpath}")
    element = elements[0]
    # Clone to avoid modifying original
    clone = lxml_html.fromstring(lxml_html.tostring(element, encoding="unicode"))
    # Remove footnote references (a tags with sup/sub children)
    # Preserve tail text before removing the element
    for link in clone.xpath('.//a[sup or sub]'):
        parent = link.getparent()
        if parent is not None:
            # Preserve the tail text (text after the link element)
            if link.tail:
                # Reattach it to the previous sibling's tail, or to the
                # parent's leading text when the link is the first child.
                prev = link.getprevious()
                if prev is not None:
                    prev.tail = (prev.tail or "") + link.tail
                else:
                    parent.text = (parent.text or "") + link.tail
            parent.remove(link)
    # Extract text, collapsing whitespace runs to single spaces
    text = " ".join(clone.text_content().split())
    return text
def segment_to_text(segment: Segment, reader=None) -> str | None:
    """Convert segment to text, optionally re-extracting from EPUB with footnote filtering.

    Args:
        segment: Segment to convert
        reader: Optional EpubReader for re-extraction with filtering

    Returns:
        Text content, or None if segment should be skipped
    """
    # Tables and figures are never narrated.
    if segment.metadata.element_type in {"table", "figure"}:
        return None
    # Skip footnote/endnote definition sections based on segment ID or xpath
    # Common patterns: ftn*, fn*, note*, endnote*, footnote*
    seg_id_lower = segment.segment_id.lower()
    xpath_lower = segment.xpath.lower()
    footnote_id_patterns = ["ftn", "fn-", "note-", "endnote", "footnote"]
    if any(pattern in seg_id_lower for pattern in footnote_id_patterns):
        return None
    # Check xpath for footnote container divs
    footnote_xpath_patterns = ["footnote", "endnote", "notes"]
    if any(f"div[@id='{pattern}" in xpath_lower or f"div[@class='{pattern}" in xpath_lower
           for pattern in footnote_xpath_patterns):
        return None
    # If reader provided, re-extract with footnote filtering
    if reader is not None:
        try:
            content = _reextract_filtered(segment, reader)
        except Exception:
            # Fallback to stored content if re-extraction fails
            if segment.extract_mode == ExtractMode.HTML:
                content = _html_to_text(segment.source_content, segment.metadata.element_type)
            else:
                content = _normalize_text(segment.source_content)
    else:
        # Use stored content
        if segment.extract_mode == ExtractMode.HTML:
            content = _html_to_text(segment.source_content, segment.metadata.element_type)
        else:
            content = _normalize_text(segment.source_content)
    content = _normalize_ellipsis(content)
    if not content:
        return None
    # Pure punctuation/symbol runs (e.g. "***" separators) are not narrated.
    if NON_WORD_RE.match(content):
        return None
    # Convert Roman numerals in titles to spoken form
    content = _convert_roman_numeral_to_spoken(content, segment)
    return content
def split_sentences(text: str, seed: int | None = None) -> list[str]:
    """Split text into cleaned sentences using NLTK's punkt tokenizer.

    Args:
        text: Input text; spaced-out ellipses are normalized first.
        seed: Unused; kept for backward compatibility with existing callers.

    Returns:
        Non-empty sentences with leading punctuation artifacts stripped.
        Falls back to the whole normalized text as a single sentence when
        nothing survives cleaning.

    NOTE(review): the English punkt model is loaded regardless of the book's
    language — confirm whether a per-language tokenizer is intended here.
    """
    normalized = _normalize_ellipsis(text)
    ensure_punkt()
    tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
    sentences = tokenizer.tokenize(normalized)
    cleaned: list[str] = []
    for sentence in sentences:
        stripped = sentence.strip()
        if not stripped:
            continue
        # Remove leading punctuation artifacts (lstrip strips '.' and ' ' chars)
        stripped = stripped.lstrip(". ")
        if not stripped:
            continue
        if NON_WORD_RE.match(stripped):
            continue
        cleaned.append(stripped)
    if not cleaned:
        normalized = normalized.strip()
        return [normalized] if normalized else []
    return cleaned
def random_pause(range_seconds: tuple[float, float], seed: int | None = None) -> float:
low, high = range_seconds
if seed is not None:
rng = random.Random(seed)
return rng.uniform(low, high)
return random.uniform(low, high)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/renderer.py | Python | from __future__ import annotations
import os
import random
import tempfile
from collections.abc import Sequence
from pathlib import Path
os.environ.setdefault("PYDUB_SIMPLE_AUDIOOP", "1")
from pydub import AudioSegment
from .preprocess import segment_to_text
from .tts import EdgeTTSEngine, OpenAITTSEngine, TTSEngine
class SegmentRenderer:
    """Render one text segment to a single .m4a file via a TTS engine.

    Sentences are synthesized individually and concatenated with short
    randomized pauses so the narration does not sound mechanical.
    """

    def __init__(
        self,
        engine: TTSEngine,
        sentence_pause_range: tuple[float, float],
        epub_reader=None,
    ) -> None:
        self.engine = engine
        self.sentence_pause_range = sentence_pause_range  # (low, high) seconds
        self.epub_reader = epub_reader
        # Determine file extension based on engine type: OpenAI emits AAC
        # directly; Edge TTS produces MP3.
        if isinstance(engine, OpenAITTSEngine):
            self.tts_extension = ".aac"
        else:
            self.tts_extension = ".mp3"

    def render_segment(
        self, segment_id: str, sentences: Sequence[str], output_dir: Path
    ) -> tuple[Path, float]:
        """Synthesize the sentences and export one combined AAC/MP4 file.

        Args:
            segment_id: Used for the output filename and the pause RNG seed.
            sentences: Non-empty sequence of sentences to speak.
            output_dir: Directory receiving the resulting <segment_id>.m4a.

        Returns:
            (output_path, duration_seconds) for the combined audio.

        Raises:
            ValueError: If `sentences` is empty.
        """
        if not sentences:
            raise ValueError("No sentences to render")
        output_dir.mkdir(parents=True, exist_ok=True)
        # NOTE(review): hash() of a str is salted per process (PYTHONHASHSEED),
        # so pause timings are not reproducible across runs — confirm whether
        # that matters; a stable digest (e.g. zlib.crc32) would make it so.
        segment_seed = hash(segment_id) & 0xFFFFFFFF
        rng = random.Random(segment_seed)
        audio_parts: list[AudioSegment] = []
        with tempfile.TemporaryDirectory(prefix=f"{segment_id}-tts-") as tmp_dir:
            tmp_root = Path(tmp_dir)
            for idx, sentence in enumerate(sentences):
                sentence_file = tmp_root / f"{idx:03d}{self.tts_extension}"
                self.engine.synthesize(sentence, sentence_file)
                part = AudioSegment.from_file(sentence_file)
                audio_parts.append(part)
                # Random pause between sentences (none after the last one).
                if idx < len(sentences) - 1:
                    pause_seconds = rng.uniform(*self.sentence_pause_range)
                    audio_parts.append(AudioSegment.silent(duration=int(pause_seconds * 1000)))
            combined = audio_parts[0]
            for part in audio_parts[1:]:
                combined += part
        output_path = output_dir / f"{segment_id}.m4a"
        combined.export(
            output_path,
            format="mp4",
            codec="aac",
            parameters=["-movflags", "+faststart", "-movie_timescale", "24000"],
        )
        return output_path, combined.duration_seconds
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/state.py | Python | from __future__ import annotations
import threading
from datetime import datetime
from pathlib import Path
from state.base import load_generic_state, save_generic_state
from .models import AudioSegmentState, AudioSegmentStatus, AudioSessionConfig, AudioStateDocument
# Thread-safe state file operations
_state_file_locks: dict[str, threading.Lock] = {}
_locks_lock = threading.Lock()
def _get_lock(path: Path) -> threading.Lock:
"""Get or create a lock for a specific state file path."""
path_str = str(path.resolve())
with _locks_lock:
if path_str not in _state_file_locks:
_state_file_locks[path_str] = threading.Lock()
return _state_file_locks[path_str]
def _default_state(
    output_dir: Path,
    voice: str,
    language: str | None = None,
    cover_path: Path | None = None,
    tts_provider: str = "edge",
    tts_model: str | None = None,
    tts_speed: float = 1.0,
) -> AudioStateDocument:
    """Build a fresh state document with the given session settings and no segments."""
    session = AudioSessionConfig(
        voice=voice,
        language=language,
        output_dir=output_dir,
        cover_path=cover_path,
        tts_provider=tts_provider,
        tts_model=tts_model,
        tts_speed=tts_speed,
    )
    return AudioStateDocument(session=session, segments={})
def load_state(path: Path) -> AudioStateDocument:
    """Load the audio state document from disk (read-only; no locking)."""
    return load_generic_state(path, AudioStateDocument)


def save_state(state: AudioStateDocument, path: Path) -> None:
    """Persist the state document while holding the file's lock."""
    lock = _get_lock(path)
    with lock:
        save_generic_state(state, path)
def ensure_state(
    path: Path,
    output_dir: Path,
    voice: str,
    language: str | None = None,
    cover_path: Path | None = None,
    tts_provider: str = "edge",
    tts_model: str | None = None,
    tts_speed: float = 1.0,
) -> AudioStateDocument:
    """Load the existing state file or create a fresh one, syncing session settings.

    When the file exists, caller-provided settings override the stored
    session (language only when truthy; cover_path/tts_model only when
    explicitly provided); the file is rewritten only if something changed.
    """
    if path.exists():
        state = load_state(path)
        changed = False
        if language and state.session.language != language:
            state.session.language = language
            changed = True
        # voice/provider/speed always follow the caller's values.
        if state.session.voice != voice:
            state.session.voice = voice
            changed = True
        if cover_path is not None and state.session.cover_path != cover_path:
            state.session.cover_path = cover_path
            changed = True
        if state.session.tts_provider != tts_provider:
            state.session.tts_provider = tts_provider
            changed = True
        if tts_model is not None and state.session.tts_model != tts_model:
            state.session.tts_model = tts_model
            changed = True
        if state.session.tts_speed != tts_speed:
            state.session.tts_speed = tts_speed
            changed = True
        if changed:
            save_state(state, path)
        return state
    state = _default_state(
        output_dir=output_dir,
        voice=voice,
        language=language,
        cover_path=cover_path,
        tts_provider=tts_provider,
        tts_model=tts_model,
        tts_speed=tts_speed,
    )
    save_state(state, path)
    return state
def update_segment_state(
    state_path: Path,
    segment_id: str,
    updater,
) -> AudioSegmentState:
    """Atomically read-modify-write one segment's state.

    `updater` receives the current segment state (or a brand-new PENDING one
    when the id is unknown) and returns the replacement; updated_at is
    stamped afterwards. The whole cycle runs under the state file's lock.
    """
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, AudioStateDocument)
        segment = state.segments.get(segment_id)
        if segment is None:
            segment = AudioSegmentState(segment_id=segment_id)
        updated = updater(segment)
        state.segments[segment_id] = updated
        state.segments[segment_id].updated_at = datetime.utcnow()
        save_generic_state(state, state_path)
        return updated
def mark_status(
    state_path: Path,
    segment_id: str,
    status: AudioSegmentStatus,
    **fields,
) -> AudioSegmentState:
    """Set a segment's status (plus any extra fields) and persist it.

    Extra keyword fields (e.g. audio_path, last_error, attempts) are merged
    into the stored segment before re-validation; locking and timestamping
    are handled by update_segment_state.
    """
    def _updater(segment: AudioSegmentState) -> AudioSegmentState:
        payload = segment.model_dump()
        payload.update(fields)
        payload["status"] = status
        payload.setdefault("segment_id", segment_id)
        return AudioSegmentState.model_validate(payload)

    return update_segment_state(state_path, segment_id, _updater)
def iter_segments_by_status(state: AudioStateDocument, status: AudioSegmentStatus):
    """Yield the segment states whose status equals the given status."""
    yield from (segment for segment in state.segments.values() if segment.status == status)
def get_or_create_segment(state_path: Path, segment_id: str) -> AudioSegmentState:
    """Return the stored state for a segment, creating a PENDING entry if absent.

    Creation is persisted immediately; the lookup+insert runs under the
    state file's lock.
    """
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, AudioStateDocument)
        existing = state.segments.get(segment_id)
        if existing:
            return existing
        new_seg = AudioSegmentState(segment_id=segment_id)
        state.segments[segment_id] = new_seg
        save_generic_state(state, state_path)
        return new_seg
def set_consecutive_failures(state_path: Path, count: int) -> None:
    """Persist the consecutive-failure counter under the file lock."""
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, AudioStateDocument)
        state.consecutive_failures = count
        save_generic_state(state, state_path)


def set_cooldown(state_path: Path, until: datetime | None) -> None:
    """Persist (or clear, with None) the cooldown deadline under the file lock."""
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, AudioStateDocument)
        state.cooldown_until = until
        save_generic_state(state, state_path)
def reset_error_segments(state_path: Path, segment_ids: list[str] | None = None) -> list[str]:
    """Flip ERROR segments back to PENDING so they can be retried.

    Args:
        state_path: Path to the audio state document.
        segment_ids: When given, only these ids are considered; None means
            every ERROR segment. An explicit empty list resets nothing
            (previously `if segment_ids` treated [] like None and reset ALL
            error segments).

    Returns:
        The ids of the segments that were actually reset.
    """
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, AudioStateDocument)
        changed = False
        reset_ids: list[str] = []
        # Identity check: [] must mean "reset none", not "reset all".
        target_ids = set(segment_ids) if segment_ids is not None else None
        for seg_id, segment in state.segments.items():
            if target_ids is not None and seg_id not in target_ids:
                continue
            if segment.status == AudioSegmentStatus.ERROR:
                segment.status = AudioSegmentStatus.PENDING
                segment.attempts = 0
                segment.updated_at = datetime.utcnow()
                state.segments[seg_id] = segment
                reset_ids.append(seg_id)
                changed = True
        if changed:
            # A retry wave starts with a clean failure streak.
            state.consecutive_failures = 0
            save_generic_state(state, state_path)
        return reset_ids
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/tts.py | Python | from __future__ import annotations
import asyncio
import os
from abc import ABC, abstractmethod
from pathlib import Path
import edge_tts
try:
from openai import OpenAI
HAS_OPENAI = True
except ImportError:
HAS_OPENAI = False
class TTSEngine(ABC):
    """Base class for text-to-speech engines."""

    @abstractmethod
    def synthesize(self, text: str, output_path: Path) -> None:
        """Synthesize text to audio and save to output_path.

        Args:
            text: Text to convert to speech
            output_path: Path where audio file should be saved
        """
        pass
class EdgeTTSEngine(TTSEngine):
    """Microsoft Edge TTS engine (free, 57+ voices)."""

    def __init__(self, voice: str, rate: str | None = None, volume: str | None = None) -> None:
        self.voice = voice
        self.rate = rate      # e.g. "+5%"; omitted from the request when None
        self.volume = volume  # e.g. "-10%"; omitted from the request when None

    async def _synthesize_async(self, text: str, output_path: Path) -> None:
        # Only pass rate/volume through when explicitly configured, so
        # edge-tts applies its own defaults otherwise.
        kwargs = {"voice": self.voice}
        if self.rate is not None:
            kwargs["rate"] = self.rate
        if self.volume is not None:
            kwargs["volume"] = self.volume
        communicator = edge_tts.Communicate(text, **kwargs)
        await communicator.save(str(output_path))

    def synthesize(self, text: str, output_path: Path) -> None:
        # Blocking wrapper; asyncio.run raises if called from a running
        # event loop, so this must be invoked from synchronous code only.
        asyncio.run(self._synthesize_async(text, output_path))
class OpenAITTSEngine(TTSEngine):
    """OpenAI TTS engine (paid, 6 premium voices).

    Voices: alloy, echo, fable, onyx, nova, shimmer
    Models: tts-1 (cheaper), tts-1-hd (higher quality)
    Speed: 0.25 to 4.0 (1.0 = normal)
    """

    def __init__(
        self,
        voice: str,
        model: str = "tts-1",
        speed: float = 1.0,
        api_key: str | None = None,
    ) -> None:
        # The openai package is an optional dependency; fail with guidance.
        if not HAS_OPENAI:
            raise ImportError(
                "OpenAI package required for OpenAI TTS. "
                "Install with: pip install openai"
            )
        self.voice = voice
        self.model = model
        self.speed = max(0.25, min(4.0, speed))  # Clamp to valid range
        # Use provided API key or fall back to environment variable
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError(
                "OpenAI API key required. Set OPENAI_API_KEY environment variable "
                "or pass api_key parameter."
            )
        self.client = OpenAI(api_key=self.api_key)

    def synthesize(self, text: str, output_path: Path) -> None:
        """Synthesize text using OpenAI TTS API.

        Outputs AAC format directly for optimal quality and performance.
        """
        response = self.client.audio.speech.create(
            model=self.model,
            voice=self.voice,  # type: ignore
            input=text,
            speed=self.speed,
            response_format="aac",  # Direct AAC output
        )
        # Save to file
        output_path.parent.mkdir(parents=True, exist_ok=True)
        # NOTE(review): stream_to_file is deprecated in newer openai SDKs in
        # favor of with_streaming_response — confirm the pinned SDK version.
        response.stream_to_file(str(output_path))
def create_tts_engine(
    provider: str,
    voice: str,
    **kwargs
) -> TTSEngine:
    """Factory function to create TTS engine based on provider.

    Args:
        provider: "edge" or "openai" (case-insensitive)
        voice: Voice name (provider-specific)
        **kwargs: Additional provider-specific parameters
            Edge TTS: rate, volume
            OpenAI TTS: model, speed, api_key

    Returns:
        TTSEngine instance

    Raises:
        ValueError: If provider is unknown

    Examples:
        >>> engine = create_tts_engine("edge", "en-US-GuyNeural", rate="+5%")
        >>> engine = create_tts_engine("openai", "nova", model="tts-1-hd", speed=1.1)
    """
    normalized = provider.lower()
    if normalized == "edge":
        return EdgeTTSEngine(
            voice=voice,
            rate=kwargs.get("rate"),
            volume=kwargs.get("volume"),
        )
    if normalized == "openai":
        return OpenAITTSEngine(
            voice=voice,
            model=kwargs.get("model", "tts-1"),
            speed=kwargs.get("speed", 1.0),
            api_key=kwargs.get("api_key"),
        )
    raise ValueError(
        f"Unknown TTS provider: {normalized}. "
        f"Supported providers: edge, openai"
    )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/audiobook/voices.py | Python | from __future__ import annotations
import asyncio
from functools import lru_cache
import edge_tts
@lru_cache(maxsize=1)
def _all_edge_voices() -> list[dict]:
    # Fetch the full Edge TTS voice catalogue once per process; the network
    # call is cached for subsequent lookups.
    return asyncio.run(edge_tts.list_voices())
def list_edge_voices_for_language(language_code: str | None) -> list[dict]:
    """List Edge TTS voices, preferring those matching a language prefix.

    Args:
        language_code: Locale prefix such as "en" or "zh"; None lists all.

    Returns:
        Matching voice dicts, or the full catalogue when no language was
        given or nothing matched the prefix.
    """
    catalogue = _all_edge_voices()
    if not language_code:
        return catalogue
    wanted = language_code.lower()
    matches = [
        voice for voice in catalogue
        if voice.get("Locale", "").lower().startswith(wanted)
    ]
    return matches or catalogue
def list_openai_voices() -> list[dict]:
    """List the six OpenAI TTS voices with static metadata.

    Returns:
        Voice dicts shaped like the Edge catalogue entries
        (ShortName/Locale/Gender) plus a Description field.
    """
    catalogue = (
        ("alloy", "en-US", "Neutral", "Neutral, balanced voice suitable for most content"),
        ("echo", "en-US", "Male", "Male voice, authoritative and clear"),
        ("fable", "en-GB", "Male", "British accent, expressive, great for storytelling"),
        ("onyx", "en-US", "Male", "Deep male voice, serious and professional"),
        ("nova", "en-US", "Female", "Female voice, energetic and friendly"),
        ("shimmer", "en-US", "Female", "Female voice, warm and expressive"),
    )
    return [
        {
            "ShortName": short_name,
            "Locale": locale,
            "Gender": gender,
            "Description": description,
        }
        for short_name, locale, gender, description in catalogue
    ]
def list_voices_for_provider(
    provider: str,
    language_code: str | None = None
) -> list[dict]:
    """Dispatch voice listing to the requested TTS provider.

    Args:
        provider: "edge" or "openai" (case-insensitive)
        language_code: Optional locale-prefix filter (Edge TTS only)

    Returns:
        List of voice dictionaries

    Raises:
        ValueError: If provider is unknown
    """
    normalized = provider.lower()
    if normalized == "edge":
        return list_edge_voices_for_language(language_code)
    if normalized == "openai":
        return list_openai_voices()
    raise ValueError(f"Unknown TTS provider: {normalized}")
# Backward compatibility
def list_voices_for_language(language_code: str | None) -> list[dict]:
    """Legacy function for Edge TTS voice listing.

    Deprecated: Use list_voices_for_provider("edge", language_code) instead.
    """
    return list_edge_voices_for_language(language_code)
def format_voice_entry(voice: dict, provider: str = "edge") -> str:
    """Render one voice's metadata as a single display line.

    Args:
        voice: Voice metadata dict (Edge or OpenAI shape).
        provider: "openai" shows the description; anything else is treated
            as Edge and shows the style list.

    Returns:
        Formatted string for display.
    """
    base = f"{voice.get('ShortName', '')} ({voice.get('Locale', '')}, {voice.get('Gender', '')}"
    if provider == "openai":
        return f"{base}) - {voice.get('Description', '')}"
    # Edge voices: show the style list, or "-" when none is declared.
    style_list = voice.get("StyleList") or []
    styles = ", ".join(style_list) if style_list else "-"
    return f"{base}, styles: {styles})"
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/__init__.py | Python | """CLI package for Tepub."""
from cli.main import app
__all__ = ["app"]
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/__init__.py | Python | """CLI commands module."""
import click
from cli.commands.audiobook import audiobook
from cli.commands.config import config
from cli.commands.export import export_command
from cli.commands.extract import extract
from cli.commands.format import format_cmd
from cli.commands.pipeline import pipeline_command
from cli.commands.resume import resume
from cli.commands.translate import translate
def register_commands(app: click.Group) -> None:
    """Register all CLI commands with the app."""
    app.add_command(extract)
    app.add_command(translate)
    app.add_command(audiobook)
    app.add_command(export_command)
    # Renamed on registration so the CLI surface stays stable even though the
    # underlying objects carry different Python identifiers.
    app.add_command(pipeline_command, name="pipeline")
    app.add_command(resume)
    app.add_command(format_cmd, name="format")
    app.add_command(config)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/audiobook.py | Python | """Audiobook command implementation."""
import os
import sys
from pathlib import Path
import click
from audiobook import run_audiobook
from audiobook.cover import SpineCoverCandidate, find_spine_cover_candidate
from audiobook.language import detect_language
from audiobook.preprocess import segment_to_text
from audiobook.state import load_state as load_audio_state
from audiobook.voices import format_voice_entry, list_voices_for_provider
from cli.core import prepare_settings_for_epub
from cli.errors import handle_state_errors
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
from epub_io.resources import get_item_by_href
from state.base import safe_load_state
from state.models import SegmentsDocument
from state.store import load_segments
console = get_console()
class HelpfulGroup(click.Group):
    """Command group that shows help instead of error for invalid commands."""

    def resolve_command(self, ctx, args):
        # Delegate to click; on an unknown subcommand print this group's
        # help text and exit with status 0 rather than a usage error.
        try:
            cmd_name, cmd, args = super().resolve_command(ctx, args)
            return cmd_name, cmd, args
        except click.UsageError:
            # Show help and exit on command not found
            click.echo(self.get_help(ctx))
            ctx.exit(0)
def _write_cover_candidate(
    settings: AppSettings,
    input_epub: Path,
) -> tuple[Path | None, SpineCoverCandidate | None]:
    # Detect a likely cover image in the EPUB spine and copy it into the
    # work dir ("audiobook/cover_candidates/") so the user can inspect or
    # approve it before it is embedded in the audiobook.
    # Returns (extracted_file_path, candidate_info), or (None, None) when
    # the EPUB cannot be opened, no candidate exists, or the candidate's
    # href cannot be resolved to an item.
    try:
        reader = EpubReader(input_epub, settings)
    except Exception:
        # Best-effort: a cover is optional, so any reader failure simply
        # disables auto-detection instead of aborting the command.
        return None, None
    candidate = find_spine_cover_candidate(reader)
    if not candidate:
        return None, None
    try:
        # candidate.href may be a str or a Path; normalize to Path for lookup.
        item = get_item_by_href(reader.book, Path(candidate.href) if isinstance(candidate.href, str) else candidate.href)
    except KeyError:
        return None, None
    cover_dir = settings.work_dir / "audiobook" / "cover_candidates"
    cover_dir.mkdir(parents=True, exist_ok=True)
    # Preserve the source extension when available; ".img" is a fallback.
    suffix = Path(candidate.href).suffix or ".img"
    target_name = f"spine_{Path(candidate.href).stem or 'candidate'}{suffix}"
    candidate_path = cover_dir / target_name
    candidate_path.write_bytes(item.get_content())
    return candidate_path, candidate
@click.group(cls=HelpfulGroup, invoke_without_command=True)
@click.pass_context
def audiobook(ctx: click.Context) -> None:
    """Audiobook generation and chapter management.

    Supports two TTS providers:
    - Edge TTS (default): Free, 57+ voices, no API key needed
    - OpenAI TTS: Paid (~$15/1M chars), 6 premium voices, requires OPENAI_API_KEY

    Subcommands:
    - generate: Create audiobook from EPUB file
    - export-chapters: Export chapter structure to YAML config
    - update-chapters: Update audiobook with new chapter markers from YAML
    """
    # When a subcommand was given, click dispatches to it; nothing to do here.
    if ctx.invoked_subcommand is not None:
        return
    # Bare `tepub audiobook`: print the group help and exit cleanly.
    click.echo(ctx.get_help())
    ctx.exit(0)
@audiobook.command(name="generate")
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.option("--voice", default=None, help="Voice name (provider-specific, skip to choose interactively).")
@click.option("--language", default=None, help="Override detected language (e.g. 'en').")
@click.option("--rate", default=None, help="Optional speaking rate override for Edge TTS, e.g. '+5%'.")
@click.option("--volume", default=None, help="Optional volume override for Edge TTS, e.g. '+2dB'.")
@click.option(
"--tts-provider",
default=None,
type=click.Choice(["edge", "openai"], case_sensitive=False),
help="TTS provider: edge (free, 57+ voices) or openai (paid, 6 premium voices). Default from config.",
)
@click.option(
"--tts-model",
default=None,
help="TTS model for OpenAI: tts-1 (cheaper) or tts-1-hd (higher quality).",
)
@click.option(
"--tts-speed",
default=None,
type=float,
help="Speech speed for OpenAI TTS (0.25-4.0, default 1.0).",
)
@click.option(
"--cover-path",
default=None,
type=click.Path(exists=True, path_type=Path),
help="Optional path to an image file to embed as the audiobook cover.",
)
@click.option(
"--cover-only",
is_flag=True,
help="Skip synthesis and only rebuild the audiobook container with the selected cover.",
)
@click.pass_context
@handle_state_errors
def generate(
ctx: click.Context,
input_epub: Path,
voice: str | None,
rate: str | None,
volume: str | None,
language: str | None,
tts_provider: str | None,
tts_model: str | None,
tts_speed: float | None,
cover_path: Path | None,
cover_only: bool,
) -> None:
"""Generate an audiobook from EPUB file using TTS.
INPUT_EPUB: Path to the EPUB file to convert to audiobook.
Examples:
tepub audiobook generate book.epub
tepub audiobook generate book.epub --tts-provider openai --voice nova
tepub audiobook generate book.epub --voice en-US-GuyNeural --rate '+10%'
"""
settings: AppSettings = ctx.obj["settings"]
settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
# Validate that segments file exists (required for audiobook) - errors handled by decorator
from exceptions import StateFileNotFoundError
if not settings.segments_file.exists():
raise StateFileNotFoundError("segments", input_epub)
# Also validate it's not corrupted
safe_load_state(settings.segments_file, SegmentsDocument, "segments")
# Determine TTS provider (from CLI, config, or stored state)
# Try provider-specific paths first, then fall back to legacy path
stored_voice = None
stored_language = None
stored_cover_path: Path | None = None
stored_provider = None
stored_model = None
stored_speed = None
# Check for stored state in provider-specific folders
edge_state_path = settings.work_dir / "audiobook@edgetts" / "audio_state.json"
openai_state_path = settings.work_dir / "audiobook@openaitts" / "audio_state.json"
legacy_state_path = settings.work_dir / "audiobook" / "audio_state.json"
for state_path in [edge_state_path, openai_state_path, legacy_state_path]:
if state_path.exists():
try:
stored_state = load_audio_state(state_path)
if stored_state:
stored_voice = stored_state.session.voice
stored_language = stored_state.session.language
stored_cover_path = stored_state.session.cover_path
stored_provider = stored_state.session.tts_provider
stored_model = stored_state.session.tts_model
stored_speed = stored_state.session.tts_speed
break # Use the first valid state found
except Exception:
continue
# Determine provider (CLI > config > stored)
# Config should take precedence over stored state so users can change provider
selected_provider = (tts_provider or settings.audiobook_tts_provider or stored_provider).lower()
selected_model = tts_model or settings.audiobook_tts_model or stored_model
selected_speed = (
tts_speed if tts_speed is not None
else (settings.audiobook_tts_speed if settings.audiobook_tts_speed is not None
else stored_speed)
)
# Warn if config changed from stored state provider
if stored_provider and settings.audiobook_tts_provider != stored_provider and not tts_provider:
console.print(
f"[yellow]Note: Provider changed from [bold]{stored_provider}[/bold] to [bold]{settings.audiobook_tts_provider}[/bold] in config. "
f"Starting fresh with {settings.audiobook_tts_provider}.[/yellow]"
)
console.print(f"[cyan]TTS Provider:[/cyan] {selected_provider}")
if selected_provider == "openai":
console.print(f"[cyan]Model:[/cyan] {selected_model or 'tts-1'}")
console.print(f"[cyan]Speed:[/cyan] {selected_speed}")
segments_doc = load_segments(settings.segments_file)
sample_texts: list[str] = []
for segment in segments_doc.segments:
text_sample = segment_to_text(segment)
if text_sample:
sample_texts.append(text_sample)
if len(sample_texts) >= 50:
break
detected_language = language or stored_language or detect_language(sample_texts)
if not language and detected_language:
console.print(f"[cyan]Detected language:[/cyan] {detected_language}")
# Voice selection based on provider
# Priority: CLI > config > stored (if compatible with provider)
selected_voice = voice or settings.audiobook_voice
if not selected_voice:
# Check if stored voice is compatible with selected provider
if stored_voice:
# Edge voices contain hyphens (e.g., en-US-GuyNeural)
# OpenAI voices are simple names (e.g., alloy, nova)
stored_is_edge = "-" in stored_voice
selected_is_edge = selected_provider == "edge"
# Only use stored voice if provider matches
if stored_is_edge == selected_is_edge:
selected_voice = stored_voice
if not selected_voice:
# Get voices for the selected provider
if selected_provider == "edge":
available_voices = list_voices_for_provider("edge", detected_language)
else: # openai
available_voices = list_voices_for_provider("openai")
available_voices = sorted(
available_voices,
key=lambda v: (v.get("Locale", ""), v.get("ShortName", "")),
)
if not available_voices:
# Fallback based on provider
if selected_provider == "edge":
console.print(
"[yellow]No voices found for detected language; falling back to en-US-GuyNeural.[/yellow]"
)
selected_voice = "en-US-GuyNeural"
else: # openai
console.print("[yellow]Falling back to OpenAI 'alloy' voice.[/yellow]")
selected_voice = "alloy"
else:
if selected_voice and any(v.get("ShortName") == selected_voice for v in available_voices):
# Voice was already set from stored state (and validated as compatible)
console.print(f"[cyan]Using stored voice:[/cyan] {selected_voice}")
elif sys.stdin.isatty():
click.echo(f"\nSelect a {selected_provider.upper()} voice:")
default_choice = 1
for idx, voice_info in enumerate(available_voices, start=1):
click.echo(f" {idx}. {format_voice_entry(voice_info, selected_provider)}")
if stored_voice and voice_info.get("ShortName") == stored_voice:
default_choice = idx
choice = click.prompt(
"Voice number",
default=default_choice,
type=click.IntRange(1, len(available_voices)),
)
selected_voice = available_voices[choice - 1]["ShortName"]
console.print(f"[cyan]Using voice:[/cyan] {selected_voice}")
else:
selected_voice = available_voices[0]["ShortName"]
console.print(f"[cyan]Using voice:[/cyan] {selected_voice}")
else:
if stored_voice and selected_voice == stored_voice and not voice:
console.print(f"[cyan]Using stored voice:[/cyan] {selected_voice}")
if not selected_voice:
selected_voice = "alloy" if selected_provider == "openai" else "en-US-GuyNeural"
env_cover_value = os.environ.get("TEPUB_AUDIOBOOK_COVER_PATH")
env_cover_path = None
if env_cover_value:
env_cover_path = Path(env_cover_value).expanduser()
if not env_cover_path.exists():
raise click.UsageError(
f"Cover path from TEPUB_AUDIOBOOK_COVER_PATH does not exist: {env_cover_path}"
)
# Priority: CLI > env > config > stored
selected_cover_path = cover_path or env_cover_path or settings.cover_image_path
# Handle config cover path (may be relative)
if selected_cover_path and not (cover_path or env_cover_path):
# This is from config, convert to Path if needed and resolve relative paths
if not isinstance(selected_cover_path, Path):
selected_cover_path = Path(selected_cover_path)
if not selected_cover_path.is_absolute():
selected_cover_path = settings.work_dir / selected_cover_path
selected_cover_path = selected_cover_path.expanduser()
if not selected_cover_path.exists():
console.print(
f"[yellow]Config cover_image_path not found: {selected_cover_path}. Falling back to auto-detection.[/yellow]"
)
selected_cover_path = None
else:
console.print(f"[cyan]Using config cover:[/cyan] {selected_cover_path}")
if selected_cover_path and not selected_cover_path.exists():
raise click.UsageError(f"Cover path does not exist: {selected_cover_path}")
candidate_path: Path | None = None
candidate_info: SpineCoverCandidate | None = None
if selected_cover_path is None:
if stored_cover_path:
stored_cover_fs = Path(stored_cover_path)
if stored_cover_fs.exists():
selected_cover_path = stored_cover_fs
console.print(f"[cyan]Using stored cover:[/cyan] {stored_cover_fs}")
else:
console.print(
f"[yellow]Stored cover path no longer exists; ignoring {stored_cover_path}.[/yellow]"
)
if selected_cover_path is None:
candidate_path, candidate_info = _write_cover_candidate(settings, input_epub)
if candidate_path and candidate_info:
click.echo(
f"\nDetected cover candidate: {candidate_info.href} (from {candidate_info.document_href})"
)
click.echo(f"Extracted candidate to: {candidate_path}")
if sys.stdin.isatty():
choice = click.prompt(
"Cover selection",
type=click.Choice(["use", "manual", "skip"], case_sensitive=False),
default="use",
)
choice = choice.lower()
if choice == "use":
selected_cover_path = candidate_path
elif choice == "manual":
selected_cover_path = click.prompt(
"Enter path to cover image",
type=click.Path(exists=True, path_type=Path),
)
else:
selected_cover_path = None
else:
selected_cover_path = candidate_path
console.print(
"[cyan]Non-interactive run; using detected cover candidate automatically.[/cyan]"
)
elif sys.stdin.isatty():
if click.confirm(
"No cover candidate detected automatically. Specify a cover image?", default=False
):
selected_cover_path = click.prompt(
"Enter path to cover image",
type=click.Path(exists=True, path_type=Path),
)
run_audiobook(
settings=settings,
input_epub=input_epub,
voice=selected_voice,
language=detected_language,
rate=rate,
volume=volume,
cover_path=selected_cover_path,
cover_only=cover_only,
tts_provider=selected_provider,
tts_model=selected_model,
tts_speed=selected_speed,
)
@audiobook.command(name="export-chapters")
@click.argument("source", type=click.Path(exists=True, path_type=Path))
@click.option(
"--output",
"-o",
type=click.Path(path_type=Path),
help="Output YAML file path (default: chapters.yaml in work_dir)",
)
@click.pass_context
def export_chapters(ctx: click.Context, source: Path, output: Path | None) -> None:
"""Export chapter information to YAML config file.
SOURCE can be either:
- EPUB file (.epub): Extract chapter structure before generation (preview mode)
- M4A audiobook (.m4a): Extract chapter markers from existing audiobook
The exported YAML file can be edited and used with update-chapters command.
"""
from audiobook.chapters import (
extract_chapters_from_epub,
extract_chapters_from_mp4,
write_chapters_yaml,
)
settings: AppSettings = ctx.obj["settings"]
# Determine source type
if source.suffix.lower() == ".epub":
# Preview mode: extract from EPUB
settings = prepare_settings_for_epub(ctx, settings, source, override=None)
# Validate segments file exists
from exceptions import StateFileNotFoundError
if not settings.segments_file.exists():
raise StateFileNotFoundError("segments", source)
console.print(f"[cyan]Extracting chapter structure from EPUB...[/cyan]")
chapters, metadata = extract_chapters_from_epub(source, settings)
# Default output to work_dir/chapters.yaml
if output is None:
output = settings.work_dir / "chapters.yaml"
elif source.suffix.lower() in {".m4a", ".mp4"}:
# Extract from audiobook
console.print(f"[cyan]Reading chapter markers from audiobook...[/cyan]")
chapters, metadata = extract_chapters_from_mp4(source)
# Default output to source directory
if output is None:
output = source.parent / "chapters.yaml"
else:
raise click.UsageError(
f"Unsupported file type: {source.suffix}. Expected .epub or .m4a"
)
# Write YAML
write_chapters_yaml(chapters, metadata, output)
console.print(f"\n[green]✓ Exported {len(chapters)} chapters to:[/green] {output}")
console.print(f"\n[cyan]Edit the file to customize chapter titles/timestamps, then use:[/cyan]")
if source.suffix.lower() == ".epub":
console.print(f" tepub audiobook {source.name}")
console.print(f"[dim](Audiobook generation will use custom titles from chapters.yaml)[/dim]")
else:
console.print(f" tepub audiobook update-chapters {source.name} chapters.yaml")
@audiobook.command(name="update-chapters")
@click.argument("audiobook_file", type=click.Path(exists=True, path_type=Path))
@click.argument("chapters_file", type=click.Path(exists=True, path_type=Path))
@click.pass_context
def update_chapters(ctx: click.Context, audiobook_file: Path, chapters_file: Path) -> None:
"""Update M4A audiobook with chapter markers from YAML config.
AUDIOBOOK_FILE: Path to M4A audiobook file
CHAPTERS_FILE: Path to YAML config file with chapter information
This command updates the chapter markers in an existing audiobook file.
All chapters in the YAML must have timestamps.
"""
from audiobook.chapters import read_chapters_yaml, update_mp4_chapters
# Validate audiobook file
if audiobook_file.suffix.lower() not in {".m4a", ".mp4"}:
raise click.UsageError(
f"Unsupported audiobook format: {audiobook_file.suffix}. Expected .m4a or .mp4"
)
# Read chapters from YAML
console.print(f"[cyan]Loading chapter configuration from:[/cyan] {chapters_file}")
chapters, metadata = read_chapters_yaml(chapters_file)
console.print(f"[cyan]Found {len(chapters)} chapters[/cyan]")
# Update audiobook
console.print(f"[cyan]Updating chapter markers in:[/cyan] {audiobook_file}")
update_mp4_chapters(audiobook_file, chapters)
console.print(f"\n[green]✓ Successfully updated {len(chapters)} chapter markers[/green]")
console.print(f"\n[dim]Chapters:[/dim]")
for i, ch in enumerate(chapters[:5], 1): # Show first 5
start_time = f"{ch.start:.1f}s" if ch.start is not None else "N/A"
console.print(f" {i}. {start_time:>8} - {ch.title}")
if len(chapters) > 5:
console.print(f" ... and {len(chapters) - 5} more")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/config.py | Python | """Config command implementation."""
from __future__ import annotations
import shutil
from pathlib import Path
import click
from pydantic import ValidationError
from rich.panel import Panel
from rich.table import Table
from rich.tree import Tree
from cli.core import prepare_settings_for_epub
from config import AppSettings
from config.loader import _parse_yaml_file
from console_singleton import get_console
console = get_console()
@click.group()
def config() -> None:
    """Configuration management commands."""
    # Pure container group: the actual work is done by the subcommands
    # registered on it below (validate, reset).
    pass
@config.command()
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path), required=False)
@click.option(
    "--global",
    "use_global",
    is_flag=True,
    help="Validate global config instead of per-book config.",
)
@click.option(
    "--file",
    "config_file_path",
    type=click.Path(exists=True, path_type=Path),
    help="Path to specific config file to validate.",
)
@click.pass_context
def validate(ctx: click.Context, input_epub: Path | None, use_global: bool, config_file_path: Path | None) -> None:
    """Validate configuration file syntax and values.

    Examples:
        tepub config validate # Validate global config
        tepub config validate --global # Validate global config (explicit)
        tepub config validate book.epub # Validate per-book config
        tepub config validate --file path.yaml # Validate specific file
    """
    # Determine which config to validate
    if config_file_path:
        config_path = config_file_path
        config_type = "Custom"
    elif use_global or input_epub is None:
        config_path = _get_global_config_path()
        config_type = "Global"
    else:
        # Get per-book config path
        # We need settings, but it might not be available if global config is broken
        # So we'll construct the path manually
        settings: AppSettings = ctx.obj.get("settings")
        if settings:
            temp_settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
            config_path = temp_settings.work_dir / "config.yaml"
        else:
            # Fallback: construct path manually
            config_path = input_epub.parent / input_epub.stem / "config.yaml"
        config_type = "Per-book"
    # Check if config exists
    if not config_path.exists():
        console.print(Panel(
            f"[yellow]Config file not found:[/yellow]\n{config_path}",
            title="⚠ Configuration Not Found",
            border_style="yellow",
        ))
        if not use_global and input_epub:
            console.print(f"\n[dim]Hint: Run [bold]tepub extract {input_epub.name}[/bold] to create the config file.[/dim]")
        raise click.Abort()
    # Display header
    console.print()
    console.print(Panel(
        f"[bold]{config_type} Configuration Validation[/bold]\n"
        f"[dim]File: {config_path}[/dim]",
        border_style="cyan",
    ))
    console.print()
    # Parse YAML
    try:
        yaml_data = _parse_yaml_file(config_path)
    except Exception as e:
        console.print(Panel(
            f"[red]YAML Syntax Error:[/red]\n{str(e)}",
            title="✗ Validation Failed",
            border_style="red",
        ))
        raise click.Abort()
    if not yaml_data:
        yaml_data = {}
    # Validate with Pydantic
    # NOTE(review): validation_results is never used below — candidate for removal.
    validation_results = []
    errors_by_field = {}
    try:
        # Try to create AppSettings from the YAML data
        if use_global or input_epub is None:
            # For global config, validate as standalone settings
            # Use a temporary work_dir to satisfy required fields
            temp_data = yaml_data.copy()
            if "work_dir" not in temp_data:
                temp_data["work_dir"] = "~/.tepub"
            validated_settings = AppSettings(**temp_data)
        else:
            # For per-book config, overlay on existing settings
            # NOTE(review): `settings` is only bound in the per-book branch at the
            # top of this function. If --file is passed together with an EPUB
            # argument, this line raises NameError — confirm and guard.
            validated_settings = settings.model_copy(update=yaml_data)
        # If we get here, validation passed
        success = True
    except ValidationError as e:
        success = False
        # Parse validation errors
        for error in e.errors():
            field_path = ".".join(str(loc) for loc in error["loc"])
            errors_by_field[field_path] = error["msg"]
    except Exception as e:
        console.print(Panel(
            f"[red]Validation Error:[/red]\n{str(e)}",
            title="✗ Validation Failed",
            border_style="red",
        ))
        raise click.Abort()
    # Additional validation: Check voice spelling
    if "audiobook_voice" in yaml_data and yaml_data["audiobook_voice"]:
        provider = yaml_data.get("audiobook_tts_provider", "edge")
        voice_value = yaml_data["audiobook_voice"]
        try:
            from audiobook.voices import list_voices_for_provider
            voices = list_voices_for_provider(provider)
            valid_names = [v["ShortName"] for v in voices]
            if voice_value not in valid_names:
                errors_by_field["audiobook_voice"] = (
                    f"Invalid voice '{voice_value}' for provider '{provider}'. "
                    f"Valid voices: {', '.join(valid_names)}"
                )
                success = False
        except Exception as voice_err:
            # If voice listing fails, add warning but don't fail validation
            console.print(f"[yellow]Warning: Could not validate voice: {voice_err}[/yellow]")
    # Build validation results tree
    tree = Tree("📝 Configuration Fields", guide_style="dim")
    # Group fields by category
    categories = {
        "Translation Settings": [
            "source_language", "target_language", "translation_workers",
            "prompt_preamble", "output_mode", "translation_files"
        ],
        "Audiobook Settings": [
            "audiobook_tts_provider", "audiobook_tts_model", "audiobook_tts_speed",
            "audiobook_voice", "audiobook_workers", "audiobook_files",
            "audiobook_opening_statement", "audiobook_closing_statement",
            "cover_image_path"
        ],
        "Provider Settings": [
            "primary_provider", "fallback_provider", "providers"
        ],
        "Skip Rules": [
            "skip_rules", "skip_after_back_matter"
        ],
        "Directories": [
            "work_root", "work_dir"
        ],
    }
    valid_count = 0
    invalid_count = 0
    for category, field_names in categories.items():
        # Check if any fields in this category are present in yaml_data
        category_fields = []
        for field_name in field_names:
            if field_name in yaml_data:
                category_fields.append(field_name)
        if not category_fields:
            continue  # Skip empty categories
        category_branch = tree.add(f"[bold cyan]{category}[/bold cyan]")
        for field_name in category_fields:
            field_value = yaml_data[field_name]
            # Check if this field has errors
            if field_name in errors_by_field:
                invalid_count += 1
                error_msg = errors_by_field[field_name]
                field_branch = category_branch.add(
                    f"[red]✗ {field_name}[/red]: {_format_value(field_value)}"
                )
                field_branch.add(f"[red]└─ Error: {error_msg}[/red]")
            else:
                valid_count += 1
                category_branch.add(
                    f"[green]✓ {field_name}[/green]: {_format_value(field_value)}"
                )
    # Show the tree
    console.print(tree)
    console.print()
    # Show summary
    total_fields = valid_count + invalid_count
    if success:
        summary_panel = Panel(
            f"[bold]Total fields:[/bold] {total_fields}\n"
            f"[green]Valid:[/green] {valid_count} ✓\n"
            f"[red]Invalid:[/red] {invalid_count} ✗\n\n"
            f"[bold green]Status: PASSED ✓[/bold green]",
            title="Validation Summary",
            border_style="green",
        )
    else:
        summary_panel = Panel(
            f"[bold]Total fields:[/bold] {total_fields}\n"
            f"[green]Valid:[/green] {valid_count} ✓\n"
            f"[red]Invalid:[/red] {invalid_count} ✗\n\n"
            f"[bold red]Status: FAILED ✗[/bold red]",
            title="Validation Summary",
            border_style="red",
        )
    console.print(summary_panel)
    console.print()
    # Non-zero exit for scripting: abort when any field failed validation.
    if not success:
        raise click.Abort()
@config.command()
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path), required=False)
@click.option(
    "--global",
    "use_global",
    is_flag=True,
    help="Reset global config.",
)
@click.option(
    "--file",
    "config_file_path",
    type=click.Path(path_type=Path),
    help="Path to specific config file to reset.",
)
@click.option(
    "--force",
    is_flag=True,
    help="Skip confirmation prompt.",
)
@click.option(
    "--backup",
    is_flag=True,
    help="Create .bak backup before resetting.",
)
@click.pass_context
def reset(
    ctx: click.Context,
    input_epub: Path | None,
    use_global: bool,
    config_file_path: Path | None,
    force: bool,
    backup: bool,
) -> None:
    """Reset configuration file to default template.

    Examples:
        tepub config reset --global # Reset global config
        tepub config reset book.epub # Reset per-book config
        tepub config reset --file config.yaml # Reset specific file
    """
    # Determine target config
    if config_file_path:
        config_path = config_file_path
        config_type = "Custom"
        is_per_book = False
    elif use_global or input_epub is None:
        config_path = _get_global_config_path()
        config_type = "Global"
        is_per_book = False
    else:
        # Per-book config
        settings: AppSettings = ctx.obj.get("settings")
        if settings:
            temp_settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
            config_path = temp_settings.work_dir / "config.yaml"
        else:
            # Fallback
            config_path = input_epub.parent / input_epub.stem / "config.yaml"
        config_type = "Per-book"
        is_per_book = True
    # Check if file exists
    if not config_path.exists():
        console.print(Panel(
            f"[yellow]Config file does not exist:[/yellow]\n{config_path}",
            title="⚠ File Not Found",
            border_style="yellow",
        ))
        raise click.Abort()
    # Confirmation (skipped with --force); reset is destructive.
    if not force:
        console.print()
        console.print(Panel(
            f"[yellow]This will reset {config_type.lower()} config to default template:[/yellow]\n"
            f"[bold]{config_path}[/bold]\n\n"
            f"[red]All current settings will be lost![/red]",
            title="⚠ Confirmation Required",
            border_style="yellow",
        ))
        console.print()
        if not click.confirm("Continue?", default=False):
            console.print("[cyan]Reset cancelled.[/cyan]")
            raise click.Abort()
    # Backup (optional): copy the current config aside before overwriting.
    if backup:
        backup_path = config_path.parent / f"{config_path.name}.bak"
        shutil.copy2(config_path, backup_path)
        console.print(f"[green]✓ Backup created: {backup_path}[/green]")
    # Reset based on type
    if is_per_book and input_epub and input_epub.exists():
        # Per-book config - regenerate from extraction
        from config.templates import create_book_config_template
        from state.store import load_segments
        # Get settings for this book
        settings = ctx.obj.get("settings")
        if settings:
            temp_settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
        else:
            # Minimal settings
            from config import AppSettings
            temp_settings = AppSettings(work_dir=config_path.parent)
        # Check if segments exist (per-book template is built from them)
        if not temp_settings.segments_file.exists():
            console.print(
                f"[red]Error: segments.json not found. Run [bold]tepub extract {input_epub.name}[/bold] first.[/red]"
            )
            raise click.Abort()
        # Load metadata from segments
        segments_doc = load_segments(temp_settings.segments_file)
        metadata = {
            "title": segments_doc.book_title,
            "author": segments_doc.book_author,
            "publisher": segments_doc.book_publisher,
            "year": segments_doc.book_year,
        }
        # Recreate config
        config_path.unlink()  # Remove old
        create_book_config_template(
            temp_settings.work_dir,
            input_epub.name,
            metadata,
            segments_doc,
            input_epub
        )
    else:
        # Global config - write default template
        _write_global_config_template(config_path)
    console.print()
    console.print(Panel(
        f"[green]Configuration reset successfully![/green]\n"
        f"[dim]{config_path}[/dim]",
        title="✓ Reset Complete",
        border_style="green",
    ))
def _write_global_config_template(config_path: Path) -> None:
    """Write default global config template.

    Overwrites ``config_path`` with the built-in default YAML template.
    The template text is written verbatim, so it must remain valid YAML.
    """
    template = """# Global TEPUB Configuration
# Location: ~/.tepub/config.yaml
# This file sets default settings for all books.
# Per-book configs (created by 'tepub extract') override these settings.
# ============================================================
# Translation Settings
# ============================================================
# Source and target languages
source_language: auto # auto-detect or specify (e.g., English, Japanese)
target_language: Simplified Chinese
# Parallel processing
translation_workers: 3 # Number of parallel translation workers
# ============================================================
# Translation Provider
# ============================================================
# Primary translation provider
primary_provider:
  name: openai # openai, anthropic, gemini, grok, deepl, ollama
  model: gpt-4o # Model name for the provider
# Optional fallback provider (if primary fails)
# fallback_provider:
#   name: anthropic
#   model: claude-3-5-sonnet-20241022
# ============================================================
# Audiobook Settings
# ============================================================
# TTS provider
audiobook_tts_provider: edge # edge (free) or openai (paid)
# Parallel processing
audiobook_workers: 3 # Number of parallel audiobook workers
# ============================================================
# Content Filtering (Skip Rules)
# ============================================================
# Files matching these keywords will be skipped during extraction
# skip_rules:
#   - keyword: cover
#   - keyword: copyright
#   - keyword: dedication
#   - keyword: acknowledgment
"""
    # Write as UTF-8 explicitly so the template round-trips on any platform.
    config_path.write_text(template, encoding="utf-8")
def _get_global_config_path() -> Path:
"""Get path to global config file."""
return Path.home() / ".tepub" / "config.yaml"
def _format_value(value) -> str:
"""Format a config value for display."""
if value is None:
return "[dim]null[/dim]"
elif isinstance(value, bool):
return f"[bold]{value}[/bold]"
elif isinstance(value, (int, float)):
return f"[cyan]{value}[/cyan]"
elif isinstance(value, str):
if len(value) > 50:
return f"[yellow]\"{value[:47]}...\"[/yellow]"
return f"[yellow]\"{value}\"[/yellow]"
elif isinstance(value, dict):
return f"[magenta]{{...}}[/magenta] [dim]({len(value)} keys)[/dim]"
elif isinstance(value, list):
return f"[magenta][...] [/magenta][dim]({len(value)} items)[/dim]"
else:
return f"[dim]{type(value).__name__}[/dim]"
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/export.py | Python | """Export command implementation."""
from pathlib import Path
import click
from cli.core import (
create_web_archive,
derive_epub_paths,
prepare_settings_for_epub,
resolve_export_flags,
)
from cli.errors import handle_state_errors
from config import AppSettings
from console_singleton import get_console
from injection.engine import run_injection
from webbuilder import export_web
console = get_console()
def _run_exports(
    settings: AppSettings,
    input_epub: Path,
    export_epub: bool,
    web: bool,
    output_epub: Path | None,
) -> None:
    """Run EPUB and/or web exports based on flags.

    Shared by the pipeline and export commands so both produce identical
    outputs for the same combination of flags.
    """
    if not (export_epub or web):
        console.print("[yellow]No export option selected. Use --epub or --web.[/yellow]")
        return

    archive_path: Path | None = None
    if web:
        # Build the static web site first, then bundle it into an archive.
        site_dir = export_web(settings, input_epub, output_mode=settings.output_mode)
        archive_path = create_web_archive(site_dir)

    if export_epub:
        bilingual_path, translated_path = derive_epub_paths(
            input_epub, output_epub, settings.work_dir
        )
        if output_epub is None:
            console.print(
                f"[yellow]No --output-epub provided; writing bilingual EPUB to {bilingual_path}[/yellow]"
            )
        # The bilingual injection also tells us whether any translated
        # segments exist; if none do, skip the translated-only pass.
        injected_html, _ = run_injection(
            settings=settings,
            input_epub=input_epub,
            output_epub=bilingual_path,
            mode="bilingual",
        )
        if not injected_html:
            console.print("[yellow]No translated segments found. EPUB export skipped.[/yellow]")
        else:
            run_injection(
                settings=settings,
                input_epub=input_epub,
                output_epub=translated_path,
                mode="translated_only",
            )
            console.print(f"[green]Exported bilingual EPUB to {bilingual_path}[/green]")
            console.print(f"[green]Exported translated EPUB to {translated_path}[/green]")

    if archive_path:
        console.print(f"[green]Exported web archive to {archive_path}[/green]")
@click.command(name="export")
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.option(
"--epub",
"epub_flag",
is_flag=True,
help="Export only the translated EPUB output.",
)
@click.option(
"--web",
"web_flag",
is_flag=True,
help="Export only the web version.",
)
@click.option(
"--output-epub",
type=click.Path(path_type=Path),
help="Optional EPUB output path; defaults to creating '<name>_bilingual.epub' and '<name>_translated.epub'.",
)
@click.option(
"--output-mode",
type=click.Choice(["bilingual", "translated-only"], case_sensitive=False),
help="Select bilingual (default) or translated-only EPUB output.",
)
@click.pass_context
@handle_state_errors
def export_command(
ctx: click.Context,
input_epub: Path,
epub_flag: bool,
web_flag: bool,
output_epub: Path | None,
output_mode: str | None,
) -> None:
"""Export translated outputs (EPUB by default)."""
settings: AppSettings = ctx.obj["settings"]
settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
if output_mode:
settings = settings.model_copy(update={"output_mode": output_mode.replace("-", "_")})
ctx.obj["settings"] = settings
# Validate that required state files exist (errors handled by decorator)
settings.validate_for_export(input_epub)
export_epub, web = resolve_export_flags(epub_flag, web_flag)
# _run_exports may also raise CorruptedStateError (handled by decorator)
_run_exports(settings, input_epub, export_epub, web, output_epub)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/extract.py | Python | """Extract command implementation."""
from pathlib import Path
import click
from cli.core import prepare_settings_for_epub
from config import AppSettings, create_book_config_template
from console_singleton import get_console
from debug_tools.extraction_summary import print_extraction_summary
from extraction.epub_export import extract_epub_structure, get_epub_metadata_files
from extraction.image_export import extract_images, get_image_mapping
from extraction.markdown_export import export_combined_markdown, export_to_markdown
from extraction.pipeline import run_extraction
from state.store import load_segments
console = get_console()
@click.command()
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.option(
    "--output",
    type=click.Path(path_type=Path),
    help="Optional work directory override for this extraction run.",
)
@click.option(
    "--include-back-matter",
    is_flag=True,
    help="Include back-matter continuation pages (index, notes, etc.). By default, files after back-matter triggers are skipped.",
)
@click.pass_context
def extract(
    ctx: click.Context, input_epub: Path, output: Path | None, include_back_matter: bool
) -> None:
    """Extract segments from the EPUB file."""
    settings: AppSettings = ctx.obj["settings"]
    settings = prepare_settings_for_epub(ctx, settings, input_epub, output)
    if include_back_matter:
        # Disable the cascade that skips files following index/notes/etc.
        settings = settings.model_copy(update={"skip_after_back_matter": False})

    run_extraction(settings=settings, input_epub=input_epub)
    console.print(f"[green]Segments written to {settings.segments_file}[/green]")

    # Seed config.yaml with metadata discovered during extraction.
    segments_doc = load_segments(settings.segments_file)
    book_meta = {
        "title": segments_doc.book_title,
        "author": segments_doc.book_author,
        "publisher": segments_doc.book_publisher,
        "year": segments_doc.book_year,
    }
    create_book_config_template(
        settings.work_dir, input_epub.name, book_meta, segments_doc, input_epub
    )
    print_extraction_summary(settings, epub_path=input_epub)

    # Mirror the complete EPUB layout for later repackaging; failure here is non-fatal.
    epub_raw_dir = settings.work_dir / "epub_raw"
    try:
        structure_mapping = extract_epub_structure(
            input_epub, epub_raw_dir, preserve_structure=True
        )
        cwd = Path.cwd()
        display_dir = (
            epub_raw_dir.relative_to(cwd) if epub_raw_dir.is_relative_to(cwd) else epub_raw_dir
        )
        console.print(
            f"[green]Extracted complete EPUB structure ({len(structure_mapping)} files) to {display_dir}[/green]"
        )
        metadata_files = get_epub_metadata_files(structure_mapping)
        if metadata_files:
            console.print("[cyan]Key EPUB files extracted:[/cyan]")
            for key, path in sorted(metadata_files.items()):
                rel_path = path.relative_to(epub_raw_dir)
                console.print(f" {key}: {rel_path}")
    except Exception as e:
        console.print(f"[yellow]Warning: Could not extract EPUB structure: {e}[/yellow]")

    # Pull images out so markdown exports can reference them locally.
    markdown_dir = settings.work_dir / "markdown"
    images_dir = markdown_dir / "images"
    extracted_images = extract_images(settings, input_epub, images_dir)
    image_mapping = get_image_mapping(extracted_images)
    if extracted_images:
        console.print(f"[green]Extracted {len(extracted_images)} images to {images_dir}[/green]")
        cover_candidates = [img for img in extracted_images if img.is_cover_candidate]
        if cover_candidates:
            console.print("[cyan]Potential cover candidates:[/cyan]")
            for img in cover_candidates[:3]:  # show at most three
                console.print(f" - {img.extracted_path.name}")

    # Export per-file markdown plus a single combined document.
    created_files = export_to_markdown(settings, input_epub, markdown_dir, image_mapping)
    console.print(f"[green]Exported {len(created_files)} markdown files to {markdown_dir}[/green]")
    combined_file = export_combined_markdown(settings, input_epub, markdown_dir, image_mapping)
    console.print(f"[green]Created combined markdown: {combined_file.name}[/green]")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/format.py | Python | """Format command implementation."""
import click
from config import AppSettings
from console_singleton import get_console
from state.store import load_state, save_state
from translation.polish import polish_state, target_is_chinese
console = get_console()
@click.command()
@click.pass_context
def format_cmd(ctx: click.Context) -> None:
    """Format translated text for Chinese typography."""
    settings: AppSettings = ctx.obj["settings"]
    settings.ensure_directories()

    # Typography polishing only applies to Chinese targets.
    if not target_is_chinese(settings.target_language):
        console.print("[yellow]Target language is not Chinese; nothing to format.[/yellow]")
        return

    try:
        current_state = load_state(settings.state_file)
    except FileNotFoundError:
        console.print("[red]State file not found. Run extract/translate first.[/red]")
        return

    polished_state = polish_state(current_state)
    # Compare serialized forms so the file is rewritten only when something changed.
    if polished_state.model_dump() == current_state.model_dump():
        console.print("[green]Translations already formatted. No changes made.[/green]")
        return

    save_state(polished_state, settings.state_file)
    console.print(f"[green]Formatted translations saved to {settings.state_file}[/green]")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/pipeline.py | Python | """Pipeline command implementation."""
from pathlib import Path
import click
from cli.commands.export import _run_exports
from cli.core import check_pipeline_artifacts, prepare_settings_for_epub, resolve_export_flags
from config import AppSettings
from console_singleton import get_console
from extraction.pipeline import run_extraction
from translation.controller import run_translation
from translation.languages import normalize_language
console = get_console()
@click.command()
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.option("--from", "source_language", default=None, help="Source language (code or name).")
@click.option("--to", "target_language", default=None, help="Target language (code or name).")
@click.option(
    "--epub",
    "epub_flag",
    is_flag=True,
    help="Export only the translated EPUB output.",
)
@click.option(
    "--web",
    "web_flag",
    is_flag=True,
    help="Export only the web version.",
)
@click.option(
    "--output-epub",
    type=click.Path(path_type=Path),
    help="Optional EPUB output path; defaults to creating '<name>_bilingual.epub' and '<name>_translated.epub'.",
)
@click.option(
    "--output-mode",
    type=click.Choice(["bilingual", "translated-only"], case_sensitive=False),
    help="Select bilingual (default) or translated-only EPUB output.",
)
@click.pass_context
def pipeline_command(
    ctx: click.Context,
    input_epub: Path,
    source_language: str | None,
    target_language: str | None,
    epub_flag: bool,
    web_flag: bool,
    output_epub: Path | None,
    output_mode: str | None,
) -> None:
    """Run extraction, translation, and export in one go."""
    settings: AppSettings = ctx.obj["settings"]
    settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
    if output_mode:
        # CLI spelling uses hyphens; settings use snake_case.
        settings = settings.model_copy(update={"output_mode": output_mode.replace("-", "_")})
    ctx.obj["settings"] = settings

    # Skip extraction when a matching workspace already exists.
    if check_pipeline_artifacts(settings, input_epub):
        console.print(
            f"[cyan]Resuming with existing extraction at {settings.segments_file}; using state {settings.state_file}[/cyan]"
        )
    else:
        run_extraction(settings=settings, input_epub=input_epub)

    # CLI language flags win over configured defaults; normalize to codes.
    source_pref = source_language or settings.source_language
    target_pref = target_language or settings.target_language
    source_code, _ = normalize_language(source_pref)
    target_code, _ = normalize_language(target_pref)
    settings = settings.model_copy(
        update={"source_language": source_pref, "target_language": target_pref}
    )
    ctx.obj["settings"] = settings

    run_translation(
        settings=settings,
        input_epub=input_epub,
        source_language=source_code,
        target_language=target_code,
    )
    do_epub, do_web = resolve_export_flags(epub_flag, web_flag)
    _run_exports(settings, input_epub, do_epub, do_web, output_epub)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/resume.py | Python | """Resume command implementation."""
import click
from rich.table import Table
from config import AppSettings
from console_singleton import get_console
from state.resume import load_resume_info
console = get_console()
@click.command()
@click.pass_context
def resume(ctx: click.Context) -> None:
    """Show resumable state summary."""
    settings: AppSettings = ctx.obj["settings"]
    info = load_resume_info(settings.state_file)

    table = Table(title="Translation Resume Info")
    table.add_column("Category")
    table.add_column("Count")
    # One row per segment bucket, in the order users expect to read them.
    for label, segments in (
        ("Remaining", info.remaining_segments),
        ("Completed", info.completed_segments),
        ("Skipped", info.skipped_segments),
    ):
        table.add_row(label, str(len(segments)))
    console.print(table)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/commands/translate.py | Python | """Translate command implementation."""
from pathlib import Path
import click
from cli.core import prepare_settings_for_epub
from cli.errors import handle_state_errors
from config import AppSettings
from translation.controller import run_translation
from translation.languages import normalize_language
@click.command()
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.option("--from", "source_language", help="Source language (code or name).", default=None)
@click.option(
    "--to",
    "target_language",
    help="Target language (code or name).",
    default=None,
)
@click.pass_context
@handle_state_errors
def translate(
    ctx: click.Context,
    input_epub: Path,
    source_language: str | None,
    target_language: str | None,
) -> None:
    """Translate pending segments using configured provider."""
    settings: AppSettings = ctx.obj["settings"]
    settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
    # Missing/corrupt workspace files are reported by @handle_state_errors.
    settings.validate_for_translation(input_epub)

    # CLI options override configured language preferences.
    source_pref = source_language or settings.source_language
    target_pref = target_language or settings.target_language
    source_code, _source_display = normalize_language(source_pref)
    target_code, _target_display = normalize_language(target_pref)
    settings = settings.model_copy(
        update={"source_language": source_pref, "target_language": target_pref}
    )
    ctx.obj["settings"] = settings

    run_translation(
        settings=settings,
        input_epub=input_epub,
        source_language=source_code,
        target_language=target_code,
    )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/core.py | Python | """Shared CLI utilities and common operations."""
from __future__ import annotations
import logging
import shutil
from pathlib import Path
import click
from config import AppSettings, load_settings_from_cli
from logging_utils.logger import configure_logging
from state.store import load_segments, load_state
def prepare_initial_settings(
    config_file: str | None, work_dir: Path | None, verbose: bool
) -> AppSettings:
    """Initialize settings from CLI arguments.

    Args:
        config_file: Optional path to config file
        work_dir: Optional work directory override
        verbose: Enable verbose logging

    Returns:
        Configured AppSettings instance
    """
    # Configure logging exactly once. The previous version always called
    # configure_logging() and then called it a second time with DEBUG when
    # verbose was set, configuring handlers twice.
    if verbose:
        configure_logging(level=logging.DEBUG)
    else:
        configure_logging()

    settings = load_settings_from_cli(config_file)
    if work_dir:
        settings = settings.model_copy(update={"work_dir": work_dir})
    # Note: ensure_directories() is called later in prepare_settings_for_epub()
    # after the workspace is properly configured
    return settings
def prepare_settings_for_epub(
    ctx: click.Context, settings: AppSettings, input_epub: Path, override: Path | None
) -> AppSettings:
    """Prepare settings for a specific EPUB file.

    Args:
        ctx: Click context
        settings: Base settings
        input_epub: Path to EPUB file
        override: Optional work directory override

    Returns:
        Settings configured for the specific EPUB
    """
    # An explicit override (argument or one remembered on the context) wins.
    override_root = override or ctx.obj.get("work_dir_override_path")
    if override_root:
        settings = settings.with_override_root(override_root, input_epub)
        # Remember the override so subsequent commands reuse the same root.
        ctx.obj["work_dir_overridden"] = True
        ctx.obj["work_dir_override_path"] = override_root
    elif not ctx.obj.get("work_dir_overridden", False):
        # Default: derive a per-book workspace from the EPUB itself.
        settings = settings.with_book_workspace(input_epub)
    settings.ensure_directories()
    ctx.obj["settings"] = settings
    return settings
def resolve_export_flags(epub_flag: bool, web_flag: bool) -> tuple[bool, bool]:
    """Determine which exports to run based on user flags.

    Args:
        epub_flag: User requested EPUB export
        web_flag: User requested web export

    Returns:
        Tuple of (export_epub, export_web) booleans
    """
    if epub_flag or web_flag:
        # At least one explicit choice: honor exactly what was asked for.
        return epub_flag, web_flag
    # No flags given: export both formats by default.
    return True, True
def derive_epub_paths(
input_epub: Path, requested: Path | None, work_dir: Path
) -> tuple[Path, Path]:
"""Derive bilingual and translated EPUB output paths.
Args:
input_epub: Source EPUB path
requested: User-requested output path (optional)
work_dir: Workspace directory
Returns:
Tuple of (bilingual_path, translated_path)
"""
if requested:
bilingual = requested
else:
# Export to workspace directory, not alongside EPUB
bilingual = work_dir / f"{input_epub.stem}_bilingual{input_epub.suffix}"
stem = bilingual.stem
if stem.endswith("_bilingual"):
base_stem = stem[: -len("_bilingual")]
else:
base_stem = stem
translated = bilingual.with_name(f"{base_stem}_translated{bilingual.suffix}")
return bilingual, translated
def create_web_archive(web_dir: Path) -> Path:
    """Create ZIP archive of web export.

    Args:
        web_dir: Directory containing web export

    Returns:
        Path to created ZIP archive
    """
    parent = web_dir.parent
    # Archive lands next to the export directory as "<name>.zip"; archiving
    # relative to the parent keeps the top-level folder name inside the ZIP.
    archive_path = shutil.make_archive(
        str(parent / web_dir.name), "zip", root_dir=parent, base_dir=web_dir.name
    )
    return Path(archive_path)
def check_pipeline_artifacts(settings: AppSettings, input_epub: Path) -> bool:
    """Check if valid pipeline artifacts exist for resuming.

    Args:
        settings: Application settings
        input_epub: EPUB file path

    Returns:
        True if valid artifacts exist, False otherwise
    """
    segments_path = settings.segments_file
    state_path = settings.state_file
    if not (segments_path.exists() and state_path.exists()):
        return False

    try:
        segments_doc = load_segments(segments_path)
        state_doc = load_state(state_path)
    except Exception:
        # Unreadable or corrupt artifacts mean we cannot resume.
        return False

    # The saved EPUB path must refer to the same book we were asked to process.
    try:
        saved_epub_path = Path(segments_doc.epub_path)
    except TypeError:
        saved_epub_path = Path(str(segments_doc.epub_path))
    try:
        if saved_epub_path.resolve() != input_epub.resolve():
            return False
    except Exception:
        # resolve() can fail (e.g. dangling paths); fall back to raw comparison.
        if saved_epub_path != input_epub:
            return False

    # Every extracted segment must already be tracked in the state file.
    segment_ids = {segment.segment_id for segment in segments_doc.segments}
    if not segment_ids:
        return False
    return segment_ids.issubset(state_doc.segments.keys())
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/debug/__init__.py | Python | """Debug command group and registration."""
import click
from .commands import (
analyze_skips,
inspect_segment_cmd,
list_files_cmd,
preview_skips,
purge_refusals,
show_pending_cmd,
show_skip_list_cmd,
workspace,
)
@click.group()
@click.pass_context
def debug(ctx: click.Context) -> None:  # pragma: no cover - primarily used interactively
    """Debugging utilities for inspecting pipeline state."""
    # The group callback itself does nothing; subcommands do the work.
def register_debug_commands(app: click.Group) -> None:
    """Register debug group with all debug commands."""
    # Attach every debug subcommand, then mount the group on the main app.
    for command in (
        show_skip_list_cmd,
        show_pending_cmd,
        purge_refusals,
        inspect_segment_cmd,
        list_files_cmd,
        preview_skips,
        workspace,
        analyze_skips,
    ):
        debug.add_command(command)
    app.add_command(debug)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/debug/commands.py | Python | """Debug command implementations."""
from __future__ import annotations
from pathlib import Path
import click
from cli.core import prepare_settings_for_epub
from config import AppSettings
from console_singleton import get_console
from state.models import SegmentStatus, TranslationRecord
from state.store import load_state, save_state
from translation.refusal_filter import looks_like_refusal
console = get_console()
@click.command("show-skip-list")
@click.pass_context
def show_skip_list_cmd(ctx: click.Context) -> None:
"""Show configured skip rules."""
from debug_tools.skip_lists import show_skip_list
settings: AppSettings = ctx.obj["settings"]
show_skip_list(settings)
@click.command("show-pending")
@click.pass_context
def show_pending_cmd(ctx: click.Context) -> None:
"""Show pending segments."""
from debug_tools.pending import show_pending
settings: AppSettings = ctx.obj["settings"]
show_pending(settings)
@click.command("purge-refusals")
@click.option("--dry-run", is_flag=True, help="Only report matches without modifying state.")
@click.pass_context
def purge_refusals(ctx: click.Context, dry_run: bool) -> None:
"""Reset segments whose translations look like provider refusals."""
settings: AppSettings = ctx.obj["settings"]
try:
state = load_state(settings.state_file)
except FileNotFoundError:
console.print("[red]State file not found. Run extract/translate first.[/red]")
return
matches: list[str] = []
for segment_id, record in state.segments.items():
if record.translation and looks_like_refusal(record.translation):
matches.append(segment_id)
if dry_run:
continue
payload = record.model_dump()
payload.update(
{
"translation": None,
"status": SegmentStatus.PENDING,
"provider_name": None,
"model_name": None,
"error_message": None,
}
)
state.segments[segment_id] = TranslationRecord.model_validate(payload)
if not matches:
console.print("[green]No refusal-like translations found.[/green]")
return
if dry_run:
console.print(f"[yellow]Found {len(matches)} refusal-like segments (dry run).[/yellow]")
for segment_id in matches:
console.print(f" - {segment_id}")
return
save_state(state, settings.state_file)
console.print(
f"[green]Reset {len(matches)} segments to pending; rerun translate to retry them.[/green]"
)
@click.command("inspect-segment")
@click.argument("segment_id")
@click.pass_context
def inspect_segment_cmd(ctx: click.Context, segment_id: str) -> None:
"""Inspect a specific segment."""
from debug_tools.inspect import inspect_segment
settings: AppSettings = ctx.obj["settings"]
inspect_segment(settings, segment_id)
@click.command("list-files")
@click.pass_context
def list_files_cmd(ctx: click.Context) -> None:
"""List all processed files."""
from debug_tools.files import list_files
settings: AppSettings = ctx.obj["settings"]
list_files(settings)
@click.command("preview-skip-candidates")
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.pass_context
def preview_skips(ctx: click.Context, input_epub: Path) -> None:
"""Preview skip candidates for an EPUB."""
from debug_tools.preview import preview_skip_candidates
settings: AppSettings = ctx.obj["settings"]
settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
preview_skip_candidates(settings, input_epub)
@click.command("workspace")
@click.argument("input_epub", type=click.Path(exists=True, path_type=Path))
@click.pass_context
def workspace(ctx: click.Context, input_epub: Path) -> None:
"""Show workspace paths for an EPUB."""
settings: AppSettings = ctx.obj["settings"]
preview_settings = prepare_settings_for_epub(ctx, settings, input_epub, override=None)
console.print(f"Base work root: {preview_settings.work_root}", soft_wrap=True)
console.print(f"Derived workspace: {preview_settings.work_dir}", soft_wrap=True)
console.print(f"Segments file: {preview_settings.segments_file}", soft_wrap=True)
console.print(f"State file: {preview_settings.state_file}", soft_wrap=True)
@click.command("analyze-skips")
@click.option(
"--library",
type=click.Path(path_type=Path),
help="Directory or EPUB to analyze (defaults to ~/Ultimate/epub).",
)
@click.option("--limit", type=int, help="Process at most N EPUB files.")
@click.option(
"--top-n",
type=int,
default=15,
show_default=True,
help="Number of unmatched TOC titles to list.",
)
@click.option(
"--report",
type=click.Path(path_type=Path),
help="Optional JSON file for detailed results.",
)
@click.pass_context
def analyze_skips(
ctx: click.Context,
library: Path | None,
limit: int | None,
top_n: int,
report: Path | None,
) -> None:
"""Analyze skip rules across an EPUB library."""
from debug_tools.analysis import analyze_library
settings: AppSettings = ctx.obj["settings"]
target_library = library or (Path.home() / "Ultimate" / "epub")
analyze_library(settings, target_library, limit=limit, top_n=top_n, report_path=report)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/errors.py | Python | """Centralized error handling decorators for CLI commands."""
from collections.abc import Callable
from functools import wraps
from typing import TypeVar, cast
import click
from console_singleton import get_console
from exceptions import (
CorruptedStateError,
StateFileNotFoundError,
WorkspaceNotFoundError,
)
console = get_console()
F = TypeVar("F", bound=Callable)
def handle_state_errors(func: F) -> F:
    """Decorator to standardize state-related error handling.

    Catches StateFileNotFoundError, WorkspaceNotFoundError, and CorruptedStateError,
    prints them in red, and exits with code 1.

    Usage:
        @click.command()
        @handle_state_errors
        def my_command(ctx: click.Context):
            settings.validate_for_translation(input_epub)  # May raise state errors
            ...
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (StateFileNotFoundError, WorkspaceNotFoundError, CorruptedStateError) as exc:
            # Show a concise red message instead of a traceback, then exit non-zero.
            console.print(f"[red]{exc}[/red]")
            raise click.exceptions.Exit(1)

    return cast(F, wrapper)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/cli/main.py | Python | """Tepub CLI main entry point."""
from __future__ import annotations
from pathlib import Path
import click
from cli.commands import register_commands
from cli.core import prepare_initial_settings
from cli.debug import register_debug_commands
from console_singleton import configure_console, get_console
console = get_console()
class DefaultCommandGroup(click.Group):
    """Click group that supports a default command."""

    def __init__(self, *args, default_command: str | None = None, **kwargs):
        # Name of the subcommand to fall back to when none is given.
        self.default_command = default_command
        super().__init__(*args, **kwargs)

    def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
        # If the first token is neither a known subcommand nor an option,
        # treat it as an argument to the default command.
        if self.default_command and args:
            head = args[0]
            if not head.startswith("-") and head not in self.commands:
                args = [self.default_command, *args]
        return super().parse_args(ctx, args)
@click.group(cls=DefaultCommandGroup, default_command="pipeline")
@click.option(
    "--config",
    "config_file",
    type=click.Path(exists=True, path_type=Path),
    help="Path to config.yaml file.",
)
@click.option(
    "--work-dir",
    "work_dir",
    type=click.Path(path_type=Path),
    help="Override top-level work directory for all operations.",
)
@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging for debugging.")
@click.option("-q", "--quiet", is_flag=True, help="Suppress all console output.")
@click.pass_context
def app(
    ctx: click.Context,
    config_file: Path | None,
    work_dir: Path | None,
    verbose: bool,
    quiet: bool,
) -> None:
    """Tepub: EPUB Bilingual Translator & Multi-format Exporter."""
    # Configure console output first so settings loading honors quiet/verbose.
    configure_console(quiet=quiet, verbose=verbose)
    ctx.ensure_object(dict)
    ctx.obj["settings"] = prepare_initial_settings(config_file, work_dir, verbose)


# Attach the regular and debug command sets to the root group at import time.
register_commands(app)
register_debug_commands(app)


if __name__ == "__main__":
    app()
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/config/__init__.py | Python | """Configuration module for tepub.
This module provides configuration management, workspace handling,
and template generation for the tepub EPUB translation tool.
"""
from __future__ import annotations
from config.loader import load_settings, load_settings_from_cli
from config.models import AppSettings, ProviderConfig, RateLimitConfig, RetryConfig, SkipRule
from config.templates import create_book_config_template
from config.workspace import build_workspace_name
# Import workspace module to attach methods to AppSettings
import config.workspace # noqa: F401
__all__ = [
# Models
"AppSettings",
"ProviderConfig",
"SkipRule",
"RetryConfig",
"RateLimitConfig",
# Loaders
"load_settings",
"load_settings_from_cli",
# Workspace
"build_workspace_name",
# Templates
"create_book_config_template",
]
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/config/loader.py | Python | from __future__ import annotations
import os
from pathlib import Path
from typing import Any
from config.models import AppSettings
try:
import yaml
except Exception: # pragma: no cover - optional dependency
yaml = None
def _parse_env_file(path: Path) -> dict[str, str]:
"""Parse .env file into dictionary."""
data: dict[str, str] = {}
for raw_line in path.read_text(encoding="utf-8").splitlines():
line = raw_line.strip()
if not line or line.startswith("#"):
continue
key, _, value = line.partition("=")
data[key.strip()] = value.strip().strip("'").strip('"')
return data
def _parse_yaml_file(path: Path) -> dict[str, Any]:
    """Parse YAML file into dictionary."""
    if not path.exists():
        return {}
    text = path.read_text(encoding="utf-8")
    if not text.strip():
        return {}
    if yaml:
        # Full YAML support when PyYAML is installed.
        return yaml.safe_load(text) or {}
    # Minimal fallback parser: only supports top-level "key: value" pairs
    parsed: dict[str, Any] = {}
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        if not stripped or stripped.startswith("#") or ":" not in stripped:
            continue
        key, _, value = stripped.partition(":")
        parsed[key.strip()] = value.strip().strip('"').strip("'")
    return parsed
def _prepare_provider_credentials(settings: AppSettings) -> AppSettings:
    """Inject API keys and base URLs from environment variables."""
    openai_key = os.getenv("OPENAI_API_KEY")
    if openai_key and settings.primary_provider.name == "openai":
        settings.primary_provider.api_key = openai_key

    ollama_url = os.getenv("OLLAMA_BASE_URL")
    if ollama_url:
        # Either the primary or the fallback provider may point at Ollama.
        for provider in (settings.primary_provider, settings.fallback_provider):
            if provider and provider.name == "ollama":
                provider.base_url = ollama_url
    return settings
def load_settings(config_path: Path | None = None) -> AppSettings:
    """Load configuration from multiple sources with proper precedence.

    Loading order (later overrides earlier):
    1. Global config: ~/.tepub/config.yaml
    2. Environment file: .env
    3. Project config: config.yaml
    4. Environment variable: TEPUB_WORK_ROOT
    5. Explicit config file: config_path parameter

    Args:
        config_path: Optional path to explicit config file (YAML or .env)

    Returns:
        AppSettings instance with merged configuration
    """
    merged: dict[str, Any] = {}

    # 1) Global config from the user's home directory.
    global_config = Path.home() / ".tepub" / "config.yaml"
    if global_config.exists():
        global_payload = _parse_yaml_file(global_config)
        if isinstance(global_payload, dict):
            merged.update(global_payload)

    # 2) Local .env file.
    default_env = Path(".env")
    if default_env.exists():
        merged.update(_parse_env_file(default_env))

    # 3) Project-level config.yaml (can override global).
    project_yaml = Path("config.yaml")
    if project_yaml.exists():
        project_payload = _parse_yaml_file(project_yaml)
        if isinstance(project_payload, dict):
            merged.update(project_payload)

    # 4) TEPUB_WORK_ROOT environment variable.
    env_root = os.getenv("TEPUB_WORK_ROOT")
    if env_root:
        root_path = Path(env_root).expanduser()
        merged["work_root"] = root_path
        merged.setdefault("work_dir", root_path)

    # 5) Explicit config file wins over everything else.
    if config_path:
        config_path = config_path.expanduser()
        if config_path.suffix.lower() in {".yaml", ".yml"}:
            merged.update(_parse_yaml_file(config_path))
        else:
            merged.update(_parse_env_file(config_path))

    # Normalize known keys into structured values.
    if "work_dir" in merged:
        merged["work_dir"] = Path(merged["work_dir"]).expanduser()
        if "work_root" not in merged:
            merged["work_root"] = merged["work_dir"]
    if "work_root" in merged:
        merged["work_root"] = Path(merged["work_root"]).expanduser()
    for lang_key in ("source_language", "target_language"):
        if lang_key in merged:
            merged[lang_key] = str(merged[lang_key]).strip()
    if "skip_rules" in merged and isinstance(merged["skip_rules"], list):
        normalized_rules: list[dict[str, Any]] = []
        for item in merged["skip_rules"]:
            if isinstance(item, str):
                # Bare strings become keyword-only rules.
                normalized_rules.append({"keyword": item})
            elif isinstance(item, dict):
                normalized_rules.append(item)
        merged["skip_rules"] = normalized_rules
    if "prompt_preamble" in merged and merged["prompt_preamble"] is not None:
        merged["prompt_preamble"] = str(merged["prompt_preamble"]).strip()
    if "output_mode" in merged:
        merged["output_mode"] = str(merged["output_mode"]).replace("-", "_").strip().lower()

    settings = AppSettings(**merged)
    configured = _prepare_provider_credentials(settings)
    try:
        from translation.prompt_builder import configure_prompt

        configure_prompt(configured.prompt_preamble)
    except Exception:  # pragma: no cover - prompt builder optional during tooling
        pass
    return configured
def load_settings_from_cli(config_file: str | None) -> AppSettings:
    """CLI entry point for loading settings."""
    if not config_file:
        # No explicit file: use the default multi-source lookup.
        return load_settings()
    return load_settings(Path(config_file).expanduser())
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/config/models.py | Python | from __future__ import annotations
import re
from pathlib import Path
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, field_validator
# Retry policy for provider API calls.
class RetryConfig(BaseModel):
    max_attempts: int = Field(3, ge=1)  # maximum attempts (at least 1)
    backoff_seconds: float = Field(1.5, gt=0)  # delay between attempts, in seconds (presumably the backoff base — confirm in retry logic)
    jitter: float = Field(0.1, ge=0)  # random jitter factor added to the delay — TODO confirm units
# Client-side rate limiting for provider calls.
class RateLimitConfig(BaseModel):
    requests_per_minute: int | None = Field(None, gt=0)  # optional RPM cap; None leaves it unset
    concurrency: int = Field(1, ge=1)  # number of concurrent requests allowed
# Connection settings for a translation provider (openai, ollama, ...).
class ProviderConfig(BaseModel):
    name: str = Field(..., description="Provider identifier, e.g. openai or ollama")
    model: str = Field(..., description="Model name used for translation")
    base_url: str | None = None  # endpoint override; loader fills it from OLLAMA_BASE_URL for ollama
    api_key: str | None = None  # loader injects OPENAI_API_KEY for openai
    extra_headers: dict[str, str] = Field(default_factory=dict)  # extra HTTP headers for requests

    @field_validator("name")
    @classmethod
    def _lowercase_name(cls, value: str) -> str:
        # Normalize so name comparisons ("openai", "ollama") are case-insensitive.
        return value.lower()
# A keyword-based rule deciding which EPUB files to skip during translation
# (see the front/back-matter defaults on AppSettings.skip_rules).
class SkipRule(BaseModel):
    keyword: str  # stored trimmed and lowercased (see validator below)
    reason: str = "auto-detected front/back matter"  # human-readable explanation

    @field_validator("keyword")
    @classmethod
    def _normalize_keyword(cls, value: str) -> str:
        # Lowercase and trim so matching is case- and whitespace-insensitive.
        return value.strip().lower()
DEFAULT_ROOT_DIR = Path.cwd() / ".tepub"
_WORD_SPLIT_PATTERN = re.compile(r"[\s_-]+")
_NON_SLUG_CHARS = re.compile(r"[^a-z0-9]+")
_WORKSPACE_HASH_LENGTH = 8
class AppSettings(BaseModel):
    """Application-wide configuration for extraction, translation, and audiobook generation.

    Relative paths are anchored under ``work_dir`` by ``model_post_init``;
    ``model_copy`` keeps workspace-bound paths in sync when ``work_dir`` changes.
    """
    work_root: Path = Field(default_factory=lambda: DEFAULT_ROOT_DIR)  # parent directory of workspaces
    work_dir: Path = Field(default_factory=lambda: DEFAULT_ROOT_DIR)  # active workspace directory
    source_language: str = Field(default="auto")
    target_language: str = Field(default="Simplified Chinese")
    primary_provider: ProviderConfig = Field(
        default_factory=lambda: ProviderConfig(name="openai", model="gpt-4o")
    )
    fallback_provider: ProviderConfig | None = Field(
        default_factory=lambda: ProviderConfig(name="ollama", model="qwen2.5:14b-instruct")
    )
    retry: RetryConfig = Field(default_factory=RetryConfig)
    rate_limit: RateLimitConfig = Field(default_factory=RateLimitConfig)
    # Default front/back-matter sections skipped during extraction.
    skip_rules: list[SkipRule] = Field(
        default_factory=lambda: [
            SkipRule(keyword="cover"),
            SkipRule(keyword="praise"),
            SkipRule(keyword="also by"),
            SkipRule(keyword="copyright"),
            SkipRule(keyword="dedication"),
            SkipRule(keyword="acknowledgment"),
            SkipRule(keyword="acknowledgement"),
            SkipRule(keyword="the author"),
            SkipRule(keyword="further reading"),
            SkipRule(keyword="photograph"),
            SkipRule(keyword="credit"),
            SkipRule(keyword="glossary"),
            SkipRule(keyword="bibliography"),
            SkipRule(keyword="notes"),
            SkipRule(keyword="endnote"),
            SkipRule(keyword="endnotes"),
            SkipRule(keyword="index"),
            SkipRule(keyword="appendix"),
            SkipRule(keyword="appendices"),
            SkipRule(keyword="afterword"),
            SkipRule(keyword="reference"),
            SkipRule(keyword="references"),
        ]
    )
    # Back-matter cascade skipping configuration
    skip_after_back_matter: bool = True
    back_matter_triggers: list[str] = Field(
        default_factory=lambda: [
            "index",
            "notes",
            "endnotes",
            "bibliography",
            "references",
            "glossary",
        ]
    )
    back_matter_threshold: float = 0.7  # Only trigger in last 30% of TOC
    prompt_preamble: str | None = None  # custom translation prompt; None uses the built-in default
    output_mode: str = Field(default="bilingual")  # "bilingual" or "translated_only" (see validator)
    # Parallel processing settings
    translation_workers: int = Field(default=3, ge=1, description="Number of parallel workers for translation")
    audiobook_workers: int = Field(default=3, ge=1, description="Number of parallel workers for audiobook generation")
    # Per-book settings
    cover_image_path: Path | None = None
    audiobook_voice: str | None = None
    audiobook_opening_statement: str | None = None
    audiobook_closing_statement: str | None = None
    # TTS Provider settings
    audiobook_tts_provider: str = Field(default="edge", description="TTS provider: edge or openai")
    audiobook_tts_model: str | None = Field(default=None, description="TTS model (OpenAI: tts-1 or tts-1-hd)")
    audiobook_tts_speed: float = Field(default=1.0, ge=0.25, le=4.0, description="TTS speed for OpenAI (0.25-4.0)")
    # File inclusion lists (per-book config only)
    translation_files: list[str] | None = None
    audiobook_files: list[str] | None = None
    segments_file: Path = Field(default_factory=lambda: Path("segments.json"))  # made absolute in model_post_init
    state_file: Path = Field(default_factory=lambda: Path("state.json"))  # made absolute in model_post_init
    model_config = ConfigDict(arbitrary_types_allowed=True)
    @field_validator("output_mode")
    @classmethod
    def _normalise_output_mode(cls, value: str) -> str:
        """Normalise output_mode (hyphens→underscores, lowercase) and validate it."""
        if not value:
            return "bilingual"
        normalised = value.replace("-", "_").strip().lower()
        if normalised not in {"bilingual", "translated_only"}:
            raise ValueError("output_mode must be 'bilingual' or 'translated_only'")
        return normalised
    @field_validator("audiobook_tts_provider")
    @classmethod
    def _normalise_tts_provider(cls, value: str) -> str:
        """Normalise the TTS provider name and validate it."""
        if not value:
            return "edge"
        normalised = value.strip().lower()
        if normalised not in {"edge", "openai"}:
            raise ValueError("audiobook_tts_provider must be 'edge' or 'openai'")
        return normalised
    def model_post_init(self, __context: Any) -> None: # type: ignore[override]
        """Anchor all workspace paths as absolute after model construction.

        Uses ``object.__setattr__`` to write fields directly, bypassing
        pydantic's assignment machinery.
        """
        work_root = self.work_root.expanduser()
        if not work_root.is_absolute():
            work_root = Path.cwd() / work_root
        object.__setattr__(self, "work_root", work_root)
        work_dir = self.work_dir.expanduser()
        # When work_dir was not explicitly provided, it mirrors work_root.
        if "work_dir" not in self.model_fields_set:
            work_dir = work_root
        elif not work_dir.is_absolute():
            work_dir = Path.cwd() / work_dir
        object.__setattr__(self, "work_dir", work_dir)
        # Relative state-file paths live inside the workspace directory.
        if not self.segments_file.is_absolute():
            object.__setattr__(self, "segments_file", self.work_dir / self.segments_file)
        if not self.state_file.is_absolute():
            object.__setattr__(self, "state_file", self.work_dir / self.state_file)
    def ensure_directories(self) -> None:
        """Create the workspace and state-file parent directories if missing."""
        # Create work_dir (which creates work_root as parent if needed)
        self.work_dir.mkdir(parents=True, exist_ok=True)
        self.segments_file.parent.mkdir(parents=True, exist_ok=True)
        self.state_file.parent.mkdir(parents=True, exist_ok=True)
    def model_copy(self, *, update: dict[str, Any] | None = None, deep: bool = False) -> AppSettings: # type: ignore[override]
        """Copy the settings, keeping workspace-bound paths consistent.

        When ``update`` overrides ``work_dir`` without also overriding
        ``work_root``, ``work_root`` is pointed at the new ``work_dir``
        (NOTE(review): at the directory itself, not its parent — confirm
        intended). Paths previously under the old ``work_dir`` are rebased.
        """
        old_work_dir = self.work_dir
        copied: AppSettings = super().model_copy(update=update, deep=deep)
        new_work_dir = copied.work_dir
        overridden: set[str] = set(update.keys()) if update else set()
        if "work_dir" in overridden and "work_root" not in overridden:
            object.__setattr__(copied, "work_root", new_work_dir)
        if "work_dir" in overridden and new_work_dir != old_work_dir:
            copied._refresh_workdir_bound_paths(old_work_dir, overridden)
        return copied
    def _refresh_workdir_bound_paths(self, old_work_dir: Path, overridden: set[str]) -> None:
        """Rebase segments/state paths from the old work_dir to the current one.

        Fields explicitly overridden in the copy, or not located under the
        old work_dir, are left untouched.
        """
        for attr in ("segments_file", "state_file"):
            if attr in overridden:
                continue
            current = getattr(self, attr)
            try:
                relative = current.relative_to(old_work_dir)
            except ValueError:
                # Path was not under the old workspace — leave it alone.
                continue
            object.__setattr__(self, attr, self.work_dir / relative)
    def dump(self, path: Path) -> None:
        """Write the settings as pretty-printed UTF-8 JSON to *path*."""
        import json
        # Round-trip through model_dump_json so pydantic serialises Paths etc.
        payload = json.loads(self.model_dump_json(indent=2))
        path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/config/templates.py | Python | from __future__ import annotations
from pathlib import Path
from config.models import AppSettings
def create_book_config_template(
    work_dir: Path,
    epub_name: str,
    metadata: dict[str, str | None] | None = None,
    segments_doc=None,
    input_epub: Path | None = None,
) -> None:
    """Create a config.yaml in the book's working directory with filled metadata.

    Args:
        work_dir: Book's working directory
        epub_name: Name of the EPUB file
        metadata: Book metadata dict with keys: title, author, publisher, year
            If None, uses "Unknown" as defaults
        segments_doc: Optional SegmentsDocument for generating inclusion lists
        input_epub: Optional path to EPUB file for extracting TOC titles
    """
    config_path = work_dir / "config.yaml"
    if config_path.exists():
        return  # Don't overwrite existing config
    # Extract metadata with defaults
    meta = metadata or {}
    book_name = meta.get("title") or "Unknown"
    author = meta.get("author") or "Unknown"
    publisher = meta.get("publisher") or "Unknown"
    year_of_publication = meta.get("year") or "Unknown"
    # Generate US English voice list (each line is a commented-out YAML option)
    voice_lines = []
    try:
        from audiobook.voices import list_voices_for_language
        us_voices = list_voices_for_language("en-US")
        us_voices = sorted(us_voices, key=lambda v: v.get("ShortName", ""))
        for voice in us_voices:
            name = voice.get("ShortName", "")
            gender = voice.get("Gender", "")
            voice_lines.append(f"# audiobook_voice: {name:<30} # {gender}")
    except Exception:
        # Fallback if voice listing fails
        voice_lines = [
            "# audiobook_voice: en-US-GuyNeural # Male",
            "# audiobook_voice: en-US-JennyNeural # Female",
        ]
    voices_section = "\n".join(voice_lines)
    # Build file→title mapping from TOC
    file_titles: dict[str, str] = {}
    if input_epub and input_epub.exists():
        try:
            from epub_io.reader import EpubReader
            from epub_io.toc_utils import parse_toc_to_dict
            # Use empty AppSettings for TOC parsing only
            temp_settings = AppSettings(work_dir=work_dir)
            reader = EpubReader(input_epub, temp_settings)
            # Parse TOC to get file→title mapping
            file_titles = parse_toc_to_dict(reader)
        except Exception:
            # If TOC parsing fails, continue without titles
            pass
    # Build file inclusion lists
    inclusion_lists_section = ""
    if segments_doc:
        # Get all unique file paths from segments (in spine order)
        file_paths_map: dict[str, int] = {}  # file_path -> min_spine_index
        for segment in segments_doc.segments:
            file_path_str = segment.file_path.as_posix()
            if file_path_str not in file_paths_map:
                file_paths_map[file_path_str] = segment.metadata.spine_index
            else:
                # Keep the earliest spine position seen for this file.
                file_paths_map[file_path_str] = min(
                    file_paths_map[file_path_str], segment.metadata.spine_index
                )
        # Build skipped files map
        skipped_map: dict[str, str] = {}  # file_path -> reason
        for skipped_doc in segments_doc.skipped_documents:
            skipped_map[skipped_doc.file_path.as_posix()] = skipped_doc.reason
        # Combine and sort all files by spine index
        all_files = sorted(
            [(path, spine_idx) for path, spine_idx in file_paths_map.items()],
            key=lambda x: x[1],
        )
        # Add skipped files that might not be in segments
        for skipped_path, reason in skipped_map.items():
            if skipped_path not in file_paths_map:
                all_files.append((skipped_path, 9999))  # Put at end
        # Generate translation_files section (skipped entries are commented out)
        translation_lines = []
        for file_path, _ in all_files:
            title = file_titles.get(file_path, "")
            title_comment = f" # {title}" if title else ""
            if file_path in skipped_map:
                reason = skipped_map[file_path]
                translation_lines.append(f" # - {file_path} # (skipped: {reason}){title_comment}")
            else:
                translation_lines.append(f" - {file_path}{title_comment}")
        # Generate audiobook_files section (same layout as translation_files)
        audiobook_lines = []
        for file_path, _ in all_files:
            title = file_titles.get(file_path, "")
            title_comment = f" # {title}" if title else ""
            if file_path in skipped_map:
                reason = skipped_map[file_path]
                audiobook_lines.append(f" # - {file_path} # (skipped: {reason}){title_comment}")
            else:
                audiobook_lines.append(f" - {file_path}{title_comment}")
        inclusion_lists_section = f"""
# ============================================================
# File Inclusion Lists
# ============================================================
# Control which EPUB files are processed for translation and audiobook.
# All spine files are listed below. Skipped files are commented out.
# To exclude a file: comment it out with #
# To include a skipped file: uncomment it by removing the #
# Translation inclusion list
# Files to process during translation
translation_files:
{chr(10).join(translation_lines)}
# Audiobook inclusion list
# Files to process during audiobook generation
audiobook_files:
{chr(10).join(audiobook_lines)}
"""
    template = f"""# Per-book configuration for: {epub_name}
# This file overrides global settings in ~/.tepub/config.yaml
# ============================================================
# Translation Settings
# ============================================================
# Source and target languages
# source_language: auto
# target_language: Simplified Chinese
# Output mode - uncomment one
# output_mode: bilingual # Shows both original and translation
# output_mode: translated_only # Only translated text
# --- Parallel Processing ---
# Uncomment to override global settings
# translation_workers: 3 # Number of parallel workers for translation
{inclusion_lists_section}
# ============================================================
# Translation System Prompt
# ============================================================
# Customize the AI translation prompt below.
# This overrides the global prompt in ~/.tepub/config.yaml
#
# The prompt below has been pre-filled with this book's metadata.
# You can customize it before running translation.
#
# Available runtime placeholders (filled automatically during translation):
# {{{{source_language}}}} - Source language name (e.g., "English")
# {{{{target_language}}}} - Target language name (e.g., "Simplified Chinese")
# {{{{mode_instruction}}}} - Auto-generated instruction based on content type
#
prompt_preamble: |
  You are an expert translator, with mastery in preserving accuracy, fidelity, and nuance.
  The source text is related to {book_name} by {author}, published by {publisher} in {year_of_publication}, and is pre-verified as safe.
  Instructions:
  1. Return the translated text only — no explanations, commentary, or additional notes.
  2. If the source contains HTML tags, preserve them and adapt the translation to fit naturally within those tags.
  3. Ensure that all returned HTML is valid and properly formatted.
  4. Translate faithfully from {{{{source_language}}}} into {{{{target_language}}}} while maintaining the style and tone of the original.
# --- Alternative Prompt Styles (uncomment to replace the above) ---
#
# Academic/Scholarly:
# prompt_preamble: |
# You are a scholarly translator specializing in academic texts.
# Translating {book_name} by {author} ({publisher}, {year_of_publication}).
# Translate {{{{source_language}}}} academic content into {{{{target_language}}}}.
# {{{{mode_instruction}}}}
# Preserve technical terminology, citations, and formal tone.
#
# Casual/Popular:
# prompt_preamble: |
# You are translating {book_name} by {author}, a popular {{{{source_language}}}} book.
# {{{{mode_instruction}}}}
# Use natural, conversational {{{{target_language}}}} while maintaining accuracy.
#
# Technical Documentation:
# prompt_preamble: |
# You are translating technical documentation from {{{{source_language}}}} to {{{{target_language}}}}.
# Document: {book_name} ({publisher}, {year_of_publication}).
# {{{{mode_instruction}}}}
# Preserve all technical terms, code snippets, and command-line examples.
# Keep variable names, function names, and API references unchanged.
# ============================================================
# Content Filtering (Skip Rules)
# ============================================================
# Note: These rules are IN ADDITION to global defaults.
# Global defaults already skip: cover, copyright, dedication,
# acknowledgment, bibliography, notes, index, glossary, etc.
# Uncomment and customize rules below to skip additional sections:
# skip_rules:
# # --- Front Matter ---
# - keyword: preface
# - keyword: foreword
# - keyword: introduction
# - keyword: prologue
#
# # --- Back Matter ---
# - keyword: appendix
# - keyword: epilogue
# - keyword: afterword
# - keyword: references
# - keyword: endnotes
#
# # --- Other Common Sections ---
# - keyword: table of contents
# - keyword: illustrations
# - keyword: about the publisher
# - keyword: about the author
#
# # --- Custom reason example ---
# - keyword: chapter 5
# reason: skip this specific chapter
# ============================================================
# Audiobook Settings
# ============================================================
# --- TTS Provider Selection ---
# Choose which text-to-speech service to use
# audiobook_tts_provider: edge # Options: edge, openai
# Edge TTS (Microsoft) - FREE, NO API KEY NEEDED
# - 57+ voices in multiple languages
# - Good quality, default provider
# - See all voices: edge-tts --list-voices
# audiobook_voice: en-US-GuyNeural # Clear male voice
# audiobook_voice: en-US-JennyNeural # Warm female voice
# audiobook_voice: en-US-AriaNeural # Professional female voice
# OpenAI TTS - PAID (~$15 per 1 million characters)
# - Higher quality, more natural sounding
# - Requires OPENAI_API_KEY environment variable
# To use OpenAI TTS, uncomment these lines:
# audiobook_tts_provider: openai
# audiobook_tts_model: tts-1 # Options: tts-1 (cheaper), tts-1-hd (higher quality)
# audiobook_tts_speed: 1.0 # Speed: 0.25-4.0 (1.0 = normal)
#
# OpenAI voices (uncomment ONE):
# audiobook_voice: alloy # Neutral, balanced
# audiobook_voice: echo # Male, authoritative
# audiobook_voice: fable # British, expressive
# audiobook_voice: onyx # Deep male, professional
# audiobook_voice: nova # Female, friendly
# audiobook_voice: shimmer # Female, warm
# Cost comparison (300-page book ~750,000 characters):
# - Edge TTS: Free
# - OpenAI tts-1: ~$11.25
# - OpenAI tts-1-hd: ~$22.50
# --- Advanced Edge TTS Voice Selection ---
# Full list of US English voices available:
{voices_section}
# Cover image (relative to this directory or absolute path)
# cover_image_path: markdown/images/cover.jpg
# --- Parallel Processing ---
# Uncomment to override global settings
# audiobook_workers: 3 # Number of parallel workers for audiobook generation
# ============================================================
# Audiobook Opening & Closing Statements
# ============================================================
# These statements are spoken at the beginning and end of the audiobook.
# Available placeholders:
# {{book_name}} - Book title
# {{author}} - Author name
# {{narrator_name}} - Extracted from voice (e.g., "Guy")
audiobook_opening_statement: |
  This is an audiobook version of {{book_name}}, written by {{author}}. Narrated by {{narrator_name}}. Created by T EPUB.
audiobook_closing_statement: |
  You've been listening to {{book_name}}, written by {{author}}, and narrated by {{narrator_name}}. Thank you for listening.
"""
    config_path.write_text(template, encoding="utf-8")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/config/workspace.py | Python | from __future__ import annotations
import hashlib
import unicodedata
from pathlib import Path
from config.models import AppSettings, _NON_SLUG_CHARS, _WORD_SPLIT_PATTERN, _WORKSPACE_HASH_LENGTH
from exceptions import StateFileNotFoundError, WorkspaceNotFoundError
def derive_book_workspace(settings: AppSettings, input_epub: Path) -> Path:
    """Return the workspace directory that sits next to the EPUB file.

    The directory shares the EPUB's base name, e.g.
    ``/path/to/book.epub`` -> ``/path/to/book/``.
    (``settings`` is accepted for interface symmetry but not consulted.)
    """
    resolved = input_epub.expanduser().resolve()
    return resolved.with_name(resolved.stem)
def with_book_workspace(settings: AppSettings, input_epub: Path) -> AppSettings:
    """Return settings rebased onto the book's own workspace.

    If the workspace contains a ``config.yaml``, its keys are applied on
    top of the derived settings as per-book overrides.
    """
    from config.loader import _parse_yaml_file

    workspace = derive_book_workspace(settings, input_epub)
    updated = settings.model_copy(update={"work_root": workspace.parent, "work_dir": workspace})
    config_path = workspace / "config.yaml"
    if not config_path.exists():
        return updated
    payload = _parse_yaml_file(config_path)
    if isinstance(payload, dict) and payload:
        updated = updated.model_copy(update=payload)
    return updated
def with_override_root(settings: AppSettings, base_path: Path, input_epub: Path) -> AppSettings:
    """Override the work directory with an explicitly supplied path.

    A directory that already holds ``segments.json`` or ``state.json`` is
    treated as the workspace itself; otherwise it becomes the root under
    which a book-specific subdirectory is created.
    """
    base_path = base_path.expanduser()
    if not base_path.is_absolute():
        base_path = Path.cwd() / base_path
    holds_state = any(
        (base_path / name).exists() for name in ("segments.json", "state.json")
    )
    if holds_state:
        return settings.model_copy(update={"work_root": base_path.parent, "work_dir": base_path})
    return settings.model_copy(
        update={"work_root": base_path, "work_dir": base_path / build_workspace_name(input_epub)}
    )
def validate_for_export(settings: AppSettings, input_epub: Path) -> None:
    """Ensure the workspace and both state files exist and are loadable.

    Args:
        settings: AppSettings instance
        input_epub: Path to the EPUB file being processed

    Raises:
        WorkspaceNotFoundError: If workspace directory doesn't exist
        StateFileNotFoundError: If required state files are missing
        CorruptedStateError: If state files are corrupted
    """
    from state.base import safe_load_state
    from state.models import SegmentsDocument, StateDocument

    if not settings.work_dir.exists():
        raise WorkspaceNotFoundError(input_epub, settings.work_dir)
    required = (
        (settings.segments_file, "segments"),
        (settings.state_file, "translation"),
    )
    for file_path, label in required:
        if not file_path.exists():
            raise StateFileNotFoundError(label, input_epub)
    # Loading both documents proves they are parseable, not merely present.
    safe_load_state(settings.segments_file, SegmentsDocument, "segments")
    safe_load_state(settings.state_file, StateDocument, "translation")
def validate_for_translation(settings: AppSettings, input_epub: Path) -> None:
    """Ensure the workspace and segments file exist and are loadable.

    Args:
        settings: AppSettings instance
        input_epub: Path to the EPUB file being processed

    Raises:
        WorkspaceNotFoundError: If workspace directory doesn't exist
        StateFileNotFoundError: If segments file is missing
        CorruptedStateError: If segments file is corrupted
    """
    from state.base import safe_load_state
    from state.models import SegmentsDocument

    workspace = settings.work_dir
    if not workspace.exists():
        raise WorkspaceNotFoundError(input_epub, workspace)
    segments_path = settings.segments_file
    if not segments_path.exists():
        raise StateFileNotFoundError("segments", input_epub)
    # Loading validates the JSON is parseable and schema-conformant.
    safe_load_state(segments_path, SegmentsDocument, "segments")
def build_workspace_name(input_epub: Path) -> str:
    """Build a workspace directory name from the EPUB filename.

    Combines the slugified first word of the filename with a short SHA-1
    digest of the absolute path, so distinct books whose names share a
    first word still get distinct workspaces.
    """
    first_word = _extract_first_word(input_epub)
    digest = hashlib.sha1(
        str(input_epub.expanduser().resolve(strict=False)).encode("utf-8")
    ).hexdigest()[:_WORKSPACE_HASH_LENGTH]
    # A sha1 hexdigest is always 40 hex chars, so the slice above is never
    # empty; the previous `if digest else first_word` fallback was dead code.
    return f"{first_word}-{digest}"
def _extract_first_word(input_epub: Path) -> str:
    """Slugify the first word of the EPUB filename (ASCII, lowercase).

    Falls back to ``"book"`` when the stem yields no usable token.
    """
    first = "book"
    for token in _WORD_SPLIT_PATTERN.split(input_epub.stem):
        if token:
            first = token
            break
    decomposed = unicodedata.normalize("NFKD", first)
    slug = decomposed.encode("ascii", "ignore").decode("ascii").lower()
    slug = _NON_SLUG_CHARS.sub("", slug)
    return slug or "book"
# Attach workspace methods to AppSettings for backward compatibility:
# older call sites invoke these as AppSettings methods rather than as
# module-level functions, and each function already takes the settings
# instance as its first argument so it works unchanged as a method.
AppSettings.derive_book_workspace = derive_book_workspace  # type: ignore
AppSettings.with_book_workspace = with_book_workspace  # type: ignore
AppSettings.with_override_root = with_override_root  # type: ignore
AppSettings.validate_for_export = validate_for_export  # type: ignore
AppSettings.validate_for_translation = validate_for_translation  # type: ignore
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/console_singleton.py | Python | """Centralized console singleton for global quiet/verbose control."""
from __future__ import annotations
from rich.console import Console
# Lazily-created singleton; get_console() fills it in on first use and
# configure_console() replaces it with a configured instance.
_console: Console | None = None
def get_console() -> Console:
    """Return the process-wide shared Console.

    Creates a default Console lazily on first use. A console configured
    via configure_console() takes its place for all later callers.
    """
    global _console
    if _console is not None:
        return _console
    _console = Console()
    return _console
def configure_console(*, quiet: bool = False, verbose: bool = False) -> None:
    """Configure the global Console instance with quiet/verbose settings.

    Args:
        quiet: Suppress all console output (takes precedence over verbose)
        verbose: Enable verbose output (ignored if quiet=True)

    Note:
        This should be called once from main.py after parsing CLI flags.
        Calling it multiple times will replace the existing console instance.
        NOTE(review): only ``quiet`` is forwarded to Console below; ``verbose``
        currently has no effect on the created instance — confirm whether
        callers expect it to.
    """
    global _console
    # quiet takes precedence over verbose
    _console = Console(quiet=quiet)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/analysis.py | Python | from __future__ import annotations
import json
from collections import Counter
from collections.abc import Iterable
from pathlib import Path
from rich.panel import Panel
from rich.progress import Progress
from rich.table import Table
from config import AppSettings
from epub_io.selector import analyze_skip_candidates
from .common import console
def _iter_epubs(library: Path) -> Iterable[Path]:
if library.is_file() and library.suffix.lower() == ".epub":
yield library
return
for path in sorted(library.rglob("*.epub")):
if path.is_file():
yield path
def analyze_library(
    settings: AppSettings,
    library: Path,
    limit: int | None = None,
    top_n: int = 15,
    report_path: Path | None = None,
) -> None:
    """Scan a library of EPUBs and report aggregated skip-detection results.

    Args:
        settings: Active application settings (passed to the skip analyzer).
        library: Directory to scan recursively, or a single .epub file.
        limit: If given, analyze at most this many EPUBs (negative -> none).
        top_n: How many unmatched TOC titles to show as keyword candidates.
        report_path: Optional path; when set, a JSON report is written there.
    """
    library = library.expanduser()
    if not library.exists():
        console.print(f"[bold red]Library path not found:[/bold red] {library}")
        raise SystemExit(1)
    epubs = list(_iter_epubs(library))
    if not epubs:
        console.print(f"[yellow]No EPUB files found under {library}.[/yellow]")
        return
    if limit is not None:
        # A negative limit is clamped to zero (analyze nothing).
        epubs = epubs[: limit if limit >= 0 else 0]
    reason_counter: Counter[str] = Counter()
    source_counter: Counter[str] = Counter()
    unmatched_counter: Counter[str] = Counter()
    processed = 0
    books_with_skips = 0
    errors: list[tuple[Path, str]] = []
    with Progress() as progress:
        task = progress.add_task("Analyzing", total=len(epubs))
        for epub_path in epubs:
            try:
                analysis = analyze_skip_candidates(epub_path, settings)
            except Exception as exc:  # pragma: no cover - surfaced in output
                # Record the failure and keep scanning the rest of the library.
                errors.append((epub_path, str(exc)))
                progress.advance(task)
                continue
            candidates = [c for c in analysis.candidates if c.flagged]
            if candidates:
                books_with_skips += 1
                reason_counter.update(c.reason for c in candidates)
                source_counter.update(c.source for c in candidates)
            # Unmatched TOC titles are tallied even when nothing was flagged.
            unmatched_counter.update(analysis.toc_unmatched_titles)
            processed += 1
            progress.advance(task)
    console.print(
        Panel(
            f"Processed {processed} EPUBs | {books_with_skips} with skips | "
            f"{len(errors)} errors",
            title="Skip Analysis",
        )
    )
    if reason_counter:
        table = Table(title="Skip Reasons", show_lines=False)
        table.add_column("Reason")
        table.add_column("Count", justify="right")
        for reason, count in reason_counter.most_common():
            table.add_row(reason, str(count))
        console.print(table)
    if source_counter:
        source_table = Table(title="Skip Sources", show_lines=False)
        source_table.add_column("Source")
        source_table.add_column("Count", justify="right")
        for source, count in source_counter.most_common():
            source_table.add_row(source, str(count))
        console.print(source_table)
    if unmatched_counter:
        unmatched_table = Table(title=f"Potential New Keywords (top {top_n})")
        unmatched_table.add_column("Title")
        unmatched_table.add_column("Count", justify="right")
        for title, count in unmatched_counter.most_common(top_n):
            unmatched_table.add_row(title, str(count))
        console.print(unmatched_table)
    if errors:
        # Only the first 10 errors are shown to keep the table readable.
        error_table = Table(title="Errors", show_lines=False)
        error_table.add_column("EPUB")
        error_table.add_column("Error")
        for path, message in errors[:10]:
            error_table.add_row(path.as_posix(), message)
        console.print(error_table)
    if report_path:
        # Full machine-readable dump (all errors, not just the first 10).
        payload = {
            "processed": processed,
            "books_with_skips": books_with_skips,
            "errors": [{"path": str(path), "error": message} for path, message in errors],
            "reasons": dict(reason_counter),
            "sources": dict(source_counter),
            "unmatched_titles": dict(unmatched_counter),
        }
        report_path = report_path.expanduser()
        report_path.parent.mkdir(parents=True, exist_ok=True)
        report_path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")
        console.print(f"[green]Report written to {report_path}[/green]")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/common.py | Python | from __future__ import annotations
from pathlib import Path
from config import AppSettings
from console_singleton import get_console
from state.store import load_segments, load_state
# Shared console instance used by all debug-tools output.
console = get_console()
def require_file(path: Path, description: str) -> None:
    """Exit the process with an error message when *path* does not exist."""
    if path.exists():
        return
    console.print(f"[bold red]{description} not found:[/bold red] {path}")
    raise SystemExit(1)
def load_all_segments(settings: AppSettings):
    """Load the segments document, exiting if the file is missing."""
    segments_path = settings.segments_file
    require_file(segments_path, "Segments file")
    return load_segments(segments_path)
def load_translation_state(settings: AppSettings):
    """Load the translation state document, exiting if the file is missing."""
    state_path = settings.state_file
    require_file(state_path, "State file")
    return load_state(state_path)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/extraction_summary.py | Python | from __future__ import annotations
from pathlib import Path
from rich.panel import Panel
from rich.table import Table
from config import AppSettings
from state.models import SegmentStatus
from state.store import load_segments, load_state
from .common import console
def print_extraction_summary(
    settings: AppSettings,
    show_samples: int = 5,
    epub_path: Path | None = None,
) -> None:
    """Print a summary of extraction results: skips, statistics, pending work.

    Args:
        settings: Active application settings (locates segments/state files).
        show_samples: Maximum number of pending segments to list.
        epub_path: Optional EPUB path; when given, spine/TOC statistics are shown.
    """
    segments_doc = load_segments(settings.segments_file)
    state = load_state(settings.state_file)
    auto_skips = segments_doc.skipped_documents
    pending_ids = _collect_pending_ids(state)
    console.print(Panel.fit("Extraction Summary", style="bold"))
    # Show EPUB structure statistics
    if epub_path:
        _print_epub_statistics(epub_path, segments_doc, auto_skips)
    if auto_skips:
        # Separate TOC-based and cascade skips
        toc_skips = [s for s in auto_skips if s.source == "toc"]
        cascade_skips = [s for s in auto_skips if s.source == "cascade"]
        if toc_skips:
            auto_table = Table(title="Automatically Skipped Documents (TOC-based)")
            auto_table.add_column("File")
            auto_table.add_column("Reason")
            auto_table.add_column("Source")
            for skipped in toc_skips:
                auto_table.add_row(
                    skipped.file_path.as_posix(),
                    skipped.reason,
                    skipped.source,
                )
            console.print(auto_table)
        if cascade_skips:
            # Show cascade skip summary, not all files
            trigger_reasons = {}
            for skip in cascade_skips:
                reason = skip.reason
                trigger_reasons[reason] = trigger_reasons.get(reason, 0) + 1
            cascade_table = Table(title="Cascade Skipped Documents (Back-matter continuation)")
            cascade_table.add_column("Trigger", style="cyan")
            cascade_table.add_column("Files Skipped", style="bold yellow")
            for reason, count in sorted(trigger_reasons.items()):
                cascade_table.add_row(reason, str(count))
            console.print(cascade_table)
            console.print("[dim] (Use --include-back-matter to process these files)[/dim]")
    else:
        console.print("[green]No automatic skips detected.[/green]")
    console.print(
        f"Pending segments: [bold]{len(pending_ids)}[/bold] (showing up to {show_samples})"
    )
    if pending_ids and show_samples > 0:
        sample_table = Table()
        sample_table.add_column("Segment ID")
        sample_table.add_column("File")
        sample_table.add_column("Status")
        # Index segments by ID for O(1) lookups while sampling.
        segments_index = {segment.segment_id: segment for segment in segments_doc.segments}
        for seg_id in pending_ids[:show_samples]:
            segment = segments_index.get(seg_id)
            if not segment:
                # State references a segment missing from the segments file; skip it.
                continue
            sample_table.add_row(
                seg_id,
                segment.file_path.as_posix(),
                state.segments[seg_id].status.value,
            )
        console.print(sample_table)
def _collect_pending_ids(state) -> list[str]:
    """Return the IDs of all segments still awaiting translation."""
    pending: list[str] = []
    for seg_id, record in state.segments.items():
        if record.status == SegmentStatus.PENDING:
            pending.append(seg_id)
    return pending
def _print_epub_statistics(epub_path: Path, segments_doc, auto_skips) -> None:
    """Print EPUB structure statistics (spine, TOC, processed files)."""
    try:
        from config import AppSettings
        from epub_io.reader import EpubReader
        from epub_io.resources import iter_spine_items
        from epub_io.selector import _flatten_toc_entries
        # Load EPUB to get spine and TOC counts
        temp_settings = AppSettings()
        reader = EpubReader(epub_path, temp_settings)
        spine_items = list(iter_spine_items(reader.book))
        toc_entries = _flatten_toc_entries(getattr(reader.book, "toc", []))
        # Count processed files (unique file paths across all segments)
        processed_files = len(set(seg.file_path for seg in segments_doc.segments))
        skipped_files = len(auto_skips)
        total_handled = processed_files + skipped_files
        # Build statistics table
        stats_table = Table(title="EPUB Structure Statistics", show_header=False)
        stats_table.add_column("Metric", style="cyan")
        stats_table.add_column("Count", style="bold")
        stats_table.add_row("Total spine items", str(len(spine_items)))
        stats_table.add_row("TOC entries", str(len(toc_entries)))
        stats_table.add_row("Files processed", f"[green]{processed_files}[/green]")
        stats_table.add_row("Files skipped", f"[yellow]{skipped_files}[/yellow]")
        stats_table.add_row("Total handled", str(total_handled))
        # Warning if many files skipped (more than half of handled files)
        if skipped_files > 0:
            skip_percentage = (skipped_files / total_handled) * 100
            if skip_percentage > 50:
                stats_table.add_row(
                    "⚠ Warning",
                    f"[red]{skip_percentage:.1f}% of files skipped[/red]",
                )
        console.print(stats_table)
        console.print()
    except Exception:
        # Silently fail if we can't load EPUB statistics
        pass
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/files.py | Python | from __future__ import annotations
from collections import Counter
from rich.table import Table
from config import AppSettings
from .common import console, load_all_segments
def list_files(settings: AppSettings) -> None:
    """Print a table showing how many extracted segments each spine file has."""
    document = load_all_segments(settings)
    counts: Counter = Counter()
    for segment in document.segments:
        counts[segment.file_path.as_posix()] += 1
    table = Table(title="Segment Counts per File")
    table.add_column("File")
    table.add_column("Segments")
    for path_key in sorted(counts):
        table.add_row(path_key, str(counts[path_key]))
    console.print(table)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/inspect.py | Python | from __future__ import annotations
from rich.panel import Panel
from config import AppSettings
from .common import console, load_all_segments, load_translation_state
def inspect_segment(settings: AppSettings, segment_id: str) -> None:
    """Show extraction details and translation state for a single segment ID."""
    segments_doc = load_all_segments(settings)
    state = load_translation_state(settings)
    segment = None
    for candidate in segments_doc.segments:
        if candidate.segment_id == segment_id:
            segment = candidate
            break
    if not segment:
        console.print(f"[red]Segment {segment_id} not found in segments file.[/red]")
        return
    record = state.segments.get(segment_id)
    if not record:
        console.print(f"[yellow]No translation state found for segment {segment_id}.[/yellow]")
        return
    summary = (
        f"File: {segment.file_path}\n"
        f"XPath: {segment.xpath}\n"
        f"Mode: {segment.extract_mode}\n"
        f"Element: {segment.metadata.element_type}\n"
        f"Status: {record.status}"
    )
    console.print(Panel.fit(summary, title=f"Segment {segment_id}"))
    console.print(Panel(segment.source_content, title="Original"))
    if record.translation:
        console.print(Panel(record.translation, title="Translation"))
    if record.error_message:
        console.print(Panel(record.error_message, title="Error", subtitle_align="left"))
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/pending.py | Python | from __future__ import annotations
from collections import defaultdict
from rich.table import Table
from config import AppSettings
from state.models import SegmentStatus
from .common import console, load_all_segments, load_translation_state
def show_pending(settings: AppSettings) -> None:
    """Summarize pending (not-yet-translated) segments grouped by spine file."""
    segments_doc = load_all_segments(settings)
    state = load_translation_state(settings)
    segment_index = {segment.segment_id: segment for segment in segments_doc.segments}
    pending_counts: dict[str, int] = {}
    for record in state.segments.values():
        if record.status != SegmentStatus.PENDING:
            continue
        segment = segment_index.get(record.segment_id)
        if not segment:
            continue
        key = segment.file_path.as_posix()
        pending_counts[key] = pending_counts.get(key, 0) + 1
    if not pending_counts:
        console.print("[green]No pending segments. All caught up![/green]")
        return
    table = Table(title="Pending Segments")
    table.add_column("File")
    table.add_column("Count")
    for file_path, count in sorted(pending_counts.items()):
        table.add_row(file_path, str(count))
    console.print(table)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/preview.py | Python | from __future__ import annotations
from pathlib import Path
from config import AppSettings
from epub_io.selector import collect_skip_candidates
from .common import console
def preview_skip_candidates(settings: AppSettings, input_epub: Path) -> None:
    """List the files that would be auto-skipped for this EPUB, with reasons."""
    candidates = collect_skip_candidates(input_epub, settings)
    if not candidates:
        console.print("[green]No skip candidates detected.[/green]")
        return
    console.print("[bold]Skip Candidates[/bold]")
    for item in candidates:
        line = f"- {item.file_path.as_posix()} :: {item.reason} (source={item.source})"
        console.print(line)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/debug_tools/skip_lists.py | Python | from __future__ import annotations
from rich.table import Table
from config import AppSettings
from state.models import SegmentStatus
from .common import console, load_all_segments, load_translation_state
def show_skip_list(settings: AppSettings) -> None:
    """Print tables of skipped content.

    Shows two tables: whole documents skipped automatically during extraction,
    and individual segments marked SKIPPED in the translation state.

    Args:
        settings: App settings used to locate the segments/state files.
    """
    segments_doc = load_all_segments(settings)
    state = load_translation_state(settings)
    # Whole files skipped during extraction (front/back matter etc.)
    auto_skips = segments_doc.skipped_documents
    # Individual segments marked skipped in the translation state
    manual_skip_ids = [
        seg_id
        for seg_id, record in state.segments.items()
        if record.status == SegmentStatus.SKIPPED
    ]
    if not auto_skips and not manual_skip_ids:
        console.print("[green]No skipped segments found.[/green]")
        return
    if auto_skips:
        auto_table = Table(title="Automatically Skipped Documents")
        auto_table.add_column("File")
        auto_table.add_column("Reason")
        auto_table.add_column("Source")
        for skipped in auto_skips:
            auto_table.add_row(
                skipped.file_path.as_posix(),
                skipped.reason,
                skipped.source,
            )
        console.print(auto_table)
    if manual_skip_ids:
        segment_index = {segment.segment_id: segment for segment in segments_doc.segments}
        manual_table = Table(title="Skipped Segments")
        manual_table.add_column("Segment ID")
        manual_table.add_column("File")
        manual_table.add_column("Reason")
        for seg_id in manual_skip_ids:
            segment = segment_index.get(seg_id)
            # Segment may be missing if the state and segments files are out of sync.
            if not segment:
                continue
            # Prefer the recorded error, then extraction notes, else leave blank.
            reason = state.segments[seg_id].error_message or segment.metadata.notes or ""
            manual_table.add_row(seg_id, segment.file_path.as_posix(), reason)
        console.print(manual_table)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/path_utils.py | Python | """Utilities for normalizing EPUB paths and hrefs."""
from __future__ import annotations
import posixpath
from pathlib import Path, PurePosixPath
def normalize_epub_href(document_path: Path, raw_href: str) -> str | None:
    """Normalize EPUB href relative to document path.

    Resolves relative hrefs against the document's directory and normalizes
    the result to a clean POSIX path. Filters out invalid hrefs like data URIs,
    external URLs, scheme-only URIs (mailto:, tel:, javascript:, ...),
    protocol-relative URLs ("//host/path"), and paths that traverse outside
    the EPUB root.

    Args:
        document_path: Path to the document containing the href
        raw_href: The href attribute value to normalize

    Returns:
        Normalized POSIX path string, or None if the href is invalid.

    Examples:
        >>> normalize_epub_href(Path("text/chapter1.xhtml"), "image.jpg")
        "text/image.jpg"
        >>> normalize_epub_href(Path("text/chapter1.xhtml"), "../images/cover.jpg")
        "images/cover.jpg"
        >>> normalize_epub_href(Path("text/chapter1.xhtml"), "data:image/png;base64,...")
        None
        >>> normalize_epub_href(Path("text/chapter1.xhtml"), "http://example.com/img.jpg")
        None
        >>> normalize_epub_href(Path("text/chapter1.xhtml"), "mailto:author@example.com")
        None
    """
    # Validate input
    if not raw_href:
        return None
    value = raw_href.strip()
    if not value:
        return None
    # Filter out data URIs and external URLs
    if value.startswith("data:"):
        return None
    if "://" in value:
        return None
    # Protocol-relative URLs ("//host/path") are external too; without this
    # check they would be misread as absolute in-EPUB paths.
    if value.startswith("//"):
        return None
    # Any other scheme-only URI (mailto:, tel:, javascript:, ...) is not a
    # file reference. Per RFC 3986 a relative reference cannot contain ":"
    # in its first path segment, so a colon before the first "/" means a scheme.
    if ":" in value.split("/", 1)[0]:
        return None
    # Convert document path to POSIX for consistent handling
    doc_posix = PurePosixPath(document_path.as_posix())
    # Resolve href relative to document's directory
    if value.startswith("/"):
        # Absolute path within EPUB (relative to EPUB root)
        candidate = PurePosixPath(value.lstrip("/"))
    else:
        # Relative path (relative to document's directory)
        doc_dir = doc_posix.parent
        candidate = doc_dir.joinpath(PurePosixPath(value))
    # Normalize the path (resolve .. and .)
    normalized = PurePosixPath(posixpath.normpath(str(candidate)))
    # Reject paths that traverse outside EPUB root. After normpath, any
    # remaining ".." component is leading (this also catches a bare "..",
    # which a startswith("../") check would miss).
    if ".." in normalized.parts:
        return None
    return normalized.as_posix()
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/reader.py | Python | from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from lxml import html
from config import AppSettings
from .resources import SpineItem, get_item_by_href, iter_spine_items, load_book
# Maximum EPUB file size: 500MB
MAX_EPUB_SIZE = 500 * 1024 * 1024
@dataclass
class HtmlDocument:
    """A parsed XHTML spine document together with its original bytes."""

    spine_item: SpineItem  # spine entry this document was loaded from
    tree: html.HtmlElement  # lxml tree parsed from raw_html
    raw_html: bytes  # untouched bytes as stored inside the EPUB

    @property
    def path(self) -> Path:
        """Internal EPUB path of this document (the spine item's href)."""
        return self.spine_item.href
class EpubReader:
    """Read-only access to an EPUB: opens the book and iterates XHTML documents."""

    def __init__(self, epub_path: Path, settings: AppSettings):
        """Open and validate the EPUB file.

        Args:
            epub_path: EPUB file to open.
            settings: Application settings (stored for callers; not read here).

        Raises:
            FileNotFoundError: if ``epub_path`` does not exist.
            ValueError: if the file exceeds MAX_EPUB_SIZE.
        """
        self.epub_path = epub_path
        self.settings = settings
        # Validate file size before processing
        if not epub_path.exists():
            raise FileNotFoundError(f"EPUB file not found: {epub_path}")
        file_size = epub_path.stat().st_size
        if file_size > MAX_EPUB_SIZE:
            size_mb = file_size / (1024 * 1024)
            max_mb = MAX_EPUB_SIZE / (1024 * 1024)
            raise ValueError(
                f"EPUB file too large: {size_mb:.1f}MB (maximum: {max_mb:.0f}MB)"
            )
        self.book = load_book(epub_path)

    def iter_documents(self) -> Iterable[HtmlDocument]:
        """Yield parsed XHTML spine documents in reading order.

        Non-XHTML spine entries (images, CSS, etc.) are skipped.
        """
        for spine_item in iter_spine_items(self.book):
            if not spine_item.media_type.startswith("application/xhtml"):
                continue
            item = get_item_by_href(self.book, spine_item.href)
            raw_html: bytes = item.get_content()
            tree = html.fromstring(raw_html)
            yield HtmlDocument(spine_item=spine_item, tree=tree, raw_html=raw_html)

    def get_document(self, href: Path) -> HtmlDocument:
        """Load and parse a single spine document by its internal path.

        Raises:
            KeyError: if ``href`` has no manifest item or no spine entry.
        """
        item = get_item_by_href(self.book, href)
        raw_html: bytes = item.get_content()
        tree = html.fromstring(raw_html)
        # The manifest item exists, but it must also be referenced by the spine.
        spine_item = next(
            (sp for sp in iter_spine_items(self.book) if sp.href == href),
            None,
        )
        if spine_item is None:
            raise KeyError(f"Spine item not found for {href}")
        return HtmlDocument(spine_item=spine_item, tree=tree, raw_html=raw_html)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/resources.py | Python | from __future__ import annotations
import warnings
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from ebooklib import epub
@dataclass
class SpineItem:
    """One entry of the EPUB spine (the book's linear reading order)."""

    index: int  # zero-based position in the spine
    idref: str  # manifest id referenced by this spine entry
    href: Path  # internal file path of the referenced document
    media_type: str  # manifest media type, e.g. "application/xhtml+xml"
    linear: bool  # True when the entry belongs to the linear reading order
def load_book(epub_path: Path) -> epub.EpubBook:
    """Read an EPUB from disk while muting known noisy ebooklib warnings."""
    suppressed_messages = (
        "In the future version we will turn default option ignore_ncx to True.",
        "This search incorrectly ignores the root element",
    )
    with warnings.catch_warnings():
        for message in suppressed_messages:
            warnings.filterwarnings("ignore", message=message)
        return epub.read_epub(str(epub_path), options={"ignore_ncx": False})
def iter_spine_items(book: epub.EpubBook) -> Iterable[SpineItem]:
    """Yield SpineItem records in reading order, resolving idrefs via the manifest."""
    manifest = {item.id: item for item in book.get_items()}
    for position, (idref, linear) in enumerate(book.spine):
        manifest_item = manifest.get(idref)
        if manifest_item is None:
            # Spine references an id that is missing from the manifest; skip it.
            continue
        if isinstance(linear, str):
            is_linear = linear == "yes"
        else:
            is_linear = bool(linear)
        yield SpineItem(
            index=position,
            idref=idref,
            href=Path(manifest_item.file_name),
            media_type=manifest_item.media_type,
            linear=is_linear,
        )
def get_html_items(book: epub.EpubBook) -> list[epub.EpubHtml]:
    """Return every XHTML document item contained in the book."""
    documents: list[epub.EpubHtml] = []
    for item in book.get_items():
        if item.get_type() == epub.ITEM_DOCUMENT:
            documents.append(item)
    return documents
def get_item_by_href(book: epub.EpubBook, href: Path):
    """Look up a book item by its internal file path.

    Raises:
        KeyError: if no item's file name matches ``href``.
    """
    # ebooklib stores file_name using forward slashes
    wanted = href.as_posix()
    match = next((item for item in book.get_items() if item.file_name == wanted), None)
    if match is None:
        raise KeyError(f"No item found for href {href}")
    return match
def extract_metadata(book: epub.EpubBook) -> dict[str, str | None]:
    """Extract book metadata from EPUB.

    Returns:
        Dictionary with keys: title, author, publisher, year.
        Values are None if the corresponding metadata is not available.
    """
    DC = "http://purl.org/dc/elements/1.1/"

    def _first(field: str) -> str | None:
        # Dublin Core entries come back as (value, attributes) tuples;
        # we only care about the first value.
        entries = book.get_metadata(DC, field)
        return entries[0][0] if entries else None

    title = _first("title")
    author = _first("creator")
    publisher = _first("publisher")
    # The date field may be "YYYY", "YYYY-MM-DD", or free text; prefer the
    # leading four digits as the year, otherwise keep the raw string.
    raw_date = _first("date")
    year = None
    if raw_date:
        year = raw_date[:4] if len(raw_date) >= 4 and raw_date[:4].isdigit() else raw_date
    return {
        "title": title,
        "author": author,
        "publisher": publisher,
        "year": year,
    }
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/selector.py | Python | from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from rich.prompt import Confirm
from config import AppSettings
from .reader import EpubReader
from .resources import SpineItem, iter_spine_items
@dataclass
class SkipAnalysis:
    """Result of analyzing an EPUB for skippable front/back matter."""

    candidates: list[SkipCandidate]  # ordered files proposed for skipping
    toc_unmatched_titles: list[str]  # sampled TOC titles that matched no keyword
@dataclass
class SkipCandidate:
    """A spine file proposed for skipping, with the reason it was flagged."""

    file_path: Path  # internal EPUB path of the document
    spine_index: int  # position of the document in the spine
    reason: str  # matched keyword or cascade description
    source: str = "content"  # where the match came from: "toc", "cascade", or "content"
    flagged: bool = True  # whether the candidate is currently marked to be skipped
TOC_FRONT_SAMPLE = 8
TOC_BACK_SAMPLE = 6
def _normalize_text(value: str) -> str:
return " ".join(value.lower().split())
def _match_keyword(text: str, keywords: Iterable[str]) -> str | None:
    """Return the first keyword that appears in the normalized text, or None."""
    haystack = _normalize_text(text)
    matches = (keyword for keyword in keywords if keyword in haystack)
    return next(matches, None)
def _flatten_toc_entries(entries) -> list[tuple[str, str]]:
flattened: list[tuple[str, str]] = []
def _walk(items) -> None:
for item in items:
if item is None:
continue
href = getattr(item, "href", None)
title = getattr(item, "title", "")
if href and title:
flattened.append((title, href))
subitems = getattr(item, "subitems", None)
if subitems:
_walk(subitems)
elif isinstance(item, (list, tuple)):
_walk(item)
_walk(entries)
return flattened
def _collect_toc_candidates(
    spine_lookup: dict[Path, SpineItem],
    toc_entries: list[tuple[str, str]],
    keywords: Iterable[str],
) -> tuple[dict[Path, SkipCandidate], list[str]]:
    """Match TOC titles against skip keywords.

    Returns:
        A pair ``(candidates, unmatched_titles)``: ``candidates`` maps a spine
        file path to its SkipCandidate (source="toc"); ``unmatched_titles`` is
        a sample of normalized titles near the start/end of the TOC that did
        NOT match any keyword — useful for suggesting new skip keywords.
    """
    candidates: dict[Path, SkipCandidate] = {}
    unmatched_titles: list[str] = []
    total_entries = len(toc_entries)
    seen_titles: set[str] = set()
    for index, (title, href) in enumerate(toc_entries):
        # Drop any #fragment so the href maps onto a spine file path.
        href_path = Path(href.split("#", 1)[0])
        spine_item = spine_lookup.get(href_path)
        if spine_item is None:
            continue
        normalized_title = _normalize_text(title)
        keyword = _match_keyword(normalized_title, keywords)
        if keyword:
            candidates[href_path] = SkipCandidate(
                file_path=href_path,
                spine_index=spine_item.index,
                reason=keyword,
                source="toc",
            )
        else:
            # Sample only plausible front/back-matter titles: deduplicated,
            # non-numeric, not obvious chapter/part headings, and located in
            # the first TOC_FRONT_SAMPLE or last TOC_BACK_SAMPLE TOC entries.
            if (
                normalized_title
                and normalized_title not in seen_titles
                and not any(ch.isdigit() for ch in normalized_title)
                and "chapter" not in normalized_title
                and "part" not in normalized_title
                and (index < TOC_FRONT_SAMPLE or index >= max(total_entries - TOC_BACK_SAMPLE, 0))
            ):
                unmatched_titles.append(normalized_title)
                seen_titles.add(normalized_title)
    return candidates, unmatched_titles
def _apply_skip_after_logic(
    candidates: dict[Path, SkipCandidate],
    spine_lookup: dict[Path, SpineItem],
    toc_entries: list[tuple[str, str]],
    settings: AppSettings,
) -> dict[Path, SkipCandidate]:
    """
    Apply cascade skipping after back-matter triggers.
    When a back-matter section (index, notes, bibliography, etc.) is found
    in the last portion of the TOC, skip all subsequent spine items.
    This prevents processing hundreds of continuation pages for indexes
    and endnotes that are split across many HTML files.
    Args:
        candidates: Existing skip candidates from TOC matching
        spine_lookup: Map of href to SpineItem for all spine items
        toc_entries: Flattened list of (title, href) from TOC
        settings: App settings with cascade skip configuration
    Returns:
        Updated candidates dictionary with cascade skip entries added
        (note: ``candidates`` is mutated in place and also returned)
    """
    if not settings.skip_after_back_matter:
        return candidates
    total_entries = len(toc_entries)
    # Only TOC entries in the final (1 - back_matter_threshold) portion of the
    # TOC are allowed to trigger the cascade; earlier matches are ignored.
    threshold_index = int(total_entries * settings.back_matter_threshold)
    trigger_spine_index = None
    trigger_keyword = None
    # Find earliest back-matter trigger in last portion of TOC
    for index, (title, href) in enumerate(toc_entries):
        if index < threshold_index:
            continue
        normalized_title = _normalize_text(title)
        keyword = _match_keyword(normalized_title, settings.back_matter_triggers)
        if keyword:
            href_path = Path(href.split("#", 1)[0])
            spine_item = spine_lookup.get(href_path)
            if spine_item:
                trigger_spine_index = spine_item.index
                trigger_keyword = keyword
                break
    # If trigger found, mark all subsequent spine items for cascade skipping
    if trigger_spine_index is not None:
        for path, item in spine_lookup.items():
            # Existing candidates keep their original (more specific) reason.
            if item.index > trigger_spine_index and path not in candidates:
                candidates[path] = SkipCandidate(
                    file_path=path,
                    spine_index=item.index,
                    reason=f"after {trigger_keyword}",
                    source="cascade",
                )
    return candidates
def analyze_skip_candidates(epub_path: Path, settings: AppSettings) -> SkipAnalysis:
    """Detect spine files that look like skippable front/back matter.

    Detection is TOC-driven only: filenames are arbitrary technical artifacts
    and can cause false positives (e.g. "index_split_000.html" may be main
    content rather than an index page).
    """
    reader = EpubReader(epub_path, settings)
    keywords = [rule.keyword for rule in settings.skip_rules]
    spine_lookup = {item.href: item for item in iter_spine_items(reader.book)}
    toc_entries = _flatten_toc_entries(getattr(reader.book, "toc", []))
    toc_candidates, unmatched_titles = _collect_toc_candidates(spine_lookup, toc_entries, keywords)
    merged: dict[Path, SkipCandidate] = dict(toc_candidates)
    merged = _apply_skip_after_logic(merged, spine_lookup, toc_entries, settings)
    ordered = sorted(merged.values(), key=lambda c: (c.spine_index, c.file_path.as_posix()))
    return SkipAnalysis(candidates=ordered, toc_unmatched_titles=unmatched_titles)
def collect_skip_candidates(epub_path: Path, settings: AppSettings) -> list[SkipCandidate]:
    """Convenience wrapper returning only the ordered skip-candidate list."""
    return analyze_skip_candidates(epub_path, settings).candidates
def build_skip_map(
    epub_path: Path, settings: AppSettings, *, interactive: bool = False
) -> dict[Path, SkipCandidate]:
    """Build the final file-to-SkipCandidate map, optionally confirming each skip.

    Args:
        epub_path: EPUB to analyze.
        settings: Settings providing skip keywords and cascade options.
        interactive: When True, ask the user per candidate (rich Confirm,
            default yes); the answer is stored on ``candidate.flagged``.

    Returns:
        Mapping of file path to its (possibly unflagged) SkipCandidate.
    """
    candidates = collect_skip_candidates(epub_path, settings)
    skip_map: dict[Path, SkipCandidate] = {}
    for candidate in candidates:
        skip = candidate.flagged
        if interactive:
            skip = Confirm.ask(
                f"Skip {candidate.file_path.as_posix()}? reason={candidate.reason}",
                default=True,
            )
        # Persist the decision on the candidate itself so callers see it too.
        candidate.flagged = skip
        skip_map[candidate.file_path] = candidate
    return skip_map
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/toc_utils.py | Python | """Utilities for parsing EPUB table of contents (TOC)."""
from __future__ import annotations
from epub_io.reader import EpubReader
def parse_toc_to_dict(reader: EpubReader) -> dict[str, str]:
    """Extract TOC titles mapped by document href.

    Parses the EPUB's table of contents and creates a mapping from document
    hrefs (with any #fragment removed) to their titles. Both direct Link
    objects and the nested (head, children) tuple/list shape are handled.

    Args:
        reader: EpubReader instance with a loaded EPUB.

    Returns:
        Dictionary mapping href (without fragment) to title, e.g.
        {"chapter1.xhtml": "Chapter 1: Introduction"}.
    """
    titles: dict[str, str] = {}

    def _record(node) -> None:
        # Strip any #fragment so entries key on whole documents; an empty
        # title falls back to whatever was already recorded for that href.
        href = node.href.split("#", 1)[0]
        titles[href] = node.title or titles.get(href, "")

    def _walk(entries) -> None:
        for entry in entries:
            if hasattr(entry, "href") and hasattr(entry, "title"):
                _record(entry)
            elif isinstance(entry, (list, tuple)) and entry:
                head = entry[0]
                if hasattr(head, "href") and hasattr(head, "title"):
                    _record(head)
                if len(entry) > 1:
                    _walk(entry[1])

    _walk(reader.book.toc or [])
    return titles
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/epub_io/writer.py | Python | from __future__ import annotations
from pathlib import Path, PurePosixPath
from ebooklib import ITEM_STYLE, epub
from .resources import get_item_by_href, load_book
TRANSLATED_ONLY_CSS = """
[data-lang=\"original\"] {
display: none !important;
}
[data-lang=\"translation\"] {
display: block !important;
}
"""
def write_updated_epub(
    input_epub: Path,
    output_epub: Path,
    updated_html: dict[Path, bytes],
    *,
    toc_updates: dict[PurePosixPath, dict[str | None, str]] | None = None,
    css_mode: str = "bilingual",
) -> None:
    """Write a copy of the EPUB with replaced documents and optional TOC/CSS edits.

    Args:
        input_epub: Source EPUB to load.
        output_epub: Destination path; parent directories are created.
        updated_html: Map of internal document path to its replacement bytes.
        toc_updates: Optional translated TOC titles, keyed by path then by
            fragment (None = whole-document title). Applied only in
            "translated_only" mode.
        css_mode: "bilingual" (default) leaves styles untouched;
            "translated_only" hides original-language spans via injected CSS.

    Raises:
        KeyError: if an entry of ``updated_html`` has no matching book item.
    """
    book = load_book(input_epub)
    for href, content in updated_html.items():
        item = get_item_by_href(book, href)
        item.set_content(content)
    if toc_updates and css_mode == "translated_only":
        _rewrite_toc_titles(book, toc_updates)
    if css_mode == "translated_only":
        _append_translated_only_css(book)
    output_epub.parent.mkdir(parents=True, exist_ok=True)
    epub.write_epub(str(output_epub), book)
def _append_translated_only_css(book: epub.EpubBook) -> None:
    """Append the translated-only CSS rules to the book's stylesheet.

    Idempotent: if the '[data-lang="original"]' selector is already present in
    the first stylesheet, nothing is changed.

    NOTE(review): only the FIRST stylesheet is ever inspected/modified (both
    branches return on the first iteration), and books with no ITEM_STYLE item
    receive no CSS at all — confirm both behaviors are intentional.
    """
    for item in book.get_items_of_type(ITEM_STYLE):
        content = item.get_content().decode("utf-8")
        if '[data-lang="original"]' in content:
            return
        content = f"{content}\n\n{TRANSLATED_ONLY_CSS.strip()}\n"
        item.set_content(content.encode("utf-8"))
        return
def _rewrite_toc_titles(
    book: epub.EpubBook, toc_updates: dict[PurePosixPath, dict[str | None, str]]
) -> None:
    """Replace TOC link titles in place with translated titles where available."""
    pending = [book.toc]
    while pending:
        entries = pending.pop()
        for entry in entries:
            if isinstance(entry, epub.Link):
                path, fragment = _split_href(entry.href)
                replacement = _lookup_title(toc_updates, path, fragment)
                if replacement:
                    entry.title = replacement
            elif isinstance(entry, (list, tuple)):
                pending.append(entry)
def _split_href(href: str | None) -> tuple[PurePosixPath, str | None]:
if not href:
return PurePosixPath(""), None
if "#" in href:
path_part, fragment = href.split("#", 1)
else:
path_part, fragment = href, None
return PurePosixPath(path_part), fragment
def _lookup_title(
toc_updates: dict[PurePosixPath, dict[str | None, str]],
path: PurePosixPath,
fragment: str | None,
) -> str | None:
updates = toc_updates.get(path)
if not updates:
return None
if fragment and fragment in updates:
return updates[fragment]
return updates.get(None)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/exceptions.py | Python | """Custom exceptions for TEPUB application."""
from __future__ import annotations
from pathlib import Path
class TepubError(Exception):
    """Base exception for all TEPUB errors.

    Catching this type traps any error deliberately raised by the application
    while letting unexpected exceptions propagate.
    """

    pass
class StateFileNotFoundError(TepubError):
    """Raised when a required state file is missing.

    The message includes actionable next steps (which tepub commands to run).
    """

    def __init__(self, state_type: str, epub_path: Path):
        """Store context and build the user-facing message.

        Args:
            state_type: Type of state file ("segments" or "translation")
            epub_path: Path to the EPUB file being processed
        """
        self.state_type = state_type
        self.epub_path = epub_path
        super().__init__(self._get_message())

    def _get_message(self) -> str:
        """Compose a user-friendly message for the missing state type."""
        if self.state_type == "segments":
            lines = [
                f"No extraction state found for '{self.epub_path.name}'.",
                f"Please run: tepub extract '{self.epub_path}'",
            ]
            return "\n".join(lines)
        if self.state_type == "translation":
            lines = [
                f"No translation state found for '{self.epub_path.name}'.",
                "Please run the following commands first:",
                f"  1. tepub extract '{self.epub_path}'",
                f"  2. tepub translate '{self.epub_path}'",
            ]
            return "\n".join(lines)
        return f"State file '{self.state_type}' not found."
class WorkspaceNotFoundError(TepubError):
    """Raised when workspace directory doesn't exist for an EPUB."""

    def __init__(self, epub_path: Path, work_dir: Path):
        """Store context and build the user-facing message.

        Args:
            epub_path: Path to the EPUB file
            work_dir: Expected workspace directory path
        """
        self.epub_path = epub_path
        self.work_dir = work_dir
        parts = [
            f"No workspace found for '{epub_path.name}'.",
            f"Expected workspace at: {work_dir}",
            f"Please run: tepub extract '{epub_path}'",
        ]
        super().__init__("\n".join(parts))
class CorruptedStateError(TepubError):
    """Raised when a state file is corrupted or has invalid format."""

    def __init__(self, file_path: Path, state_type: str, reason: str):
        """Store context and build the user-facing message.

        Args:
            file_path: Path to the corrupted file
            state_type: Type of state file (e.g., "state", "segments")
            reason: Specific reason for corruption
        """
        self.file_path = file_path
        self.state_type = state_type
        self.reason = reason
        parts = [
            f"State file is corrupted: {file_path.name}",
            f"Reason: {reason}",
            "This may indicate file corruption or incompatibility.",
            "You may need to re-run: tepub extract",
        ]
        super().__init__("\n".join(parts))
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/cleaners.py | Python | from __future__ import annotations
import re
from lxml import html
def strip_spans_and_links(fragment: str) -> str:
    """Remove <span> and <a> wrappers from an HTML fragment, keeping their text.

    Bug fixed: ``drop_tag()`` merges a dropped element's text into the parent's
    ``.text`` (or the previous sibling's tail), but the old serialization only
    emitted the child elements, so any text before the first remaining child
    was silently lost (e.g. "<span>hello</span> <b>x</b>" -> "<b>x</b>").
    Non-element children (comments/PIs) also crashed the string join; they are
    now serialized like any other node.

    Args:
        fragment: HTML fragment markup.

    Returns:
        The fragment markup with span/a tags removed but their content kept.
    """
    from xml.sax.saxutils import escape

    element = html.fragment_fromstring(fragment, create_parent=True)
    for node in list(element.iter()):
        if isinstance(node.tag, str) and node.tag.lower() in {"span", "a"}:
            node.drop_tag()
    parts: list[str] = []
    # Text merged into the wrapper by drop_tag() lives in element.text;
    # re-escape it since we are emitting markup, not plain text.
    if element.text:
        parts.append(escape(element.text))
    for child in element:
        # tostring serializes the node plus its tail text, covering both
        # regular elements and comment/PI nodes.
        parts.append(html.tostring(child, encoding="unicode"))
    return "".join(parts)
def normalize_whitespace(text: str) -> str:
    """Collapse every whitespace run into a single space and trim the ends."""
    words = text.split()
    return " ".join(words)
def normalize_punctuation(text: str) -> str:
    """Normalize ellipsis punctuation for consistency.

    Collapses spaced dots (". . ." or ". . . .") into a standard "..." and
    guarantees exactly one space after an ellipsis that is followed by text.

    Args:
        text: Text to normalize.

    Returns:
        Text with normalized ellipsis punctuation.
    """
    collapsed = re.sub(r"\.\s+\.\s+\.(?:\s+\.)*", "...", text)
    return re.sub(r"\.\.\.\s*(?=\S)", "... ", collapsed)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/epub_export.py | Python | """Extract complete EPUB internal structure to workspace."""
from __future__ import annotations
import zipfile
from pathlib import Path
from console_singleton import get_console
console = get_console()
def extract_epub_structure(
    input_epub: Path,
    output_dir: Path,
    preserve_structure: bool = True,
) -> dict[str, Path]:
    """
    Extract all files from EPUB maintaining directory structure.

    This function extracts the complete internal structure of an EPUB file,
    preserving the original directory hierarchy (META-INF/, OEBPS/, etc.).
    This is useful for:
    - Inspecting original HTML/CSS/metadata
    - Debugging translation issues
    - Advanced custom processing
    - Re-packaging EPUBs

    Security: entry names are validated so that a crafted archive member such
    as "../evil" can never write outside ``output_dir`` (zip-slip).

    Args:
        input_epub: Path to the source EPUB file
        output_dir: Directory where EPUB contents will be extracted
        preserve_structure: If True, maintains original directory structure

    Returns:
        Dictionary mapping internal EPUB paths to extracted file paths
        Example: {"OEBPS/text00000.html": Path("/workspace/epub_raw/OEBPS/text00000.html")}

    Raises:
        FileNotFoundError: If input_epub doesn't exist
        zipfile.BadZipFile: If input_epub is not a valid ZIP/EPUB file
        ValueError: If an archive entry would escape output_dir

    Example:
        >>> mapping = extract_epub_structure(
        ...     Path("book.epub"),
        ...     Path("workspace/epub_raw")
        ... )
        >>> print(mapping["OEBPS/content.opf"])
        PosixPath('workspace/epub_raw/OEBPS/content.opf')
    """
    if not input_epub.exists():
        raise FileNotFoundError(f"EPUB file not found: {input_epub}")
    output_dir.mkdir(parents=True, exist_ok=True)
    # Resolve once so every entry can be validated against the extraction root.
    extraction_root = output_dir.resolve()
    mapping: dict[str, Path] = {}
    try:
        with zipfile.ZipFile(input_epub, "r") as epub_zip:
            for internal_path in epub_zip.namelist():
                # Skip directory entries (they end with /)
                if internal_path.endswith("/"):
                    continue
                # Determine output path
                if preserve_structure:
                    # Preserve full directory structure
                    output_path = output_dir / internal_path
                else:
                    # Flatten to single directory
                    output_path = output_dir / Path(internal_path).name
                # Zip-slip guard: reject entries that resolve outside the root
                # (e.g. "../pwned.txt" or absolute paths).
                if not output_path.resolve().is_relative_to(extraction_root):
                    raise ValueError(f"Unsafe path in EPUB archive: {internal_path!r}")
                # Create parent directories if needed
                output_path.parent.mkdir(parents=True, exist_ok=True)
                # Extract file
                with epub_zip.open(internal_path) as source:
                    output_path.write_bytes(source.read())
                # Store mapping
                mapping[internal_path] = output_path
    except zipfile.BadZipFile as e:
        raise zipfile.BadZipFile(f"Invalid EPUB/ZIP file: {input_epub}") from e
    return mapping
def get_epub_metadata_files(mapping: dict[str, Path]) -> dict[str, Path]:
    """
    Extract key metadata files from the structure mapping.

    Args:
        mapping: Dictionary from extract_epub_structure()

    Returns:
        Dictionary with standardized keys:
        - 'mimetype': Path to mimetype file
        - 'container': Path to META-INF/container.xml
        - 'opf': Path to content.opf (package document)
        - 'ncx': Path to toc.ncx (navigation)
    """
    found: dict[str, Path] = {}
    for internal_path, extracted_path in mapping.items():
        lowered = internal_path.lower()
        if internal_path == "mimetype":
            key = "mimetype"
        elif "container.xml" in lowered:
            key = "container"
        elif lowered.endswith(".opf"):
            key = "opf"
        elif lowered.endswith(".ncx"):
            key = "ncx"
        else:
            continue
        found[key] = extracted_path
    return found
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/image_export.py | Python | from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
console = get_console()
@dataclass
class ImageInfo:
    """Information about an extracted image."""

    epub_path: Path  # Original path in EPUB
    extracted_path: Path  # Path where image was extracted (may be renamed on collision)
    is_cover_candidate: bool = False  # heuristic flag: filename or position suggests a cover
# Supported image extensions
IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg", ".bmp"}
def _is_image_item(item) -> bool:
"""Check if an EPUB item is an image."""
if hasattr(item, "media_type"):
return item.media_type.startswith("image/")
if hasattr(item, "file_name"):
ext = Path(item.file_name).suffix.lower()
return ext in IMAGE_EXTENSIONS
return False
def _is_potential_cover(file_path: Path, is_first_spine_image: bool) -> bool:
"""Determine if an image is likely a cover candidate."""
name_lower = file_path.name.lower()
# Check filename patterns
if "cover" in name_lower:
return True
if "title" in name_lower:
return True
# First image in spine is often the cover
if is_first_spine_image:
return True
return False
def extract_images(
    settings: AppSettings,
    input_epub: Path,
    output_dir: Path,
) -> list[ImageInfo]:
    """
    Extract all images from EPUB to output directory.

    Duplicate filenames are disambiguated with a numeric suffix
    ("name_1.jpg", "name_2.jpg", ...). Extraction failures for individual
    images are reported as warnings and skipped rather than aborting.

    Args:
        settings: Application settings
        input_epub: Path to source EPUB
        output_dir: Directory where images will be saved (typically {markdown_dir}/images)

    Returns:
        List of ImageInfo objects with extraction details
    """
    reader = EpubReader(input_epub, settings)
    output_dir.mkdir(parents=True, exist_ok=True)
    extracted_images: list[ImageInfo] = []
    # Tracks whether we have already seen an image; the first one is treated
    # as a cover candidate by _is_potential_cover.
    seen_first_spine_image = False
    # Extract all image items from EPUB
    for item in reader.book.get_items():
        if not _is_image_item(item):
            continue
        epub_path = Path(item.file_name)
        # Generate output filename (preserve original name, handle duplicates)
        output_filename = epub_path.name
        output_path = output_dir / output_filename
        # Handle duplicate filenames by adding a counter
        counter = 1
        while output_path.exists():
            stem = epub_path.stem
            suffix = epub_path.suffix
            output_filename = f"{stem}_{counter}{suffix}"
            output_path = output_dir / output_filename
            counter += 1
        # Write image file
        try:
            content = item.get_content()
            output_path.write_bytes(content)
            # Check if this could be a cover
            is_cover_candidate = _is_potential_cover(epub_path, not seen_first_spine_image)
            if not seen_first_spine_image:
                seen_first_spine_image = True
            extracted_images.append(
                ImageInfo(
                    epub_path=epub_path,
                    extracted_path=output_path,
                    is_cover_candidate=is_cover_candidate,
                )
            )
        except Exception as e:
            # Best-effort: warn and continue with the remaining images.
            console.print(f"[yellow]Warning: Failed to extract image {epub_path}: {e}[/yellow]")
            continue
    return extracted_images
def get_image_mapping(extracted_images: list[ImageInfo]) -> dict[str, str]:
    """
    Build a lookup from original EPUB image paths to extracted filenames.

    Args:
        extracted_images: List of ImageInfo objects

    Returns:
        Dictionary mapping EPUB path (as posix string) to extracted filename
    """
    return {
        info.epub_path.as_posix(): info.extracted_path.name
        for info in extracted_images
    }
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/markdown_export.py | Python | from __future__ import annotations
import posixpath
import re
from dataclasses import dataclass
from pathlib import Path, PurePosixPath
import html2text
from lxml import html as lxml_html
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
from epub_io.resources import iter_spine_items
from epub_io.toc_utils import parse_toc_to_dict
from epub_io.path_utils import normalize_epub_href
from state.models import Segment
from state.store import load_segments
console = get_console()
@dataclass
class ChapterBlock:
    """Represents a logical chapter grouping multiple spine files.

    Produced by ``_build_chapter_blocks``: all spine documents falling
    between two consecutive TOC entries are collected into one block so the
    exporters can emit a single markdown chapter per block.
    """

    title: str  # TOC entry title
    spine_start: int  # First spine index in this block
    spine_end: int  # Last spine index (exclusive) - start of next block
    files: list[Path]  # All file paths in this block, sorted by spine
    toc_href: str  # Original TOC href for reference
def _build_chapter_blocks(
    reader: EpubReader, segments_by_file: dict[Path, list[Segment]]
) -> list[ChapterBlock]:
    """
    Group spine files into chapter blocks based on TOC structure.
    Smart grouping logic:
    - If multiple spine files exist between two TOC entries, group them
    - If one TOC entry = one spine file, keep as single block
    - Files before first TOC entry are kept as individual blocks
    - Files after last TOC entry are grouped as one block
    Args:
        reader: EPUB reader with access to TOC and spine
        segments_by_file: Segments grouped by file path
    Returns:
        List of ChapterBlock objects, one per logical chapter/section
    """
    # Build spine index lookup: Path -> spine_index
    spine_lookup: dict[Path, int] = {}
    for spine_item in iter_spine_items(reader.book):
        spine_lookup[spine_item.href] = spine_item.index
    # Get max spine index
    max_spine_index = max(spine_lookup.values()) if spine_lookup else 0
    # Extract TOC entries with their spine indices
    toc_entries: list[tuple[str, int, str]] = []  # (title, spine_index, href)

    def collect_toc_entries(entries):
        # TOC items are either Link-like objects (have href/title) or
        # (section, children) sequences; recurse into children when present.
        for item in entries:
            if hasattr(item, "href") and hasattr(item, "title"):
                # Drop any #fragment so the href matches spine file paths.
                href = item.href.split("#", 1)[0]
                href_path = Path(href)
                if href_path in spine_lookup:
                    spine_idx = spine_lookup[href_path]
                    title = item.title or href
                    toc_entries.append((title, spine_idx, href))
            elif isinstance(item, (list, tuple)) and item:
                head = item[0] if item else None
                if head and hasattr(head, "href") and hasattr(head, "title"):
                    href = head.href.split("#", 1)[0]
                    href_path = Path(href)
                    if href_path in spine_lookup:
                        spine_idx = spine_lookup[href_path]
                        title = head.title or href
                        toc_entries.append((title, spine_idx, href))
                if len(item) > 1:
                    collect_toc_entries(item[1])

    toc = reader.book.toc or []
    collect_toc_entries(toc)
    # Sort by spine index
    toc_entries.sort(key=lambda x: x[1])
    # Build chapter blocks
    blocks: list[ChapterBlock] = []
    for i, (title, spine_start, href) in enumerate(toc_entries):
        # Determine end of this block (start of next TOC entry or end of spine)
        if i + 1 < len(toc_entries):
            spine_end = toc_entries[i + 1][1]
        else:
            spine_end = max_spine_index + 1
        # Collect all files in spine range [spine_start, spine_end)
        block_files: list[tuple[Path, int]] = []  # (path, spine_index)
        for file_path, segments in segments_by_file.items():
            if segments:
                # A file's spine position is taken from its first segment.
                file_spine_idx = segments[0].metadata.spine_index
                if spine_start <= file_spine_idx < spine_end:
                    block_files.append((file_path, file_spine_idx))
        # Sort by spine index
        block_files.sort(key=lambda x: x[1])
        files_only = [path for path, _ in block_files]
        if files_only:
            blocks.append(
                ChapterBlock(
                    title=title,
                    spine_start=spine_start,
                    spine_end=spine_end,
                    files=files_only,
                    toc_href=href,
                )
            )
    # Handle files before first TOC entry (front matter)
    if toc_entries and segments_by_file:
        first_toc_spine = min(entry[1] for entry in toc_entries)
        front_matter_files: list[tuple[Path, int]] = []
        for file_path, segments in segments_by_file.items():
            if segments:
                file_spine_idx = segments[0].metadata.spine_index
                if file_spine_idx < first_toc_spine:
                    front_matter_files.append((file_path, file_spine_idx))
        if front_matter_files:
            # Sort and add as individual blocks (they don't belong to a chapter)
            front_matter_files.sort(key=lambda x: x[1])
            for file_path, spine_idx in front_matter_files:
                # Use filename as title for front matter
                title = file_path.stem.replace("_", " ").title()
                blocks.insert(
                    0,
                    ChapterBlock(
                        title=title,
                        spine_start=spine_idx,
                        spine_end=spine_idx + 1,
                        files=[file_path],
                        toc_href=file_path.as_posix(),
                    ),
                )
    # Sort all blocks by spine index
    blocks.sort(key=lambda b: b.spine_start)
    return blocks
def _sanitize_filename(title: str, max_length: int = 50) -> str:
"""Convert title to safe filename component."""
# Remove or replace unsafe characters
safe = re.sub(r'[<>:"/\\|?*!]', "", title)
safe = re.sub(r"\s+", "-", safe.strip())
safe = safe.lower()
# Remove leading/trailing hyphens
safe = safe.strip("-")
# Limit length
if len(safe) > max_length:
safe = safe[:max_length].rstrip("-")
return safe or "untitled"
def _html_to_markdown(
    html_content: str,
    document_path: Path,
    image_mapping: dict[str, str],
) -> str:
    """
    Convert HTML content to markdown, preserving formatting and images.
    Args:
        html_content: HTML content to convert
        document_path: Path of the document containing this content (for resolving image refs)
        image_mapping: Mapping from EPUB image paths to extracted filenames
    Returns:
        Markdown formatted text with image references
    """
    try:
        # Configure html2text
        h = html2text.HTML2Text()
        h.body_width = 0  # Don't wrap lines
        h.ignore_links = False  # Preserve links
        h.ignore_images = False  # Preserve images
        h.ignore_emphasis = False  # Preserve bold/italic
        h.mark_code = True  # Mark code blocks
        h.protect_links = True  # Don't alter link text
        h.single_line_break = False  # Use double line breaks for paragraphs
        # Convert HTML to markdown
        markdown = h.handle(html_content)
        # Post-process: fix image paths to use images/ directory
        # Parse to find image references and replace with correct paths
        tree = lxml_html.fromstring(f"<div>{html_content}</div>")
        for img in tree.xpath(".//img | .//image"):
            src = img.get("src") or img.get("href") or img.get("{http://www.w3.org/1999/xlink}href")
            if src:
                normalized_path = normalize_epub_href(document_path, src)
                if normalized_path and normalized_path in image_mapping:
                    extracted_name = image_mapping[normalized_path]
                    # Replace the path in markdown.
                    # html2text converts <img src="path"> to ![alt](path).
                    # NOTE(review): plain string replacement assumes html2text
                    # emits the src verbatim; URL-escaped or rewritten sources
                    # may slip through unreplaced — confirm with a fixture.
                    markdown = markdown.replace(f"]({src})", f"](images/{extracted_name})")
                    # Also handle URL-encoded or relative variations
                    markdown = markdown.replace(
                        f"]({normalized_path})", f"](images/{extracted_name})"
                    )
        return markdown.strip()
    except Exception as e:
        # Fallback to simple tag stripping (loses structure, keeps the text).
        console.print(f"[yellow]Warning: HTML to markdown conversion failed: {e}[/yellow]")
        return re.sub(r"<[^>]+>", "", html_content)
def export_to_markdown(
    settings: AppSettings,
    input_epub: Path,
    output_dir: Path,
    image_mapping: dict[str, str] | None = None,
) -> list[Path]:
    """
    Export extracted segments to numbered markdown files.

    Uses smart chapter grouping: multiple spine files between TOC entries
    are combined into a single markdown file per chapter.

    Args:
        settings: Application settings
        input_epub: Path to source EPUB
        output_dir: Directory where markdown files will be written
        image_mapping: Optional mapping from EPUB image paths to extracted filenames

    Returns:
        List of created markdown file paths
    """
    doc = load_segments(settings.segments_file)
    reader = EpubReader(input_epub, settings)
    images = image_mapping or {}

    # Bucket segments per source document, ordered by position in the file.
    grouped: dict[Path, list[Segment]] = {}
    for seg in doc.segments:
        grouped.setdefault(seg.file_path, []).append(seg)
    for bucket in grouped.values():
        bucket.sort(key=lambda s: s.metadata.order_in_file)

    chapter_blocks = _build_chapter_blocks(reader, grouped)

    output_dir.mkdir(parents=True, exist_ok=True)
    written: list[Path] = []

    for number, chapter in enumerate(chapter_blocks, start=1):
        target = output_dir / f"{number:03d}_{_sanitize_filename(chapter.title)}.md"

        # Chapter heading followed by every segment of every file in the block.
        parts = [f"# {chapter.title}", ""]
        for doc_path in chapter.files:
            for seg in grouped.get(doc_path, []):
                raw = seg.source_content or ""
                if not raw.strip():
                    continue
                rendered = _html_to_markdown(raw, doc_path, images)
                if rendered.strip():
                    parts.append(rendered)
                    parts.append("")

        target.write_text("\n".join(parts), encoding="utf-8")
        written.append(target)

    return written
def export_combined_markdown(
    settings: AppSettings,
    input_epub: Path,
    output_dir: Path,
    image_mapping: dict[str, str] | None = None,
) -> Path:
    """
    Export all segments to a single combined markdown file.

    Uses smart chapter grouping: multiple spine files between TOC entries
    are combined under a single ## heading per chapter.

    Args:
        settings: Application settings
        input_epub: Path to source EPUB
        output_dir: Directory where markdown file will be written
        image_mapping: Optional mapping from EPUB image paths to extracted filenames

    Returns:
        Path to the created combined markdown file
    """
    doc = load_segments(settings.segments_file)
    reader = EpubReader(input_epub, settings)
    images = image_mapping or {}

    # Bucket segments per source document, ordered by position in the file.
    grouped: dict[Path, list[Segment]] = {}
    for seg in doc.segments:
        grouped.setdefault(seg.file_path, []).append(seg)
    for bucket in grouped.values():
        bucket.sort(key=lambda s: s.metadata.order_in_file)

    chapter_blocks = _build_chapter_blocks(reader, grouped)

    output_dir.mkdir(parents=True, exist_ok=True)
    # The combined file is named after the EPUB itself.
    combined_path = output_dir / f"{input_epub.stem}.md"

    # Book title header followed by a horizontal rule.
    parts: list[str] = [f"# {input_epub.stem}", "", "---", ""]

    for number, chapter in enumerate(chapter_blocks, start=1):
        # One ## heading per block, not per file.
        parts.append(f"## {chapter.title}")
        parts.append("")
        for doc_path in chapter.files:
            for seg in grouped.get(doc_path, []):
                raw = seg.source_content or ""
                if not raw.strip():
                    continue
                rendered = _html_to_markdown(raw, doc_path, images)
                if rendered.strip():
                    parts.append(rendered)
                    parts.append("")
        # Separate chapters with a rule, except after the final one.
        if number < len(chapter_blocks):
            parts.append("---")
            parts.append("")

    combined_path.write_text("\n".join(parts), encoding="utf-8")
    return combined_path
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/pipeline.py | Python | from __future__ import annotations
from datetime import datetime, timezone
from pathlib import Path
from rich.progress import Progress
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
from epub_io.resources import extract_metadata
from epub_io.selector import build_skip_map
from state.models import Segment, SegmentsDocument, SkippedDocument, build_default_state
from state.store import ensure_state, load_state, save_segments, save_state
console = get_console()
from .segments import iter_segments
def _audit_extraction(settings: AppSettings, input_epub: Path, segments: list[Segment]) -> None:
    """Sanity-check extraction output and repair the state file if it drifted.

    Verifies segment-ID uniqueness and that the state file holds exactly one
    entry per extracted segment; on mismatch the state is rebuilt from the
    segment list (force_reset) and then re-checked.
    """
    console.print("[cyan]Let me double-check the extraction output…[/cyan]")
    total_segments = len(segments)
    unique_ids = {seg.segment_id for seg in segments}
    if len(unique_ids) != total_segments:
        console.print(
            f"[yellow]Warning: Found duplicate segment IDs. Total {total_segments}, unique {len(unique_ids)}.[/yellow]"
        )
    state = load_state(settings.state_file)
    # IDs extracted but absent from state, and state entries with no segment.
    missing = [seg.segment_id for seg in segments if seg.segment_id not in state.segments]
    extra = [seg_id for seg_id in state.segments if seg_id not in unique_ids]
    if missing or extra:
        console.print(
            "[yellow]Some segment statuses are out of sync; mending those omissions or errors…[/yellow]"
        )
        # Rebuild the state from the extracted segments; force_reset drops
        # any stale entries.
        ensure_state(
            settings.state_file,
            segments,
            provider=settings.primary_provider.name,
            model=settings.primary_provider.model,
            source_language=state.source_language,
            target_language=state.target_language,
            force_reset=True,
        )
        # Re-verify after the repair attempt.
        state = load_state(settings.state_file)
        missing = [seg.segment_id for seg in segments if seg.segment_id not in state.segments]
        extra = [seg_id for seg_id in state.segments if seg_id not in unique_ids]
        if not missing and not extra:
            console.print("[green]Audit mended the state file successfully.[/green]")
        else:
            console.print(
                "[red]Audit still found mismatches after repair. Consider re-running extraction.[/red]"
            )
    console.print(
        f"[cyan]Segments total: {total_segments}; unique IDs: {len(unique_ids)}; state entries: {len(state.segments)}.[/cyan]"
    )
def run_extraction(settings: AppSettings, input_epub: Path) -> None:
    """Extract translatable segments from an EPUB and persist them.

    Writes two artifacts: the segments document (settings.segments_file) and
    a fresh default translation state (settings.state_file).  Non-linear
    spine items are ignored.  Files flagged by the skip map are still
    extracted, but their segments carry skip metadata for later reporting.
    """
    work_dir = settings.work_dir
    work_dir.mkdir(parents=True, exist_ok=True)
    reader = EpubReader(input_epub, settings)
    # Decide up front which files are flagged for skipping (no prompting).
    skip_map = build_skip_map(input_epub, settings, interactive=False)
    skipped_documents: list[SkippedDocument] = []
    segments: list[Segment] = []
    with Progress() as progress:
        task = progress.add_task("Extracting", total=None)
        for document in reader.iter_documents():
            file_path = document.path
            if not document.spine_item.linear:
                continue
            decision = skip_map.get(file_path)
            skip_reason = None
            skip_source = None
            if decision and decision.flagged:
                # Track skipped files for reporting, but still extract segments
                skipped_documents.append(
                    SkippedDocument(
                        file_path=file_path,
                        reason=decision.reason,
                        source=decision.source,
                    )
                )
                skip_reason = decision.reason
                skip_source = decision.source
            for segment in iter_segments(
                document.tree, file_path=file_path, spine_index=document.spine_item.index
            ):
                # Tag segments with skip metadata if file is flagged
                segment.skip_reason = skip_reason
                segment.skip_source = skip_source
                segments.append(segment)
            progress.advance(task)
    # Extract book metadata
    metadata = extract_metadata(reader.book)
    timestamp = datetime.now(timezone.utc).isoformat()
    segments_doc = SegmentsDocument(
        epub_path=input_epub,
        generated_at=timestamp,
        segments=segments,
        skipped_documents=skipped_documents,
        book_title=metadata.get("title"),
        book_author=metadata.get("author"),
        book_publisher=metadata.get("publisher"),
        book_year=metadata.get("year"),
    )
    segments_path = settings.segments_file
    segments_path.parent.mkdir(parents=True, exist_ok=True)
    save_segments(segments_doc, segments_path)
    state_path = settings.state_file
    state_path.parent.mkdir(parents=True, exist_ok=True)
    # Fresh state: every segment starts in the default (untranslated) status.
    state_doc = build_default_state(
        segments,
        provider=settings.primary_provider.name,
        model=settings.primary_provider.model,
    )
    save_state(state_doc, state_path)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/extraction/segments.py | Python | from __future__ import annotations
import hashlib
from collections.abc import Iterator
from copy import deepcopy
from itertools import count
from pathlib import Path
from lxml import html
from extraction.cleaners import normalize_punctuation
from state.models import ExtractMode, Segment, SegmentMetadata
# Tags extracted as normalized plain text (paragraphs, headings h1-h6,
# blockquotes, text-only divs).
SIMPLE_TAGS = {"p", "blockquote", "div", *{f"h{i}" for i in range(1, 7)}}
# Tags captured whole as an HTML fragment; nested atomic tags are skipped.
ATOMIC_TAGS = {"ul", "ol", "dl", "table", "figure"}
# Tags that require smart extraction (skip structural wrappers)
# For these tags, only extract if element has text at its own level
# or is a leaf node (no same-tag descendants)
SMART_EXTRACT_TAGS = {"blockquote", "div"}
# HTML block-level elements; used to decide whether a <div> is text-only.
BLOCK_LEVEL_TAGS = {
    "address",
    "article",
    "aside",
    "blockquote",
    "div",
    "dl",
    "figure",
    "figcaption",
    "footer",
    "form",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "header",
    "hr",
    "li",
    "main",
    "nav",
    "ol",
    "p",
    "pre",
    "section",
    "table",
    "ul",
}
def _normalize_tag(tag: str) -> str:
return tag.split("}")[-1].lower()
def _has_atomic_ancestor(element: html.HtmlElement) -> bool:
    """Return True when the element sits inside an atomic container tag."""
    ancestor = element.getparent()
    while ancestor is not None:
        # Walk upward until an atomic tag is found or the root is reached.
        if _normalize_tag(ancestor.tag) in ATOMIC_TAGS:
            return True
        ancestor = ancestor.getparent()
    return False
def _div_is_text_only(element: html.HtmlElement) -> bool:
    """True for <div> elements that contain no block-level child elements."""
    if _normalize_tag(element.tag) != "div":
        return False
    for child in element:
        # Non-string tags (comments/PIs) are ignored.
        if isinstance(child.tag, str) and _normalize_tag(child.tag) in BLOCK_LEVEL_TAGS:
            return False
    return True
def _contains_descendant_with_tag(element: html.HtmlElement, tag: str) -> bool:
    """Return True when ``element`` has a proper descendant named ``tag``.

    Used to identify leaf nodes in nested same-tag structures.

    Args:
        element: Element to check
        tag: Tag name to search for (normalized)
    """
    return any(
        node is not element
        and isinstance(node.tag, str)
        and _normalize_tag(node.tag) == tag
        for node in element.iter()
    )
def _has_direct_text_content(element: html.HtmlElement, exclude_tag: str) -> bool:
    """Check whether the element keeps any text once every ``exclude_tag``
    descendant is removed.

    Distinguishes elements carrying their own content from pure structural
    wrappers that merely nest another element of the same tag, e.g.::

        <blockquote>Own text<blockquote>child</blockquote></blockquote>  -> True
        <blockquote><blockquote>only child</blockquote></blockquote>     -> False

    Args:
        element: Element to check
        exclude_tag: Tag to exclude when checking text (normalized)

    Returns:
        True if element has text outside same-tag descendants
    """
    # Work on a deep copy so the live tree is never mutated.
    scratch = deepcopy(element)
    for node in list(scratch.iter()):
        if node is scratch:
            continue
        if isinstance(node.tag, str) and _normalize_tag(node.tag) == exclude_tag:
            holder = node.getparent()
            if holder is not None:
                # lxml's remove() drops the node together with its tail text.
                holder.remove(node)
    return bool(scratch.text_content().strip())
def _extract_text(element: html.HtmlElement) -> str:
    """Collapse the element's text to single-spaced, punctuation-normalized form."""
    collapsed = " ".join(element.text_content().split())
    return normalize_punctuation(collapsed)
def _clean_html_copy(element: html.HtmlElement) -> html.HtmlElement:
    """Deep-copy an element, stripping all attributes (except image sources)
    and unwrapping presentational inline tags (span/a/font)."""
    duplicate = html.fromstring(html.tostring(element, encoding="unicode"))
    # First pass: drop attributes, keeping only the image source.
    for node in duplicate.iter():
        if not isinstance(node.tag, str):
            continue
        if _normalize_tag(node.tag) in {"img", "image"}:
            # Keep whichever attribute carries the image source.
            source = (
                node.get("src")
                or node.get("href")
                or node.get("{http://www.w3.org/1999/xlink}href")
            )
            node.attrib.clear()
            if source:
                node.set("src", source)
        else:
            node.attrib.clear()
    # Second pass: unwrap inline wrappers; snapshot first because drop_tag
    # mutates the tree while we iterate.
    for wrapper in list(duplicate.iter()):
        if isinstance(wrapper.tag, str) and wrapper.tag.lower() in {"span", "a", "font"}:
            wrapper.drop_tag()
    return duplicate
def _extract_inner_html(element: html.HtmlElement) -> str:
    """Serialize the cleaned element's inner HTML (leading text plus children).

    NOTE(review): children with non-string tags (comments/PIs) are passed to
    the string join unserialized, which would raise TypeError if one survives
    cleaning — confirm such nodes cannot reach this point.
    """
    cleaned = _clean_html_copy(element)
    pieces = [cleaned.text or ""]
    for child in cleaned:
        pieces.append(
            html.tostring(child, encoding="unicode") if isinstance(child.tag, str) else child
        )
    return "".join(pieces).strip()
def _build_segment_id(file_path: Path, xpath: str) -> str:
digest = hashlib.sha1(xpath.encode("utf-8")).hexdigest()[:12]
return f"{file_path.stem}-{digest}"
def iter_segments(
    tree: html.HtmlElement,
    file_path: Path,
    spine_index: int,
) -> Iterator[Segment]:
    """Yield translatable segments from a parsed XHTML document.

    Walks the tree in document order and emits one Segment per extractable
    element: ATOMIC_TAGS (lists, tables, figures) are captured whole as HTML
    (skipping nested atomic elements), SIMPLE_TAGS are captured as normalized
    text, and pure structural wrappers (SMART_EXTRACT_TAGS with nested
    same-tag children and no text of their own) are skipped.
    """
    order_counter = count(1)
    root_tree = tree.getroottree()
    for element in tree.iter():
        if not isinstance(element.tag, str):
            continue  # skip comments / processing instructions
        tag = _normalize_tag(element.tag)
        if tag in ATOMIC_TAGS:
            if _has_atomic_ancestor(element):
                continue  # already covered by an enclosing atomic element
            extract_mode = ExtractMode.HTML
        elif tag in SIMPLE_TAGS:
            if tag == "div" and not _div_is_text_only(element):
                continue
            # Smart extraction for tags that commonly have nested structures
            # Skip if element is just a structural wrapper (no own text, only descendants)
            if tag in SMART_EXTRACT_TAGS:
                has_same_tag_descendants = _contains_descendant_with_tag(element, tag)
                if has_same_tag_descendants:
                    # Has nested same-tag elements: only extract if this level has own text
                    if not _has_direct_text_content(element, tag):
                        continue  # Skip - pure structural wrapper
            extract_mode = ExtractMode.TEXT
        else:
            continue
        # The XPath doubles as the stable identity for the segment.
        xpath = root_tree.getpath(element)
        segment_id = _build_segment_id(file_path, xpath)
        order_idx = next(order_counter)
        if extract_mode == ExtractMode.TEXT:
            content = _extract_text(element)
            if not content:
                continue
        else:
            content = _extract_inner_html(element)
            if not content:
                continue
        metadata = SegmentMetadata(
            element_type=tag,
            spine_index=spine_index,
            order_in_file=order_idx,
        )
        yield Segment(
            segment_id=segment_id,
            file_path=file_path,
            xpath=xpath,
            extract_mode=extract_mode,
            source_content=content,
            metadata=metadata,
        )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/injection/engine.py | Python | from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from pathlib import Path, PurePosixPath
from lxml import etree, html
from config import AppSettings
from console_singleton import get_console
from epub_io.reader import EpubReader
from epub_io.writer import write_updated_epub
from logging_utils.logger import get_logger
from state.models import ExtractMode, Segment, SegmentStatus
from state.store import load_segments, load_state, save_state
from translation.polish import polish_if_chinese
from .html_ops import (
_set_html_content,
_set_text_only,
build_translation_element,
insert_translation_after,
prepare_original,
)
logger = get_logger(__name__)
console = get_console()
def _group_translated_segments(settings: AppSettings) -> dict[Path, list[tuple[Segment, str]]]:
    """Collect completed, AI-translated segments keyed by source document.

    Auto-copied segments (no provider recorded) are excluded.
    """
    segments_doc = load_segments(settings.segments_file)
    state_doc = load_state(settings.state_file)
    by_document: dict[Path, list[tuple[Segment, str]]] = defaultdict(list)
    for seg in segments_doc.segments:
        status_record = state_doc.segments.get(seg.segment_id)
        if status_record is None:
            continue
        if status_record.status != SegmentStatus.COMPLETED or not status_record.translation:
            continue
        # Skip auto-copied segments (those not translated by AI provider)
        if status_record.provider_name is None:
            continue
        by_document[seg.file_path].append((seg, status_record.translation))
    return by_document
# Heading tags tracked for TOC title updates (h5/h6 are not tracked).
HEADING_TAGS = {"h1", "h2", "h3", "h4"}
def _restore_document_structure(document, raw_html: bytes) -> None:
    """Re-apply the original <head> and <body> attributes onto document.tree.

    NOTE(review): only the head's children and the body's *attributes* are
    restored; body children are left untouched — presumably so injected
    translations survive — confirm this asymmetry is intentional.
    """
    try:
        original_root = html.fromstring(raw_html)
    except Exception:  # pragma: no cover - malformed markup fallback
        return
    new_root = document.tree
    original_head = original_root.find("head")
    new_head = new_root.find("head")
    if original_head is not None and new_head is not None:
        # Replace the head wholesale: attributes plus deep-copied children.
        new_head.attrib.clear()
        new_head.attrib.update(original_head.attrib)
        new_head[:] = [deepcopy(child) for child in original_head]
    original_body = original_root.find("body")
    new_body = new_root.find("body")
    if original_body is not None and new_body is not None:
        new_body.attrib.clear()
        new_body.attrib.update(original_body.attrib)
def _apply_translations_to_document(
    document,
    segments: list[tuple[Segment, str]],
    mode: str,
    title_updates: defaultdict[PurePosixPath, dict[str | None, str]],
) -> tuple[bool, list[str]]:
    """Apply translated text to one parsed document.

    Segments are processed bottom-up (descending order_in_file) so earlier
    segments' XPaths remain valid as siblings are inserted.

    Returns:
        (anything changed, list of segment IDs that failed to apply)
    """
    changed = False
    failures: list[str] = []
    root = document.tree.getroottree()
    ordered = sorted(
        segments,
        key=lambda pair: pair[0].metadata.order_in_file,
        reverse=True,
    )
    for segment, translated_text in ordered:
        matches = root.xpath(segment.xpath)
        if not matches:
            logger.warning("XPath not found for segment %s", segment.segment_id)
            failures.append(segment.segment_id)
            continue
        target = matches[0]
        if mode == "translated_only":
            # Replace the original content outright and remember heading
            # text so the TOC can be retitled.
            _replace_with_translation(target, segment, translated_text)
            _record_heading_title(segment.file_path, target, title_updates)
        else:
            # Bilingual mode: keep the original and insert a translated sibling.
            prepare_original(target)
            sibling = build_translation_element(target, segment, translated_text)
            try:
                insert_translation_after(target, sibling)
            except ValueError as exc:
                logger.warning("Failed to insert translation for %s: %s", segment.segment_id, exc)
                failures.append(segment.segment_id)
                continue
        changed = True
    return changed, failures
def _replace_with_translation(
    original: html.HtmlElement, segment: Segment, translation: str
) -> None:
    """Overwrite the original element's content with the translated text/markup."""
    original.attrib.pop("data-lang", None)
    # Text segments get plain text; everything else gets parsed HTML.
    setter = _set_text_only if segment.extract_mode == ExtractMode.TEXT else _set_html_content
    setter(original, translation)
def _record_heading_title(
    file_path: Path,
    element: html.HtmlElement,
    title_updates: defaultdict[PurePosixPath, dict[str | None, str]],
) -> None:
    """Remember translated heading text so TOC entries can be retitled.

    Keys the per-file mapping by the heading's id attribute; the None key
    holds the first heading text seen as a file-level fallback.
    """
    if (element.tag or "").lower() not in HEADING_TAGS:
        return
    heading_text = element.text_content().strip()
    if not heading_text:
        return
    entry = title_updates[PurePosixPath(file_path.as_posix())]
    anchor = element.get("id")
    if anchor:
        entry[anchor] = heading_text
    entry.setdefault(None, heading_text)
def apply_translations(
    settings: AppSettings,
    input_epub: Path,
    *,
    mode: str | None = None,
) -> tuple[dict[Path, bytes], dict[PurePosixPath, dict[str | None, str]]]:
    """Apply completed translations to the EPUB's documents in memory.

    Returns:
        (updated_html, title_updates): serialized bytes per changed document,
        and per-file heading-title updates for the TOC (only populated in
        translated_only mode).
    """
    settings.ensure_directories()
    # Optional post-polish pass for Chinese targets before injecting.
    polish_if_chinese(
        settings.state_file,
        settings.target_language,
        load_fn=load_state,
        save_fn=save_state,
        console_print=console.print,
        message_prefix="Before injection:",
    )
    console.print(f"[cyan]Preparing to inject translations into {input_epub}[/cyan]")
    grouped = _group_translated_segments(settings)
    if not grouped:
        console.print("[yellow]No translated segments found. Run translation first.[/yellow]")
        return {}, {}
    total_segments = sum(len(segments) for segments in grouped.values())
    console.print(
        f"[cyan]Found {total_segments} translated segments across {len(grouped)} files.[/cyan]"
    )
    reader = EpubReader(input_epub, settings)
    documents = {doc.path: doc for doc in reader.iter_documents()}
    updated_html: dict[Path, bytes] = {}
    effective_mode = mode or getattr(settings, "output_mode", "bilingual")
    title_updates: defaultdict[PurePosixPath, dict[str | None, str]] = defaultdict(dict)
    failed_segments: list[str] = []
    missing_documents: list[Path] = []
    for file_path, segments in grouped.items():
        document = documents.get(file_path)
        if not document:
            logger.warning("Document %s missing from EPUB", file_path)
            missing_documents.append(file_path)
            continue
        updated, failures = _apply_translations_to_document(
            document, segments, effective_mode, title_updates
        )
        failed_segments.extend(failures)
        if updated:
            html_bytes = etree.tostring(document.tree, encoding="utf-8", method="html")
            updated_html[file_path] = html_bytes
    if missing_documents:
        console.print(
            f"[yellow]Skipped {len(missing_documents)} missing documents: {', '.join(str(p) for p in missing_documents)}[/yellow]"
        )
    if failed_segments:
        console.print(
            f"[yellow]Encountered {len(failed_segments)} segment insertion failures; see logs for details.[/yellow]"
        )
    # NOTE(review): this runs AFTER the loop, so it restores only the last
    # iterated document — and raises AttributeError on None if that last
    # document was missing. It likely belongs inside the per-document loop,
    # before serialization — confirm intent before moving it.
    _restore_document_structure(document, document.raw_html)
    if effective_mode != "translated_only":
        # Heading retitling only applies when originals were replaced.
        title_updates.clear()
    return updated_html, {path: mapping for path, mapping in title_updates.items()}
def run_injection(
    settings: AppSettings,
    input_epub: Path,
    output_epub: Path,
    *,
    mode: str = "bilingual",
) -> tuple[dict[Path, bytes], dict[PurePosixPath, dict[str | None, str]]]:
    """Inject translations and write the resulting EPUB to ``output_epub``.

    Returns the same (updated_html, title_updates) pair as
    ``apply_translations``; nothing is written when no segment applied.
    """
    updated_html, title_updates = apply_translations(settings, input_epub, mode=mode)
    if not updated_html:
        return updated_html, title_updates
    console.print(f"[cyan]Writing translated EPUB to {output_epub} (mode={mode})...[/cyan]")
    try:
        write_updated_epub(
            input_epub,
            output_epub,
            updated_html,
            toc_updates=title_updates,
            css_mode=mode,
        )
    except Exception as exc:  # pragma: no cover - filesystem errors
        console.print(f"[red]Failed to write EPUB: {exc}[/red]")
        raise
    # Reached only on success (the except branch re-raises).
    console.print(
        f"[green]Wrote translated EPUB to {output_epub} with {len(updated_html)} updated files.[/green]"
    )
    return updated_html, title_updates
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/injection/html_ops.py | Python | from __future__ import annotations
from lxml import html
from state.models import ExtractMode, Segment
def _clone_element(element: html.HtmlElement) -> html.HtmlElement:
    # Deep-copy an element by serialising and re-parsing it, yielding a
    # clone fully detached from the original document tree.
    return html.fragment_fromstring(html.tostring(element, encoding="unicode"))
def prepare_original(element: html.HtmlElement) -> None:
    # Mark the source element so styling can distinguish it from the
    # translated sibling inserted next to it.
    element.attrib["data-lang"] = "original"
def _set_text_only(element: html.HtmlElement, text: str) -> None:
    """Replace the element's content with plain text, removing all children."""
    del element[:]
    element.text = text
def _set_html_content(element: html.HtmlElement, markup: str) -> None:
    """Set element's content from HTML markup string.

    Clears the element's existing text and children and repopulates it from
    the parsed ``markup`` fragments, while preserving the element's own
    attributes and tail text.

    Fix: lxml's ``Element.clear()`` also wipes attributes and the tail, which
    previously dropped the ``data-lang`` marker (set before this call in
    build_translation_element) and any id/class anchors on HTML-mode
    segments, plus any text following the element. Attributes and tail are
    now saved and restored around the clear.
    """
    preserved_attrib = dict(element.attrib)
    preserved_tail = element.tail
    element.clear()
    # Restore what clear() destroyed beyond the content we meant to replace.
    for key, value in preserved_attrib.items():
        element.set(key, value)
    element.tail = preserved_tail
    if not markup:
        return
    # Parse HTML - fragment_fromstring with create_parent=True returns container
    container = html.fragment_fromstring(f"<wrapper>{markup}</wrapper>", create_parent=True)
    # Container has <wrapper> as first child - extract its contents
    if len(container) > 0 and getattr(container[0], "tag", None) == "wrapper":
        wrapper = container[0]
        element.text = wrapper.text
        for child in wrapper:
            element.append(child)
    else:
        # Fallback: shouldn't happen but handle gracefully
        element.text = container.text
        for child in container:
            element.append(child)
def build_translation_element(
    original: html.HtmlElement, segment: Segment, translation: str
) -> html.HtmlElement:
    """Clone the original element and fill it with the translated content.

    Fix: the ``data-lang`` marker is set *after* the content is injected.
    ``_set_html_content`` calls lxml's ``clear()``, which also clears
    attributes, so setting the marker first silently dropped it for
    HTML-mode segments.
    """
    clone = _clone_element(original)
    if segment.extract_mode == ExtractMode.TEXT:
        _set_text_only(clone, translation)
    else:
        _set_html_content(clone, translation)
    clone.attrib["data-lang"] = "translation"
    return clone
def insert_translation_after(
    original: html.HtmlElement, translation_element: html.HtmlElement
) -> None:
    """Place the translation as the sibling immediately after the original.

    Raises:
        ValueError: if the original element has no parent to attach to.
    """
    parent = original.getparent()
    if parent is None:
        raise ValueError("Original element missing parent; cannot insert translation")
    parent.insert(parent.index(original) + 1, translation_element)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/logging_utils/logger.py | Python | from __future__ import annotations
import logging
from rich.console import Console
from rich.logging import RichHandler
_RICH_HANDLER: RichHandler | None = None
def configure_logging(level: int = logging.INFO) -> None:
    """Install a single shared RichHandler on the root logger (idempotent).

    The first call wires basicConfig with a Rich handler and caches it in
    the module-level ``_RICH_HANDLER``; later calls only adjust the level on
    the root logger and the cached handler.
    """
    global _RICH_HANDLER
    if _RICH_HANDLER is None:
        # force_terminal keeps colourised output even when stdout is piped;
        # NOTE(review): confirm this is desired when logs go to files.
        console = Console(force_terminal=True)
        handler = RichHandler(
            console=console, show_time=False, show_path=False, rich_tracebacks=True
        )
        logging.basicConfig(level=level, handlers=[handler], format="%(message)s")
        _RICH_HANDLER = handler
        # Silence httpx HTTP request logs (used by OpenAI SDK)
        logging.getLogger("httpx").setLevel(logging.WARNING)
    else:
        logging.getLogger().setLevel(level)
        _RICH_HANDLER.setLevel(level)
def get_logger(name: str) -> logging.Logger:
    """Return a named logger, making sure Rich logging is configured first."""
    configure_logging()
    return logging.getLogger(name)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/state/base.py | Python | """Generic state management base for translation and audiobook state.
This module provides type-safe, reusable state operations that can be
shared across different state management systems in the application.
"""
from __future__ import annotations
import json
from collections.abc import Callable
from pathlib import Path
from typing import TypeVar
from pydantic import BaseModel, ValidationError
from exceptions import CorruptedStateError
try:
import portalocker
HAS_PORTALOCKER = True
except ImportError:
HAS_PORTALOCKER = False
# Generic type variable for state documents
TDocument = TypeVar("TDocument", bound=BaseModel)
def atomic_write(path: Path, payload: dict) -> None:
    """
    Atomically write a dictionary to a JSON file with file locking.

    Uses a temporary file with .tmp suffix to ensure atomicity.
    The temporary file is written first, then atomically renamed to
    the target path, preventing corruption on crashes or interrupts.

    File locking prevents concurrent writes from multiple processes
    corrupting the state file during parallel operations.

    Args:
        path: Target file path
        payload: Dictionary to serialize as JSON

    Example:
        >>> atomic_write(Path("state.json"), {"key": "value"})
    """
    tmp_path = path.with_suffix(path.suffix + ".tmp")
    content = json.dumps(payload, indent=2, ensure_ascii=False)
    if HAS_PORTALOCKER:
        # Use file locking for concurrent write protection.
        # NOTE(review): the lock guards only the shared .tmp file and is
        # released before the rename below; two processes can still race
        # between release and replace (one may truncate the other's .tmp).
        # Confirm whether a per-process temp name or a lock on the target
        # is needed for true multi-process safety.
        with portalocker.Lock(tmp_path, "w", encoding="utf-8", timeout=30) as f:
            f.write(content)
    else:
        # Fallback without locking (better than nothing)
        tmp_path.write_text(content, encoding="utf-8")
    # Atomic on POSIX: readers see either the old or the new file, never a mix.
    tmp_path.replace(path)
def load_generic_state(path: Path, model_class: type[TDocument]) -> TDocument:
    """
    Load and deserialize a state document from a JSON file.

    Args:
        path: Path to JSON state file
        model_class: Pydantic model class for deserialization

    Returns:
        Deserialized state document instance

    Raises:
        FileNotFoundError: If the file doesn't exist
        json.JSONDecodeError: If the file contains invalid JSON
        pydantic.ValidationError: If data doesn't match model schema
    """
    raw = path.read_text(encoding="utf-8")
    return model_class.model_validate(json.loads(raw))
def save_generic_state(document: TDocument, path: Path) -> None:
    """
    Serialize and atomically save a state document to a JSON file.

    Args:
        document: Pydantic model instance to save
        path: Target file path
    """
    # Round-trip through JSON so atomic_write receives a plain dict with the
    # same custom serialization pydantic applied (Paths, datetimes, etc.).
    serialized = document.model_dump_json(indent=2)
    atomic_write(path, json.loads(serialized))
def update_state_item(
    state_path: Path,
    model_class: type[TDocument],
    updater: Callable[[TDocument], TDocument],
) -> TDocument:
    """
    Load state, apply an update function, and save atomically.

    Generic read-modify-write operation; atomicity of the final write
    comes from atomic_write.

    Args:
        state_path: Path to the state file
        model_class: Pydantic model class of the state document
        updater: Function mapping the current document to the updated one

    Returns:
        The updated state document
    """
    current = load_generic_state(state_path, model_class)
    revised = updater(current)
    save_generic_state(revised, state_path)
    return revised
def safe_load_state(
    path: Path,
    model_class: type[TDocument],
    state_type: str = "state",
) -> TDocument:
    """
    Safely load state with graceful error handling.

    Converts low-level errors (JSONDecodeError, ValidationError) into
    user-friendly CorruptedStateError exceptions, preserving the original
    exception as the cause for debugging.

    Args:
        path: Path to state file
        model_class: Pydantic model class for deserialization
        state_type: Human-readable name for error messages

    Returns:
        Loaded state document

    Raises:
        FileNotFoundError: If file doesn't exist
        CorruptedStateError: If file is corrupted or has invalid schema
    """
    try:
        return load_generic_state(path, model_class)
    except json.JSONDecodeError as e:
        # Chain the original error so the full parse context survives in tracebacks.
        raise CorruptedStateError(
            path,
            state_type,
            f"Invalid JSON format (line {e.lineno}, column {e.colno})",
        ) from e
    except ValidationError as e:
        error_count = len(e.errors())
        first_error = e.errors()[0]
        field = ".".join(str(loc) for loc in first_error["loc"])
        raise CorruptedStateError(
            path,
            state_type,
            f"Schema validation failed: {field} - {first_error['msg']} ({error_count} error(s) total)",
        ) from e
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/state/models.py | Python | from __future__ import annotations
from datetime import datetime
from enum import Enum
from pathlib import Path
from pydantic import BaseModel, Field
class ExtractMode(str, Enum):
    """How a segment's content was captured: plain text or raw HTML markup."""
    TEXT = "text"
    HTML = "html"
class SegmentStatus(str, Enum):
    """Lifecycle state of a segment's translation within the state file."""
    PENDING = "pending"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    SKIPPED = "skipped"
    ERROR = "error"
class SegmentMetadata(BaseModel):
    """Positional metadata locating a segment inside the EPUB."""
    # Tag name of the source element (e.g. "p", "h1").
    element_type: str
    # Position of the containing file in the EPUB spine.
    spine_index: int
    # Ordering of this segment within its file.
    order_in_file: int
    notes: str | None = None
class Segment(BaseModel):
    """One translatable unit extracted from an EPUB document."""
    segment_id: str = Field(..., description="Deterministic identifier for matching original nodes")
    file_path: Path = Field(..., description="Relative path to the XHTML file inside the EPUB")
    xpath: str = Field(..., description="Absolute XPath to the element in the document")
    extract_mode: ExtractMode
    source_content: str = Field(..., description="Content extracted pre-translation")
    metadata: SegmentMetadata
    skip_reason: str | None = Field(None, description="Reason for skipping (e.g., 'cover', 'index')")
    skip_source: str | None = Field(None, description="Source of skip decision (e.g., 'content', 'rule')")
class TranslationRecord(BaseModel):
    """Per-segment translation progress stored in the state file."""
    # Allow field names starting with "model_" (pydantic v2 reserves that prefix).
    model_config = {"protected_namespaces": ()}
    segment_id: str
    translation: str | None = None
    provider_name: str | None = None
    model_name: str | None = None
    status: SegmentStatus = SegmentStatus.PENDING
    error_message: str | None = None
class SkippedDocument(BaseModel):
    """A spine document excluded from translation, with the reason why."""
    file_path: Path
    reason: str
    # Where the skip decision came from (e.g. content analysis vs. a rule).
    source: str = "content"
class SegmentsDocument(BaseModel):
    """Top-level extraction output: all segments plus skipped files and book info."""
    epub_path: Path
    generated_at: str
    segments: list[Segment]
    skipped_documents: list[SkippedDocument] = Field(default_factory=list)
    # Book metadata (optional, extracted from EPUB)
    book_title: str | None = None
    book_author: str | None = None
    book_publisher: str | None = None
    book_year: str | None = None
class StateDocument(BaseModel):
    """Persistent translation progress, keyed by segment id."""
    segments: dict[str, TranslationRecord] = Field(default_factory=dict)
    current_provider: str | None = None
    current_model: str | None = None
    # Schema version for forward-compatible migrations.
    version: int = 1
    source_language: str = "auto"
    target_language: str = "Simplified Chinese"
    # Failure streak used to trigger the provider cooldown.
    consecutive_failures: int = 0
    cooldown_until: datetime | None = None
class ResumeInfo(BaseModel):
    """Summary of segment ids grouped by progress, for resuming a run."""
    remaining_segments: list[str]
    completed_segments: list[str]
    skipped_segments: list[str]
def build_default_state(segments: list[Segment], provider: str, model: str) -> StateDocument:
    """Create a fresh StateDocument with every segment marked PENDING."""
    records: dict[str, TranslationRecord] = {}
    for segment in segments:
        records[segment.segment_id] = TranslationRecord(
            segment_id=segment.segment_id,
            status=SegmentStatus.PENDING,
        )
    return StateDocument(
        segments=records,
        current_provider=provider,
        current_model=model,
    )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/state/resume.py | Python | from __future__ import annotations
from collections.abc import Iterable
from pathlib import Path
from .models import ResumeInfo, Segment, SegmentStatus, StateDocument
from .store import compute_resume_info, ensure_state, load_state, mark_status
def init_state_if_needed(
    state_path: Path,
    segments: Iterable[Segment],
    provider_name: str,
    model_name: str,
    source_language: str,
    target_language: str,
) -> StateDocument:
    """Thin wrapper over ensure_state: reuse a compatible state file or create one."""
    return ensure_state(
        state_path,
        segments,
        provider_name,
        model_name,
        source_language,
        target_language,
    )
def load_resume_info(state_path: Path) -> ResumeInfo:
    """Summarize resume progress; an absent state file means nothing has started."""
    if not state_path.exists():
        return ResumeInfo(remaining_segments=[], completed_segments=[], skipped_segments=[])
    state = load_state(state_path)
    return compute_resume_info(state)
def reset_segment(state_path: Path, segment_id: str) -> None:
    """Return one segment to PENDING, clearing its translation and error message."""
    mark_status(
        state_path,
        segment_id,
        SegmentStatus.PENDING,
        translation=None,
        error_message=None,
    )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/state/store.py | Python | from __future__ import annotations
import threading
from collections.abc import Iterable
from datetime import datetime
from pathlib import Path
from .base import load_generic_state, save_generic_state
from .models import (
ResumeInfo,
Segment,
SegmentsDocument,
SegmentStatus,
StateDocument,
TranslationRecord,
)
# Thread-safe state file operations:
# one lock per resolved state-file path, guarding read-modify-write cycles.
_state_file_locks: dict[str, threading.Lock] = {}
# Protects the lock registry itself during lazy creation.
_locks_lock = threading.Lock()
def _get_lock(path: Path) -> threading.Lock:
    """Get or create a lock for a specific state file path."""
    key = str(path.resolve())
    with _locks_lock:
        # setdefault creates the lock only on first use of this path.
        return _state_file_locks.setdefault(key, threading.Lock())
def save_segments(document: SegmentsDocument, path: Path) -> None:
    """Persist the extracted-segments document atomically."""
    save_generic_state(document, path)
def load_segments(path: Path) -> SegmentsDocument:
    """Load the extracted-segments document from *path*."""
    return load_generic_state(path, SegmentsDocument)
def save_state(document: StateDocument, path: Path) -> None:
    """Write the translation state under the per-path lock (thread-safe)."""
    lock = _get_lock(path)
    with lock:
        save_generic_state(document, path)
def load_state(path: Path) -> StateDocument:
    """Load the translation state document from *path*."""
    return load_generic_state(path, StateDocument)
def ensure_state(
    path: Path,
    segments: Iterable[Segment],
    provider: str,
    model: str,
    source_language: str,
    target_language: str,
    force_reset: bool = False,
) -> StateDocument:
    """Load an existing compatible state file or create a fresh one.

    An existing state is reused only when provider, model, and both language
    settings all match; in that case segments missing from the state are added
    as PENDING, preserving completed translations. Otherwise — or when
    force_reset is True — a brand-new all-PENDING state is written.
    """
    segments_list = list(segments)  # Consume iterable once
    segment_ids = {seg.segment_id for seg in segments_list}
    if path.exists() and not force_reset:
        existing = load_state(path)
        if (
            existing.current_provider == provider
            and existing.current_model == model
            and existing.source_language == source_language
            and existing.target_language == target_language
        ):
            # Sync segments: add missing, keep existing translations
            existing_ids = set(existing.segments.keys())
            missing_ids = segment_ids - existing_ids
            if missing_ids:
                # Add new segments that weren't in the state
                for seg in segments_list:
                    if seg.segment_id in missing_ids:
                        existing.segments[seg.segment_id] = TranslationRecord(
                            segment_id=seg.segment_id
                        )
                save_state(existing, path)
            return existing
    # Settings mismatch, forced reset, or no file yet: start from scratch.
    doc = StateDocument(
        segments={
            segment.segment_id: TranslationRecord(segment_id=segment.segment_id)
            for segment in segments_list
        },
        current_provider=provider,
        current_model=model,
        source_language=source_language,
        target_language=target_language,
    )
    save_state(doc, path)
    return doc
def update_translation_record(state_path: Path, segment_id: str, updater) -> TranslationRecord:
    """Atomically apply *updater* to one segment's record and persist the state.

    Raises:
        KeyError: if *segment_id* is not present in the state file.
    """
    with _get_lock(state_path):
        state = load_generic_state(state_path, StateDocument)
        existing = state.segments.get(segment_id)
        if existing is None:
            raise KeyError(f"Segment {segment_id} missing from state file")
        new_record = updater(existing)
        state.segments[segment_id] = new_record
        save_generic_state(state, state_path)
    return new_record
def mark_status(
    state_path: Path, segment_id: str, status: SegmentStatus, **fields
) -> TranslationRecord:
    """Set *status* (plus any extra record fields) on one segment's record."""

    def _apply(record: TranslationRecord) -> TranslationRecord:
        data = record.model_dump()
        data.update(fields)
        data["status"] = status
        return TranslationRecord.model_validate(data)

    return update_translation_record(state_path, segment_id, _apply)
def compute_resume_info(state: StateDocument) -> ResumeInfo:
    """Bucket segment ids by status into remaining / completed / skipped lists."""
    records = state.segments.values()
    completed = [r.segment_id for r in records if r.status == SegmentStatus.COMPLETED]
    skipped = [r.segment_id for r in records if r.status == SegmentStatus.SKIPPED]
    done = {SegmentStatus.COMPLETED, SegmentStatus.SKIPPED}
    remaining = [r.segment_id for r in records if r.status not in done]
    return ResumeInfo(
        remaining_segments=sorted(remaining),
        completed_segments=sorted(completed),
        skipped_segments=sorted(skipped),
    )
def iter_pending_segments(state: StateDocument) -> Iterable[str]:
    """Yield ids of segments still awaiting translation (status PENDING)."""
    yield from (
        segment_id
        for segment_id, record in state.segments.items()
        if record.status == SegmentStatus.PENDING
    )
def iter_segments_by_status(state: StateDocument, status: SegmentStatus) -> Iterable[str]:
    """Yield ids of all segments whose record carries *status*."""
    yield from (
        segment_id
        for segment_id, record in state.segments.items()
        if record.status == status
    )
def set_consecutive_failures(state_path: Path, count: int) -> None:
    """Set the consecutive failures counter in the state file."""
    with _get_lock(state_path):
        state = load_generic_state(state_path, StateDocument)
        state.consecutive_failures = count
        save_generic_state(state, state_path)
def set_cooldown(state_path: Path, until: datetime | None) -> None:
    """Set (or clear, with None) the cooldown expiration timestamp in the state file."""
    with _get_lock(state_path):
        state = load_generic_state(state_path, StateDocument)
        state.cooldown_until = until
        save_generic_state(state, state_path)
def reset_error_segments(state_path: Path, segment_ids: list[str] | None = None) -> list[str]:
    """Reset ERROR segments to PENDING for retry.

    Args:
        state_path: Path to state file
        segment_ids: Optional list of specific segment IDs to reset. If None,
            resets all ERROR segments. An empty list resets nothing.

    Returns:
        List of segment IDs that were reset
    """
    lock = _get_lock(state_path)
    with lock:
        state = load_generic_state(state_path, StateDocument)
        reset_ids: list[str] = []
        # Distinguish "no filter" (None) from "empty filter" ([]): the previous
        # truthiness check made an empty list reset ALL error segments,
        # contradicting the documented contract.
        target_ids = set(segment_ids) if segment_ids is not None else None
        for seg_id, record in state.segments.items():
            if target_ids is not None and seg_id not in target_ids:
                continue
            if record.status == SegmentStatus.ERROR:
                record.status = SegmentStatus.PENDING
                record.error_message = None
                reset_ids.append(seg_id)
        # Only touch the file when something actually changed.
        if reset_ids:
            save_generic_state(state, state_path)
        return reset_ids
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/controller.py | Python | from __future__ import annotations
import time
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from pathlib import Path
from rich.console import Group
from rich.live import Live
from rich.panel import Panel
from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeElapsedColumn
from rich.table import Table
from config import AppSettings
from console_singleton import get_console
from logging_utils.logger import get_logger
from state.models import SegmentStatus
from state.store import (
ensure_state,
load_segments,
load_state,
mark_status,
reset_error_segments,
save_state,
set_consecutive_failures,
set_cooldown,
)
from translation.languages import describe_language
from translation.polish import polish_if_chinese, polish_translation, target_is_chinese
from translation.providers import ProviderError, ProviderFatalError, create_provider
from .prefilter import should_auto_copy
def _build_dashboard_panel(
    *,
    total_files: int,
    skipped_files: int,
    completed_files: int,
    total_segments: int,
    completed_segments: int,
    pending_segments: int,
    preview_lines: list[str],
    progress_renderable,
    active_workers: int = 0,
    max_workers: int = 1,
    in_cooldown: bool = False,
    cooldown_remaining: str = "",
) -> Panel:
    """Build the Rich dashboard panel: file/segment counts, worker activity,
    cooldown status, and one preview line per worker slot.

    NOTE(review): progress_renderable is accepted but never used here — the
    caller composes the progress bar next to the panel instead; confirm
    whether the parameter can be dropped.
    """
    stats = Table.grid(padding=(1, 1))
    stats.add_column(style="bold cyan", justify="left")
    stats.add_column(justify="left", overflow="fold")
    stats.add_row(
        "files",
        f"total {total_files}, skipped {skipped_files}, completed {completed_files}",
    )
    stats.add_row(
        "segments",
        f"total {total_segments}, completed {completed_segments}, pending {pending_segments}",
    )
    # Worker and cooldown rows appear only when relevant.
    if max_workers > 1:
        stats.add_row("workers", f"active {active_workers}/{max_workers}")
    if in_cooldown:
        stats.add_row("cooldown", f"⏸ waiting {cooldown_remaining} (3 consecutive fails)")
    # Show preview lines (one per worker slot)
    for i, line in enumerate(preview_lines):
        label = f"w{i+1}" if max_workers > 1 else "current"
        stats.add_row(label, line or "…")
    return Panel(stats, border_style="magenta", title="Dashboard", padding=(1, 1))
def _strip_tags(text: str) -> str:
    """Flatten an HTML fragment to whitespace-normalized plain text."""
    from lxml import html

    try:
        root = html.fromstring(f"<div>{text}</div>")
        joined = " ".join(chunk.strip() for chunk in root.itertext())
        return " ".join(joined.split())
    except Exception:  # pragma: no cover - fallback for malformed html
        return text
def _truncate_text(text: str, max_length: int = 80) -> str:
"""Truncate text to max_length, adding ellipsis if needed."""
if len(text) <= max_length:
return text
return text[: max_length - 1] + "…"
# Module-wide logger and the shared Rich console singleton.
logger = get_logger(__name__)
console = get_console()
class TranslationResult:
    """Result of translating a single segment.

    Exactly one of `translation` / `error` is meaningful: success carries the
    (polished) text plus provider/model attribution, failure carries the
    exception. `is_auto_copy` marks segments copied verbatim without a
    provider call.
    """

    # __slots__ keeps per-result memory small when translating many segments.
    __slots__ = ("segment_id", "translation", "error", "is_auto_copy", "provider_name", "model_name")

    def __init__(
        self,
        segment_id: str,
        translation: str | None = None,
        error: Exception | None = None,
        is_auto_copy: bool = False,
        provider_name: str | None = None,
        model_name: str | None = None,
    ):
        self.segment_id = segment_id
        self.translation = translation
        self.error = error
        self.is_auto_copy = is_auto_copy
        self.provider_name = provider_name
        self.model_name = model_name
def _translate_segment(
    segment,
    provider,
    source_language: str,
    target_language: str,
) -> TranslationResult:
    """Translate a single segment. Thread-safe worker function.

    Never raises: all failures are captured in the returned result so one bad
    segment cannot kill the worker pool.

    Args:
        segment: The segment to translate
        provider: Translation provider instance
        source_language: Source language code
        target_language: Target language code

    Returns:
        TranslationResult with translation or error
    """
    # Check if segment should be auto-copied
    if should_auto_copy(segment):
        # Boilerplate (numbers, punctuation, page markers): copy source verbatim.
        return TranslationResult(
            segment_id=segment.segment_id,
            translation=segment.source_content,
            is_auto_copy=True,
            provider_name=None,
            model_name=None,
        )
    # Perform translation
    try:
        translation_text = provider.translate(
            segment,
            source_language=source_language,
            target_language=target_language,
        )
        # Apply polish immediately
        polished_text = polish_translation(translation_text)
        return TranslationResult(
            segment_id=segment.segment_id,
            translation=polished_text,
            provider_name=provider.name,
            model_name=provider.model,
        )
    except (ProviderError, ProviderFatalError) as exc:
        # Known provider failures are reported via the result object.
        return TranslationResult(
            segment_id=segment.segment_id,
            error=exc,
        )
    except Exception as exc:  # pragma: no cover - unexpected errors
        # Wrap anything unexpected so callers only ever see ProviderError.
        return TranslationResult(
            segment_id=segment.segment_id,
            error=ProviderError(f"Unexpected error: {exc}"),
        )
def run_translation(
    settings: AppSettings,
    input_epub: Path,
    *,
    source_language: str,
    target_language: str,
) -> None:
    """Translate every pending segment of an extracted EPUB with a live dashboard.

    Orchestrates the whole pass: filters segments, (re)builds the state file,
    runs a thread pool of provider calls, tracks consecutive failures with a
    30-minute cooldown, and retries failed segments in additional passes as
    long as each pass makes progress.
    """
    settings.ensure_directories()
    segments_doc = load_segments(settings.segments_file)
    if segments_doc.epub_path != input_epub:
        logger.warning(
            "Segments file was generated for %s but %s was provided.",
            segments_doc.epub_path,
            input_epub,
        )
    # Filter segments based on translation_files inclusion list or skip metadata
    original_count = len(segments_doc.segments)
    if settings.translation_files is not None:
        # Explicit inclusion list takes precedence
        allowed_files = set(settings.translation_files)
        segments_doc.segments = [
            seg
            for seg in segments_doc.segments
            if seg.file_path.as_posix() in allowed_files
        ]
    else:
        # No inclusion list: filter out segments with skip metadata
        segments_doc.segments = [
            seg
            for seg in segments_doc.segments
            if seg.skip_reason is None
        ]
    filtered_count = original_count - len(segments_doc.segments)
    if filtered_count > 0:
        filter_type = "inclusion list" if settings.translation_files is not None else "skip rules"
        console.print(
            f"[cyan]Filtered {filtered_count} segments from {original_count} "
            f"based on {filter_type}[/cyan]"
        )
    # Early exit if all segments were filtered out
    if len(segments_doc.segments) == 0:
        if settings.translation_files is not None:
            console.print(
                "[yellow]No segments to translate. All segments were filtered out by the "
                "translation_files inclusion list in config.yaml.[/yellow]"
            )
            console.print(
                "[yellow]This may indicate a mismatch between config.yaml and segments.json. "
                "Try deleting the work directory and re-running extraction.[/yellow]"
            )
        else:
            console.print("[yellow]No segments to translate. All segments were skipped.[/yellow]")
        return
    provider = create_provider(settings.primary_provider)
    state_doc = ensure_state(
        settings.state_file,
        segments_doc.segments,
        provider.name,
        provider.model,
        source_language,
        target_language,
    )
    # A language change invalidates prior work: wipe the state and restart.
    if state_doc.source_language != source_language or state_doc.target_language != target_language:
        console.print("[yellow]Language preferences changed; resetting translation state.[/yellow]")
        state_doc = ensure_state(
            settings.state_file,
            segments_doc.segments,
            provider.name,
            provider.model,
            source_language,
            target_language,
            force_reset=True,
        )
    console.print(
        f"Translating from {describe_language(source_language)} into {describe_language(target_language)} using {provider.model}"
    )
    # Count segments not yet COMPLETED (missing records count as pending).
    total = sum(
        1
        for seg in segments_doc.segments
        if not state_doc.segments.get(seg.segment_id)
        or state_doc.segments.get(seg.segment_id).status != SegmentStatus.COMPLETED
    )
    if total == 0:
        console.print("[green]All segments already translated.[/green]")
        return
    # Per-file bookkeeping for the "completed files" dashboard statistic.
    file_totals: Counter[Path] = Counter(seg.file_path for seg in segments_doc.segments)
    file_completed = Counter()
    for seg in segments_doc.segments:
        record = state_doc.segments.get(seg.segment_id)
        if record and record.status == SegmentStatus.COMPLETED:
            file_completed[seg.file_path] += 1
    completed_segments = sum(
        1 for record in state_doc.segments.values() if record.status == SegmentStatus.COMPLETED
    )
    pending_segments = total
    skipped_files = len({doc.file_path for doc in segments_doc.skipped_documents})
    total_files = len(file_totals)

    def completed_files_count() -> int:
        # A file counts as complete once all of its segments are done.
        return sum(
            1
            for path, total_required in file_totals.items()
            if file_completed[path] >= total_required
        )

    preview_text = "waiting…"  # NOTE(review): unused — preview_lines below drives the dashboard
    progress = Progress(
        TextColumn("[bold blue]{task.description}"),
        BarColumn(bar_width=None),
        TaskProgressColumn(),
        TimeElapsedColumn(),
        auto_refresh=False,
    )
    task_id = progress.add_task(
        "Translating", total=len(segments_doc.segments), completed=completed_segments
    )
    # Collect pending segments
    pending_segments_list = [
        seg
        for seg in segments_doc.segments
        if not state_doc.segments.get(seg.segment_id)
        or state_doc.segments.get(seg.segment_id).status != SegmentStatus.COMPLETED
    ]
    max_workers = settings.translation_workers
    active_workers = 0
    # Track recent completions (one per worker slot) for display
    preview_lines = ["waiting…"] * max_workers
    preview_index = 0  # Round-robin index for updating preview lines
    # Track cooldown state
    in_cooldown = False
    cooldown_remaining = ""

    def render_panel() -> Panel:
        # Snapshot the mutable counters above into a fresh dashboard panel.
        return _build_dashboard_panel(
            total_files=total_files,
            skipped_files=skipped_files,
            completed_files=completed_files_count(),
            total_segments=len(segments_doc.segments),
            completed_segments=completed_segments,
            pending_segments=pending_segments,
            preview_lines=preview_lines,
            progress_renderable=None,
            active_workers=active_workers,
            max_workers=max_workers,
            in_cooldown=in_cooldown,
            cooldown_remaining=cooldown_remaining,
        )

    with Live(Group(render_panel(), progress), console=console, refresh_per_second=5) as live:
        try:
            # Each iteration is one "pass" over the remaining work; failed
            # segments are reset and retried on the next pass if any progress
            # was made.
            while True:
                # Reload state to get pending segments for this pass
                state_doc = load_state(settings.state_file)
                pending_segments_list = [
                    seg
                    for seg in segments_doc.segments
                    if not state_doc.segments.get(seg.segment_id)
                    or state_doc.segments.get(seg.segment_id).status != SegmentStatus.COMPLETED
                ]
                if not pending_segments_list:
                    break
                pass_successes = 0
                pass_failures: list[str] = []
                # Use parallel translation with ThreadPoolExecutor
                with ThreadPoolExecutor(max_workers=max_workers) as executor:
                    # Submit all pending segments
                    future_to_segment = {}
                    for segment in pending_segments_list:
                        future = executor.submit(
                            _translate_segment,
                            segment,
                            provider,
                            source_language,
                            target_language,
                        )
                        future_to_segment[future] = segment
                        # NOTE(review): this counts queued futures too, so the
                        # dashboard shows all submissions as "active" until each completes.
                        active_workers += 1
                    # Process results as they complete
                    try:
                        for future in as_completed(future_to_segment):
                            active_workers -= 1
                            segment = future_to_segment[future]
                            result = future.result()
                            # Update state based on result
                            if result.is_auto_copy:
                                # Boilerplate copied verbatim: completed without provider info.
                                mark_status(
                                    settings.state_file,
                                    result.segment_id,
                                    SegmentStatus.COMPLETED,
                                    translation=result.translation,
                                    provider_name=None,
                                    model_name=None,
                                    error_message=None,
                                )
                                file_completed[segment.file_path] += 1
                                completed_segments += 1
                                pending_segments -= 1
                                text = _truncate_text(_strip_tags(result.translation))
                                preview_lines[preview_index] = f"[dim]{text}[/dim]"
                                pass_successes += 1
                            elif result.error:
                                logger.error("Translation failed for %s: %s", result.segment_id, result.error)
                                mark_status(
                                    settings.state_file,
                                    result.segment_id,
                                    SegmentStatus.ERROR,
                                    error_message=str(result.error),
                                )
                                error_msg = _truncate_text(str(result.error))
                                preview_lines[preview_index] = f"[red]{error_msg}[/red]"
                                pass_failures.append(result.segment_id)
                                # Track consecutive failures and trigger cooldown if needed
                                trans_state = load_state(settings.state_file)
                                consecutive = trans_state.consecutive_failures + 1
                                set_consecutive_failures(settings.state_file, consecutive)
                                if consecutive >= 3:
                                    # Three failures in a row: block this loop for 30
                                    # minutes while updating the dashboard countdown.
                                    in_cooldown = True
                                    cooldown_until = datetime.utcnow() + timedelta(minutes=30)
                                    set_cooldown(settings.state_file, cooldown_until)
                                    duration = 30 * 60  # NOTE(review): unused variable
                                    remaining = (cooldown_until - datetime.utcnow()).total_seconds()
                                    while remaining > 0:
                                        mins = int(remaining // 60)
                                        secs = int(remaining % 60)
                                        cooldown_remaining = f"{mins}m {secs}s"
                                        live.update(Group(render_panel(), progress))
                                        sleep_for = min(5, remaining)
                                        time.sleep(sleep_for)
                                        remaining = (cooldown_until - datetime.utcnow()).total_seconds()
                                    set_cooldown(settings.state_file, None)
                                    set_consecutive_failures(settings.state_file, 0)
                                    in_cooldown = False
                                    cooldown_remaining = ""
                            else:
                                mark_status(
                                    settings.state_file,
                                    result.segment_id,
                                    SegmentStatus.COMPLETED,
                                    translation=result.translation,
                                    provider_name=result.provider_name,
                                    model_name=result.model_name,
                                    error_message=None,
                                )
                                file_completed[segment.file_path] += 1
                                completed_segments += 1
                                pending_segments -= 1
                                text = _truncate_text(_strip_tags(result.translation))
                                preview_lines[preview_index] = f"[green]{text}[/green]"
                                pass_successes += 1
                                # Reset consecutive failures on any success
                                set_consecutive_failures(settings.state_file, 0)
                            # Round-robin through preview slots
                            preview_index = (preview_index + 1) % max_workers
                            progress.advance(task_id)
                            live.update(Group(render_panel(), progress))
                    except KeyboardInterrupt:
                        console.print("\n[yellow]Interrupted by user. Canceling pending translations...[/yellow]")
                        executor.shutdown(wait=False, cancel_futures=True)
                        raise
                # After each pass, check if we should retry failed segments
                if pass_failures:
                    if pass_successes == 0:
                        # No progress made, stop trying
                        break
                    # Reset preview lines to "waiting…" for next pass
                    preview_lines = ["waiting…"] * max_workers
                    preview_index = 0
                    reset_error_segments(settings.state_file, pass_failures)
                    continue
        except KeyboardInterrupt:
            raise
    # Note: Polish is now applied incrementally after each translation (see line 203)
    # No separate polish pass needed at the end
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/languages.py | Python | from __future__ import annotations
# Maps lowercase user input (codes, names, common aliases) to a canonical
# (language_code, display_name) pair used throughout the translation pipeline.
_LANGUAGE_MAP = {
    "auto": ("auto", "Auto"),
    "automatic": ("auto", "Auto"),
    "detect": ("auto", "Auto"),
    "en": ("en", "English"),
    "eng": ("en", "English"),
    "english": ("en", "English"),
    "zh": ("zh", "Chinese"),
    "cn": ("zh-CN", "Simplified Chinese"),
    "zh-cn": ("zh-CN", "Simplified Chinese"),
    "zh_cn": ("zh-CN", "Simplified Chinese"),
    "simplified": ("zh-CN", "Simplified Chinese"),
    "simplified chinese": ("zh-CN", "Simplified Chinese"),
    "zh-hant": ("zh-TW", "Traditional Chinese"),
    "zh_tw": ("zh-TW", "Traditional Chinese"),
    "traditional": ("zh-TW", "Traditional Chinese"),
    "traditional chinese": ("zh-TW", "Traditional Chinese"),
    "es": ("es", "Spanish"),
    "spanish": ("es", "Spanish"),
    "fr": ("fr", "French"),
    "french": ("fr", "French"),
    "de": ("de", "German"),
    "german": ("de", "German"),
    "ja": ("ja", "Japanese"),
    "japanese": ("ja", "Japanese"),
    "ko": ("ko", "Korean"),
    "korean": ("ko", "Korean"),
}
def normalize_language(value: str) -> tuple[str, str]:
    """Map a free-form language string to a (code, display name) pair."""
    key = value.strip().lower()
    if not key:
        return ("auto", "Auto")
    canonical = _LANGUAGE_MAP.get(key)
    if canonical is not None:
        return canonical
    # Unknown language: the trimmed raw value doubles as code and display name.
    cleaned = value.strip()
    return (cleaned, cleaned)
def describe_language(code: str) -> str:
    """Return the display name for a normalized language code, else the code itself."""
    return next(
        (display for stored_code, display in _LANGUAGE_MAP.values() if stored_code == code),
        code,
    )
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/polish.py | Python | """Text polishing functions using cjk-text-formatter.
This module provides backward-compatible wrappers around cjk-text-formatter
for use in TEPUB's translation pipeline.
"""
from __future__ import annotations
from cjk_text_formatter.polish import CHINESE_RE, polish_text
from state.models import SegmentStatus, StateDocument
# Alias for backward compatibility
polish_translation = polish_text
def target_is_chinese(language: str) -> bool:
    """Check if target language is Chinese.

    Args:
        language: Language name or code

    Returns:
        True if the name mentions "chinese" or contains CJK characters.
    """
    if "chinese" in language.lower():
        return True
    return CHINESE_RE.search(language) is not None
def polish_state(state: StateDocument) -> StateDocument:
    """Return a deep copy of *state* with every completed translation polished.

    The input document is left untouched; only COMPLETED records with a
    non-empty translation are rewritten in the copy.
    """
    polished = state.model_copy(deep=True)
    for record in polished.segments.values():
        if record.status == SegmentStatus.COMPLETED and record.translation:
            record.translation = polish_text(record.translation)
    return polished
def polish_if_chinese(
    state_file_path,
    target_language: str,
    *,
    load_fn,
    save_fn,
    console_print,
    message_prefix: str = "",
) -> bool:
    """Run the Chinese-typography polish pass over a state file when needed.

    Consolidates the common pattern: check the target language, load the
    state, polish it, compare, save only when something changed, and report
    progress via *console_print*.

    Args:
        state_file_path: Path to state file
        target_language: Target language string
        load_fn: Function to load state (e.g. load_state)
        save_fn: Function to save state (e.g. save_state)
        console_print: Console print function
        message_prefix: Optional prefix for console messages

    Returns:
        True if state was polished and saved, False otherwise
    """
    if not target_is_chinese(target_language):
        return False
    try:
        current = load_fn(state_file_path)
    except FileNotFoundError:
        return False
    polished = polish_state(current)
    if polished.model_dump() == current.model_dump():
        # Nothing changed; avoid a useless write.
        return False
    prefix = f"{message_prefix} " if message_prefix else ""
    console_print(f"[cyan]{prefix}Formatting translated text for Chinese typography…[/cyan]")
    save_fn(polished, state_file_path)
    console_print("[green]Formatting complete.[/green]")
    return True
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/prefilter.py | Python | from __future__ import annotations
import re
from state.models import Segment
# Strings made solely of punctuation/symbol characters (no translatable words).
_PUNCT_ONLY = re.compile(r"^[\s…—–―·•\*\&\^%$#@!~`´°¤§±×÷⇒→←↑↓│¦∗⊗∘\[\]{}()<>\\/\\|\\-]+$")
# Purely numeric content such as page ranges ("12–15") or decimal numbers.
_NUMERIC_ONLY = re.compile(r"^[\s\d.,:;()\-–—〜~]+$")
# Page markers like "Page 12", "p. iv", "pp. 10" (arabic or roman numerals).
_PAGE_MARKER = re.compile(r"^(page|p\.?|pp\.?)[\s\divxlc]+$", re.IGNORECASE)
# Lines starting with an ISBN declaration.
_ISBN = re.compile(r"^isbn\b", re.IGNORECASE)
def _is_letter(char: str) -> bool:
return char.isalpha()
def should_auto_copy(segment: Segment) -> bool:
text = (segment.source_content or "").strip()
if not text:
return True
if _PUNCT_ONLY.match(text):
return True
if _NUMERIC_ONLY.match(text):
return True
if _PAGE_MARKER.match(text):
return True
if _ISBN.match(text):
return True
if len(text) <= 3 and not any(_is_letter(ch) for ch in text):
return True
return False
__all__ = ["should_auto_copy"]
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/prompt_builder.py | Python | from __future__ import annotations
from textwrap import dedent
from state.models import ExtractMode, Segment
from .languages import describe_language
# Default system prompt used when no custom prompt is configured
DEFAULT_SYSTEM_PROMPT = """
You are an expert translator and always excellent at preserving fidelity.
{language_instruction}
{mode_instruction}
Avoid repeating the source text or adding explanations unless strictly necessary for comprehension.
""".strip()
_PROMPT_PREAMBLE: str | None = None
def configure_prompt(preamble: str | None) -> None:
"""Configure custom prompt preamble.
Args:
preamble: Custom prompt text with optional placeholders:
{source_language}, {target_language}, {mode_instruction}
Pass None to use the built-in DEFAULT_SYSTEM_PROMPT.
"""
global _PROMPT_PREAMBLE
_PROMPT_PREAMBLE = preamble.strip() if preamble else None
def build_prompt(segment: Segment, source_language: str, target_language: str) -> str:
    """Assemble the full prompt (system instructions plus source text) for a segment.

    Args:
        segment: The segment to translate.
        source_language: Source language code or 'auto'.
        target_language: Target language code.

    Returns:
        Complete prompt text ending with the segment's source content.
    """
    display_source = describe_language(source_language)
    display_target = describe_language(target_language)

    if segment.extract_mode == ExtractMode.HTML:
        mode_instruction = "Preserve HTML structure in the translation."
    else:
        mode_instruction = "Return a faithful translation of the prose without adding explanations."

    if source_language == "auto" or display_source.lower() == "auto":
        language_instruction = (
            f"Detect the source language automatically and translate it into {display_target}."
        )
    else:
        language_instruction = f"Translate from {display_source} into {display_target}."

    if _PROMPT_PREAMBLE:
        # Custom preamble path: only source/target/mode placeholders are filled.
        intro = dedent(
            _PROMPT_PREAMBLE.format(
                source_language=display_source,
                target_language=display_target,
                mode_instruction=mode_instruction,
            )
        ).strip()
    else:
        intro = DEFAULT_SYSTEM_PROMPT.format(
            language_instruction=language_instruction,
            mode_instruction=mode_instruction,
        ).strip()

    return f"{intro}\n\nSOURCE:\n{segment.source_content}"
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/__init__.py | Python | from __future__ import annotations
from config import ProviderConfig
from .anthropic import AnthropicProvider
from .base import BaseProvider, ProviderError, ProviderFatalError
from .deepl import DeepLProvider
from .gemini import GeminiProvider
from .grok import GrokProvider
from .ollama import OllamaProvider
from .openai import OpenAIProvider
def create_provider(config: ProviderConfig) -> BaseProvider:
    """Instantiate the translation provider named by ``config.name``.

    Raises:
        ProviderError: when the name matches no registered provider.
    """
    # Case-insensitive lookup table of known provider implementations.
    registry = {
        "openai": OpenAIProvider,
        "ollama": OllamaProvider,
        "gemini": GeminiProvider,
        "grok": GrokProvider,
        "anthropic": AnthropicProvider,
        "deepl": DeepLProvider,
    }
    provider_cls = registry.get(config.name.lower())
    if provider_cls is None:
        raise ProviderError(f"Unsupported provider: {config.name}")
    return provider_cls(config)


__all__ = [
    "BaseProvider",
    "ProviderError",
    "ProviderFatalError",
    "create_provider",
]
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/anthropic.py | Python | from __future__ import annotations
import os
# Optional import to avoid hard dependency during tests
try: # pragma: no cover
import anthropic
except ImportError: # pragma: no cover
anthropic = None
from config import ProviderConfig
from state.models import Segment
from translation import prompt_builder
from .base import BaseProvider, ProviderFatalError, ensure_translation_available
class AnthropicProvider(BaseProvider):
    """Translation provider backed by Anthropic's Messages API."""

    supports_html = True

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        # Config key wins; fall back to the environment variable.
        api_key = self.config.api_key or os.getenv("ANTHROPIC_API_KEY")
        if not api_key:
            raise ProviderFatalError("ANTHROPIC_API_KEY missing; cannot call Anthropic provider")
        if anthropic is None:
            raise ProviderFatalError("anthropic package not installed")
        self._client = anthropic.Anthropic(api_key=api_key)

    def translate(
        self,
        segment: Segment,
        *,
        source_language: str,
        target_language: str,
    ) -> str:
        """Translate one segment; raises ProviderFatalError on API failure or empty reply."""
        prompt = prompt_builder.build_prompt(segment, source_language, target_language)
        request_messages = [{"role": "user", "content": prompt}]
        try:
            response = self._client.messages.create(
                model=self.config.model,
                max_tokens=4096,
                system="You are a precise literary translator.",
                messages=request_messages,
            )
        except anthropic.APIError as exc:  # pragma: no cover - network dependent
            raise ProviderFatalError(f"Anthropic request failed: {exc}") from exc
        if not response.content:
            raise ProviderFatalError("Anthropic response missing content")
        # First content block carries the translated text.
        return ensure_translation_available(response.content[0].text)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/base.py | Python | from __future__ import annotations
import abc
from config import ProviderConfig
from state.models import Segment
class ProviderError(RuntimeError):
    """Base error for translation-provider failures."""


class ProviderFatalError(ProviderError):
    """Fatal provider error that should abort the translation run."""


class BaseProvider(abc.ABC):
    """Common interface implemented by every translation provider."""

    # Whether the provider can translate HTML fragments (vs plain prose only).
    supports_html: bool = True

    def __init__(self, config: ProviderConfig):
        self.config = config

    @property
    def name(self) -> str:
        """Provider name, taken from the configuration."""
        return self.config.name

    @property
    def model(self) -> str:
        """Model identifier, taken from the configuration."""
        return self.config.model

    @abc.abstractmethod
    def translate(self, segment: Segment, source_language: str, target_language: str) -> str:
        """Translate a segment's content; implemented by concrete providers."""
        raise NotImplementedError


def ensure_translation_available(text: str | None) -> str:
    """Return ``text`` unchanged, or raise ProviderError when it is empty/None."""
    if text:
        return text
    raise ProviderError("Provider returned empty translation")
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/deepl.py | Python | from __future__ import annotations
import os
from typing import Any
import requests
from config import ProviderConfig
from state.models import Segment
from translation.languages import describe_language
from .base import BaseProvider, ProviderFatalError, ensure_translation_available
class DeepLProvider(BaseProvider):
    """Translation provider backed by the DeepL REST API (plain text only)."""

    # DeepL receives raw text, so HTML segments are not supported.
    supports_html = False

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        # Default to the paid-tier endpoint when no URL is configured.
        if not self.config.base_url:
            self.config.base_url = "https://api.deepl.com/v2/translate"

    def translate(
        self,
        segment: Segment,
        *,
        source_language: str,
        target_language: str,
    ) -> str:
        """Translate one segment via DeepL; raises ProviderFatalError on any failure."""
        api_key = self.config.api_key or os.getenv("DEEPL_API_KEY")
        if not api_key:
            raise ProviderFatalError("DEEPL_API_KEY missing; cannot call DeepL provider")

        target = _deepl_lang_code(target_language)
        if not target:
            raise ProviderFatalError(f"Unsupported target language for DeepL: {target_language}")

        form = {
            "text": segment.source_content,
            "target_lang": target,
        }
        source = _deepl_lang_code(source_language)
        # Omit source_lang for auto-detection.
        if source and source.lower() != "auto":
            form["source_lang"] = source

        try:
            response = requests.post(
                self.config.base_url,
                headers={"Authorization": f"DeepL-Auth-Key {api_key}"},
                data=form,
                timeout=60,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as exc:  # pragma: no cover - network dependent
            raise ProviderFatalError(f"DeepL request failed: {exc}") from exc

        payload: Any = response.json()
        translations = payload.get("translations") if isinstance(payload, dict) else None
        if isinstance(translations, list) and translations:
            return ensure_translation_available(translations[0].get("text"))
        raise ProviderFatalError("DeepL response missing translation")
# Display-name (lower-cased) → DeepL language code.
_DEEPL_CODES = {
    "german": "DE",
    "english": "EN",
    "british english": "EN-GB",
    "american english": "EN-US",
    "spanish": "ES",
    "french": "FR",
    "italian": "IT",
    "dutch": "NL",
    "polish": "PL",
    "portuguese": "PT-PT",
    "brazilian portuguese": "PT-BR",
    "russian": "RU",
    "japanese": "JA",
    "chinese": "ZH",
    "simplified chinese": "ZH",
    "traditional chinese": "ZH",  # DeepL returns simplified
    "korean": "KO",
}


def _deepl_lang_code(language: str) -> str | None:
    """Map a language name/code to DeepL's code, or None when unsupported."""
    return _DEEPL_CODES.get(describe_language(language).lower())
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/gemini.py | Python | from __future__ import annotations
import os
from typing import Any
# Optional import: delay until used to avoid hard dependency in test env
try: # pragma: no cover - imported lazily
from google import genai
except ImportError: # pragma: no cover
genai = None
from config import ProviderConfig
from state.models import Segment
from translation import prompt_builder
from .base import BaseProvider, ProviderFatalError, ensure_translation_available
class GeminiProvider(BaseProvider):
    """Translation provider backed by Google's Gemini API (google-genai SDK)."""

    supports_html = True

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        # Client is created lazily so construction never fails when the SDK
        # or API key is absent; errors surface on the first translate() call.
        self._client = None

    def _client_instance(self) -> genai.Client:
        # Build and cache the genai client on first use.
        if self._client is None:
            if genai is None:
                raise ProviderFatalError("google-genai package not installed")
            api_key = self.config.api_key or os.getenv("GEMINI_API_KEY")
            if not api_key:
                raise ProviderFatalError("GEMINI_API_KEY missing; cannot call Gemini provider")
            self._client = genai.Client(api_key=api_key)
        return self._client

    def translate(
        self,
        segment: Segment,
        *,
        source_language: str,
        target_language: str,
    ) -> str:
        # Translate one segment; raises ProviderFatalError on request failure
        # or (via ensure_translation_available) on an empty response.
        client = self._client_instance()
        prompt = prompt_builder.build_prompt(segment, source_language, target_language)
        try:
            response = client.models.generate_content(
                model=self.config.model,
                contents=[{"role": "user", "parts": [prompt]}],
            )
        # Broad catch on purpose: SDK exception types vary across versions.
        except Exception as exc:  # pragma: no cover - network dependent
            raise ProviderFatalError(f"Gemini request failed: {exc}") from exc
        text: str | None = None
        # Prefer the convenience .text attribute; fall back to walking the
        # candidates structure (presumably older/alternate SDK response
        # shapes — TODO confirm against the google-genai version in use).
        if hasattr(response, "text"):
            text = response.text
        elif hasattr(response, "candidates"):
            candidates: Any = response.candidates  # type: ignore[attr-defined]
            if candidates:
                parts = getattr(candidates[0], "content", None)
                if parts and getattr(parts, "parts", None):
                    text = parts.parts[0].text  # type: ignore[attr-defined]
        return ensure_translation_available(text)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/grok.py | Python | from __future__ import annotations
import os
from typing import Any
import requests
from config import ProviderConfig
from state.models import Segment
from translation import prompt_builder
from .base import BaseProvider, ProviderFatalError, ensure_translation_available
class GrokProvider(BaseProvider):
    """Translation provider backed by xAI's Grok chat-completions endpoint."""

    supports_html = True

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        # Default to xAI's public endpoint when no URL is configured.
        if not self.config.base_url:
            self.config.base_url = "https://api.x.ai/v1/chat/completions"

    def translate(
        self,
        segment: Segment,
        *,
        source_language: str,
        target_language: str,
    ) -> str:
        """Translate one segment via Grok; raises ProviderFatalError on failure."""
        api_key = self.config.api_key or os.getenv("GROK_API_KEY")
        if not api_key:
            raise ProviderFatalError("GROK_API_KEY missing; cannot call Grok provider")

        prompt = prompt_builder.build_prompt(segment, source_language, target_language)
        request_body = {
            "model": self.config.model,
            "messages": [
                {"role": "system", "content": "You are a precise literary translator."},
                {"role": "user", "content": prompt},
            ],
            "stream": False,
        }
        request_headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        request_headers.update(self.config.extra_headers)

        try:
            response = requests.post(
                self.config.base_url,
                json=request_body,
                headers=request_headers,
                timeout=120,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as exc:  # pragma: no cover - network dependent
            raise ProviderFatalError(f"Grok request failed: {exc}") from exc

        return ensure_translation_available(self._extract_text(response.json()))

    @staticmethod
    def _extract_text(data: Any):
        """Pull the assistant text out of an OpenAI-style chat-completions payload."""
        if not isinstance(data, dict):
            return None
        choices = data.get("choices")
        if not (choices and isinstance(choices, list)):
            return None
        first = choices[0]
        if not isinstance(first, dict):
            return None
        message = first.get("message")
        content = message.get("content") if isinstance(message, dict) else None
        if isinstance(content, list) and content:
            # Some implementations return a list of dicts with text parts.
            part = content[0]
            if isinstance(part, dict):
                content = part.get("text")
        return content
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/ollama.py | Python | from __future__ import annotations
import json
from typing import Any
import requests
from config import ProviderConfig
from state.models import Segment
from translation.prompt_builder import build_prompt
from .base import BaseProvider, ProviderError, ensure_translation_available
class OllamaProvider(BaseProvider):
    """Translation provider backed by a local Ollama server.

    Uses the non-streaming /api/generate endpoint. All failures are reported
    as ProviderError so callers only have to handle one exception type.
    """

    supports_html = True

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        # Default to the standard local Ollama endpoint when unconfigured.
        if not self.config.base_url:
            self.config.base_url = "http://localhost:11434/api/generate"

    def translate(self, segment: Segment, source_language: str, target_language: str) -> str:
        """Translate one segment via Ollama.

        Args:
            segment: Segment whose source_content is translated.
            source_language: Source language code or 'auto'.
            target_language: Target language code.

        Returns:
            The translated text.

        Raises:
            ProviderError: on connection failure/timeout, an HTTP error
                status, or an empty/missing translation in the response.
        """
        payload = {
            "model": self.config.model,
            "prompt": build_prompt(segment, source_language, target_language),
            "stream": False,  # request the whole completion in one response
        }
        try:
            # json= serializes the payload and sets the Content-Type header.
            response = requests.post(self.config.base_url, json=payload, timeout=120)
        except requests.exceptions.RequestException as exc:
            # Fix: previously raw requests exceptions (connection refused,
            # timeout) escaped uncaught; wrap them like every other provider.
            raise ProviderError(f"Ollama request failed: {exc}") from exc
        if response.status_code >= 400:
            raise ProviderError(f"Ollama error {response.status_code}: {response.text}")
        body: Any = response.json()
        text = body.get("response") if isinstance(body, dict) else None
        return ensure_translation_available(text)
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/providers/openai.py | Python | from __future__ import annotations
import json
import time
from typing import Any
import requests
from config import ProviderConfig
from state.models import Segment
from translation.prompt_builder import build_prompt
from .base import BaseProvider, ProviderError, ProviderFatalError, ensure_translation_available
class OpenAIProvider(BaseProvider):
    """Translation provider backed by the OpenAI Responses API."""

    supports_html = True

    def __init__(self, config: ProviderConfig):
        super().__init__(config)
        if not self.config.base_url:
            self.config.base_url = "https://api.openai.com/v1/responses"

    def translate(self, segment: Segment, source_language: str, target_language: str) -> str:
        """Translate one segment, retrying up to 3 times on network failures.

        Args:
            segment: Segment whose source_content is translated.
            source_language: Source language code or 'auto'.
            target_language: Target language code.

        Returns:
            The translated text.

        Raises:
            ProviderFatalError: when the API key is missing, the API returns
                an HTTP error status, or all retry attempts fail.
        """
        import os  # local import: this module does not import os at top level

        # Fix: fall back to the environment variable, matching the other
        # providers and the error message below (previously only
        # config.api_key was consulted despite the message naming the env var).
        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ProviderFatalError("OPENAI_API_KEY missing; cannot call OpenAI provider")
        payload = {
            "model": self.config.model,
            "input": build_prompt(segment, source_language, target_language),
        }
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        headers.update(self.config.extra_headers)
        last_exception: Exception | None = None
        for attempt in range(3):
            try:
                response = requests.post(
                    self.config.base_url,
                    headers=headers,
                    json=payload,
                    timeout=60,
                )
                # HTTP-level errors are not retried: they are usually
                # deterministic (bad key, bad model, quota exhausted).
                if response.status_code >= 400:
                    raise ProviderFatalError(
                        f"OpenAI API error {response.status_code}: {response.text}"
                    )
                return ensure_translation_available(self._extract_text(response.json()))
            except requests.exceptions.RequestException as exc:
                last_exception = exc
                if attempt < 2:
                    time.sleep(2**attempt)  # exponential backoff: 1s, 2s
                else:
                    raise ProviderFatalError(
                        f"OpenAI request failed after 3 attempts: {exc}"
                    ) from exc
        if last_exception:
            raise ProviderFatalError(f"OpenAI request failed: {last_exception}") from last_exception
        raise ProviderError("OpenAI provider failed without an exception")

    @staticmethod
    def _extract_text(body: Any):
        """Best-effort extraction of output text from a Responses-style payload."""
        if not isinstance(body, dict):
            return None
        # Responses API uses "output"; chat-completions-style payloads use "choices".
        output = body.get("output") or body.get("choices")
        if not (output and isinstance(output, list)):
            return None
        first = output[0]
        if not isinstance(first, dict):
            return None
        text = first.get("text") or first.get("content")
        if isinstance(text, list) and text:
            maybe = text[0]
            if isinstance(maybe, dict):
                text = maybe.get("text")
        return text
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
src/translation/refusal_filter.py | Python | from __future__ import annotations
import re
_REFUSAL_PREFIXES: tuple[str, ...] = (
"i'm sorry",
"im sorry",
"sorry, i",
"sorry i",
"i cannot",
"i can't",
"i cant",
"抱歉,我",
"抱歉,無法",
"抱歉,无法",
"抱歉,我無法",
"抱歉,我无法",
"抱歉,我不能",
"抱歉,我不",
)
_WHITESPACE = re.compile(r"\s+")
def _normalise(text: str) -> str:
text = text.strip().lower()
text = text.replace("’", "'")
text = text.replace("`", "'")
text = _WHITESPACE.sub(" ", text)
return text
def looks_like_refusal(text: str | None, *, max_length: int = 400) -> bool:
if not text:
return False
stripped = text.strip()
if not stripped:
return False
normalised = _normalise(stripped)
prefix_window = normalised[:max_length]
for prefix in _REFUSAL_PREFIXES:
if prefix_window.startswith(prefix):
return True
return False
__all__ = ["looks_like_refusal"]
| xiaolai/tepub | 37 | TEPUB - Tools for EPUB: Translate books, create audiobooks, and export to various formats | Python | xiaolai | xiaolai | inblockchain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.