file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
gemini_tts_stream_test.py | Python | #!/usr/bin/env python3
import base64
import os
import mimetypes
import wave
import struct
import argparse
from dotenv import load_dotenv
try:
import pyaudio
from google import genai
from google.genai import types
except ImportError:
print("Please install the required packages: pip install google-generativeai python-dotenv pyaudio")
import sys
sys.exit(1)
# Audio constants
SAMPLE_RATE = 24000
CHANNELS = 1
FORMAT = pyaudio.paInt16 # 16-bit audio format
def save_wav_file(file_name, audio_data, sample_rate=24000, channels=1):
    """Write raw 16-bit PCM bytes to *file_name* as a well-formed WAV file.

    Args:
        file_name: Destination path for the WAV file.
        audio_data: Raw little-endian 16-bit PCM sample bytes.
        sample_rate: Sample rate in Hz (defaults to Gemini's 24 kHz output).
        channels: Channel count (defaults to mono).
    """
    wav_out = wave.open(file_name, 'wb')
    try:
        wav_out.setnchannels(channels)
        wav_out.setsampwidth(2)  # 16-bit samples -> 2 bytes each
        wav_out.setframerate(sample_rate)
        wav_out.writeframes(audio_data)
    finally:
        wav_out.close()
    print(f"Saved WAV file: {file_name}")
def play_audio(audio_data, sample_rate=24000, channels=1):
    """Play raw 16-bit PCM audio through the default output device.

    Args:
        audio_data: Raw little-endian 16-bit PCM sample bytes.
        sample_rate: Playback sample rate in Hz.
        channels: Number of audio channels.
    """
    p = pyaudio.PyAudio()
    # Open stream
    stream = p.open(
        format=pyaudio.paInt16,
        channels=channels,
        rate=sample_rate,
        output=True
    )
    # Play audio
    print("Playing audio...")
    try:
        stream.write(audio_data)
    finally:
        # BUG FIX: previously the stream and PyAudio instance leaked if
        # write() raised; always release the audio device.
        stream.stop_stream()
        stream.close()
        p.terminate()
    print("Audio playback complete")
def generate(play_directly=False):
    """Generate speech audio from Gemini and play it or save it to disk.

    Uploads a small sample text file, streams a TTS response from the
    Gemini API, and handles each returned chunk: audio chunks are played
    or saved, text chunks are printed.

    Args:
        play_directly: If True, play each audio chunk through the
            speakers via play_audio(); if False, save it as
            gemini_tts_output.wav.
    """
    # Load environment variables
    load_dotenv()
    # Get API key from environment
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        print("Gemini API key not found. Please add it to .env file")
        import sys
        sys.exit(1)
    # Initialize client
    client = genai.Client(
        api_key=api_key,
    )
    # Create a sample text file for upload
    with open("sample_input.txt", "w") as f:
        f.write("Hello, this is a test file for Gemini TTS.")
    try:
        # Upload the sample file
        files = [
            client.files.upload(file="sample_input.txt"),
        ]
        # Model configuration
        # NOTE(review): the model id says "image-generation" but an audio
        # response is requested below -- confirm this model supports TTS.
        model = "gemini-2.0-flash-exp-image-generation"
        contents = [
            types.Content(
                role="user",
                parts=[
                    types.Part.from_text(text="""Say with a cheerful, upbeat intonation: Have a wonderful day!"""),
                ],
            ),
            types.Content(
                role="model",
                parts=[
                    types.Part.from_uri(
                        file_uri=files[0].uri,
                        mime_type=files[0].mime_type,
                    ),
                ],
            ),
            types.Content(
                role="user",
                parts=[
                    types.Part.from_text(text="""Generate a cheerful greeting with enthusiasm!"""),
                ],
            ),
        ]
        # Configure TTS settings
        generate_content_config = types.GenerateContentConfig(
            response_modalities=[
                "audio",
            ],
            speech_config=types.SpeechConfig(
                voice_config=types.VoiceConfig(
                    prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Aoede")
                )
            ),
            safety_settings=[
                types.SafetySetting(
                    category="HARM_CATEGORY_CIVIC_INTEGRITY",
                    threshold="OFF",  # Off
                ),
            ],
            response_mime_type="text/plain",
        )
        print("Generating TTS content with Gemini...")
        # Stream the response
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            # Skip chunks that carry no content parts at all
            if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
                continue
            if chunk.candidates[0].content.parts[0].inline_data:
                inline_data = chunk.candidates[0].content.parts[0].inline_data
                audio_data = inline_data.data
                print(f"Received audio data of mime type: {inline_data.mime_type}")
                if play_directly:
                    # Play the audio directly
                    play_audio(audio_data)
                else:
                    # Save as proper WAV file with headers
                    # NOTE(review): every audio chunk writes the same file
                    # name, so a multi-chunk stream keeps only the last
                    # chunk -- confirm whether chunks should be appended.
                    file_name = "gemini_tts_output.wav"
                    save_wav_file(file_name, audio_data)
            else:
                # Non-audio chunks (plain text) are just echoed
                print(chunk.text)
    except Exception as e:
        print(f"Error: {e}")
    finally:
        # Clean up the sample file
        if os.path.exists("sample_input.txt"):
            os.remove("sample_input.txt")
def main():
    """Parse command-line flags and kick off TTS generation."""
    arg_parser = argparse.ArgumentParser(description="Test Gemini TTS with proper audio handling")
    arg_parser.add_argument("--play", action="store_true", help="Play audio directly instead of saving to file")
    parsed = arg_parser.parse_args()
    generate(play_directly=parsed.play)
if __name__ == "__main__":
main() | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
gemini_tts_test.py | Python | #!/usr/bin/env python3
import argparse
import asyncio
import traceback
import os
import sys
import pyaudio
import websockets
from dotenv import load_dotenv
try:
from google import genai
from google.genai import types
except ImportError:
print("Please install the required packages: pip install google-generativeai python-dotenv")
sys.exit(1)
# Audio constants
FORMAT = pyaudio.paInt16
CHANNELS = 1
RECEIVE_SAMPLE_RATE = 24000
CHUNK_SIZE = 1024
# Model settings
MODEL = "models/gemini-2.0-flash-exp"
class GeminiTTS:
    """Streams text to the Gemini live API and plays the returned audio.

    Wires together a websocket session to Gemini, an asyncio queue of raw
    audio chunks, and a PyAudio output stream, with retry logic around
    transient API failures.
    """

    def __init__(self, api_key=None, speed_factor=1.0):
        """Set up the Gemini client, audio output, and TTS configuration.

        Args:
            api_key: Gemini API key; falls back to the GEMINI_API_KEY
                environment variable (including values loaded from .env).
            speed_factor: Playback speed multiplier; applied by opening the
                output stream at an adjusted sample rate.
        """
        # Load environment variables from .env file
        load_dotenv()
        # Try to get API key from argument, environment variable, or .env file
        self.api_key = api_key or os.environ.get("GEMINI_API_KEY")
        if not self.api_key:
            print("Gemini API key not found. Please add it to .env file or provide via --api-key")
            print("Check .env.sample for the expected format")
            sys.exit(1)
        # Configure the Gemini API
        self.client = genai.Client(
            http_options={"api_version": "v1alpha"},
            api_key=self.api_key
        )
        # Configure TTS settings
        self.config = types.LiveConnectConfig(
            response_modalities=["audio"],
            speech_config=types.SpeechConfig(
                voice_config=types.VoiceConfig(
                    prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Aoede")
                )
            ),
        )
        # Create PyAudio instance
        self.pya = pyaudio.PyAudio()
        # Create audio queue for playback (a fresh Queue is created per
        # session inside run())
        self.audio_in_queue = None
        # Set speed factor for playback (1.0 = normal speed, 1.25 = 25% faster)
        self.speed_factor = speed_factor
        # Test text for TTS conversion
        self.test_text = "This is a test of Gemini's text to speech capabilities. If you can hear this, the implementation is working correctly. The quick brown fox jumps over the lazy dog. Testing, testing, one, two, three."

    async def receive_audio(self, session):
        """Background task to read from websocket and write audio chunks to the queue"""
        try:
            while True:
                turn = session.receive()
                try:
                    async for response in turn:
                        # Audio payloads go to the playback queue ...
                        if data := response.data:
                            self.audio_in_queue.put_nowait(data)
                            continue
                        # ... text payloads are echoed inline
                        if text := response.text:
                            print(text, end="")
                except websockets.exceptions.ConnectionClosedError as e:
                    reason = getattr(e, 'reason', 'Internal error encountered')
                    print(f"\nConnection error from Gemini API: {reason}")
                    # Make sure to raise an exception that can be caught by the retry logic
                    # Re-raise with enough context to be caught by outer try/except
                    raise RuntimeError(f"Connection error: {reason}") from e
                except Exception as e:
                    print(f"\nError receiving audio from Gemini API: {e}")
                    traceback.print_exc(limit=2)
                    # Re-raise all exceptions so they're caught by retry logic
                    raise RuntimeError(f"API error: {e}") from e
                # If model is interrupted, clear the queue
                while not self.audio_in_queue.empty():
                    self.audio_in_queue.get_nowait()
        except Exception as e:
            print(f"\nFatal error in receive_audio task: {e}")
            traceback.print_exc(limit=2)
            # Re-raise to ensure TaskGroup catches this
            raise

    async def play_audio(self):
        """Play audio from the queue until cancelled (runs as a TaskGroup task)."""
        try:
            # Initialize audio playback stream with adjusted rate for speed control
            adjusted_rate = int(RECEIVE_SAMPLE_RATE * self.speed_factor)
            stream = await asyncio.to_thread(
                self.pya.open,
                format=FORMAT,
                channels=CHANNELS,
                rate=adjusted_rate,  # Use adjusted rate for speed control
                output=True,
            )
            print(f"Playing audio at {self.speed_factor:.2f}x speed ({adjusted_rate} Hz)")
            while True:
                # Get audio data from queue
                bytestream = await self.audio_in_queue.get()
                # Play the audio (blocking write runs in a worker thread)
                await asyncio.to_thread(stream.write, bytestream)
        except Exception as e:
            print(f"Error playing audio: {e}")
            traceback.print_exception(type(e), e, e.__traceback__)

    async def run(self, repeat_count=3, interval=5, max_retries=3):
        """Run the TTS system with the test text, repeating playback at specified intervals

        Args:
            repeat_count: Number of times to play the audio
            interval: Seconds to wait between playbacks
            max_retries: Maximum number of retries on API errors before giving up
        """
        try:
            # Play the audio multiple times with intervals
            for i in range(repeat_count):
                print(f"\nPlayback {i+1}/{repeat_count}")
                # Retry logic for handling API errors
                retry_count = 0
                success = False
                while not success and retry_count < max_retries:
                    try:
                        async with (
                            self.client.aio.live.connect(model=MODEL, config=self.config) as session,
                            asyncio.TaskGroup() as tg,
                        ):
                            # Initialize audio queue
                            self.audio_in_queue = asyncio.Queue()
                            # Start audio receive and playback tasks
                            tg.create_task(self.receive_audio(session))
                            tg.create_task(self.play_audio())
                            # Send the test text to Gemini for TTS conversion with specific prompt
                            print(f"Sending test text to Gemini: '{self.test_text}'")
                            prompt = f"Read out loud the following text. No need to say yes, okay, and stuff like that. Just focus on reading it out loud by itself with nothing else. IMPORTANT: Skip all preambles like 'okay' or 'I'll read this'. ONLY read exactly these words. Do not ask if I want you to read anything else. Just read the following text and stop: {self.test_text}"
                            await session.send(input=prompt, end_of_turn=True)
                            # Wait for the audio to finish playing (approximate time)
                            # NOTE(review): a fixed 10 s wait is a rough
                            # estimate; long texts may be cut off. Also, the
                            # TaskGroup exit waits on the two forever-looping
                            # tasks, so leaving this block presumably relies on
                            # them erroring when the session closes -- confirm.
                            await asyncio.sleep(10)  # Adjust if needed based on text length
                            success = True
                    except Exception as e:
                        retry_count += 1
                        if retry_count >= max_retries:
                            print(f"Failed after {max_retries} attempts. Last error: {e}")
                            print("Exiting due to repeated failures.")
                            # Force exit directly from here
                            # NOTE(review): os._exit skips all cleanup and makes
                            # the RuntimeError handling in main() unreachable --
                            # confirm this hard exit is intended.
                            os._exit(1)  # This is a hard exit that bypasses normal cleanup
                        else:
                            print(f"Error encountered: {e}. Retrying ({retry_count}/{max_retries})...")
                            await asyncio.sleep(1)  # Brief pause before retry
                # Wait for the specified interval before next playback if we're not on the last one
                if i < repeat_count - 1 and success:
                    print(f"Waiting {interval} seconds before next playback...")
                    await asyncio.sleep(interval)
            print("\nTest complete. Exiting.")
        except asyncio.CancelledError:
            print("Operation cancelled")
        except Exception as e:
            print(f"Error in TTS process: {e}")
            traceback.print_exception(type(e), e, e.__traceback__)
        finally:
            # Clean up resources
            self.pya.terminate()
def main():
    """CLI entry point: parse arguments and run the Gemini TTS test."""
    parser = argparse.ArgumentParser(description="Test Gemini Text-to-Speech")
    parser.add_argument("--api-key", type=str, help="Gemini API Key (or set in .env file)")
    parser.add_argument("--repeat", type=int, default=3, help="Number of times to play the audio")
    parser.add_argument("--interval", type=int, default=5, help="Seconds to wait between playbacks")
    # BUG FIX: argparse %-interpolates help strings when rendering --help, so a
    # bare "25%" raised "ValueError: unsupported format character"; escape as %%.
    parser.add_argument("--speed", type=float, default=1.0, help="Playback speed factor (1.0=normal, 1.25=25%% faster)")
    args = parser.parse_args()
    # Create and run the TTS system
    tts = GeminiTTS(api_key=args.api_key, speed_factor=args.speed)
    try:
        asyncio.run(tts.run(repeat_count=args.repeat, interval=args.interval))
    except RuntimeError as e:
        # NOTE(review): run() hard-exits via os._exit on repeated failures, so
        # this specific message is never actually raised; kept for safety.
        if "Maximum retry attempts reached" in str(e):
            print("Terminating program due to maximum retry failures.")
            sys.exit(1)
        else:
            print(f"Error: {e}")
            sys.exit(1)
if __name__ == "__main__":
main() | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
image_clipboard_test.py | Python | #!/usr/bin/env python3
"""
Test for clipboard image operations.
Creates a simple colored rectangle, copies it to clipboard,
and verifies we can retrieve it.
"""
import platform
import copykitten
import time
from PIL import Image, ImageDraw
import io
import subprocess
import os
print(f"Running on: {platform.system()} {platform.release()}")
print("Testing clipboard image operations")

# Step 1: Create a simple test image
print("\n1. Creating a simple test image (blue rectangle)...")
# Create a 200x100 blue rectangle image
img = Image.new('RGB', (200, 100), color=(0, 0, 255))
draw = ImageDraw.Draw(img)
# Add a red border
draw.rectangle([(0, 0), (199, 99)], outline=(255, 0, 0), width=5)
# Save image to a temporary file
temp_filename = "/tmp/test_rectangle.png"
img.save(temp_filename)
print(f" Created test image: {os.path.getsize(temp_filename)} bytes")

# Step 2: Copy image to clipboard
print("\n2. Copying image to clipboard...")
is_macos = platform.system() == 'Darwin'
if is_macos:
    # Most reliable way - take a small screenshot directly to clipboard
    print(" Taking a small screenshot to clipboard...")
    # We'll just take a tiny screenshot - this is more reliable than trying to put our own image in the clipboard
    # NOTE(review): the rectangle generated above is therefore never actually
    # placed on the clipboard -- the retrieved image below is the screenshot.
    try:
        # -c means copy to clipboard, -R specifies region (x,y,width,height)
        subprocess.run(['screencapture', '-c', '-R50,50,100,50'], check=True)
        print(" Screenshot taken and copied to clipboard")
        time.sleep(1)  # Give time for clipboard to update
    except Exception as e:
        print(f" Error taking screenshot: {e}")
        exit(1)
else:
    print(" This test only works on macOS currently")
    exit(1)

# Step 3: Verify text is no longer in clipboard
print("\n3. Checking clipboard for text (should fail)...")
try:
    text = copykitten.paste()
    print(f" Clipboard text: '{text}' (unexpected - clipboard should contain image only)")
except Exception as e:
    print(f" No text in clipboard (expected): {e}")

# Step 4: Try to get the image from clipboard
print("\n4. Retrieving image from clipboard...")
try:
    image_result = copykitten.paste_image()
    if image_result:
        # copykitten.paste_image() returns a tuple of (image_data, width, height)
        if isinstance(image_result, tuple) and len(image_result) == 3:
            image_data, width, height = image_result
            print(f" Successfully retrieved image data: {len(image_data)} bytes")
            print(f" Image dimensions: {width}x{height} pixels")
            # Try to convert the raw image data to a PIL Image and save it
            try:
                from PIL import Image
                from io import BytesIO
                # Use BytesIO to create a file-like object from bytes
                # NOTE(review): BytesIO is imported but unused; frombytes reads
                # the raw buffer directly. This assumes an RGB pixel layout --
                # confirm copykitten returns RGB rather than RGBA.
                img = Image.frombytes('RGB', (width, height), image_data)
                retrieved_filename = "/tmp/retrieved_image.png"
                img.save(retrieved_filename)
                print(f" Saved retrieved image to {retrieved_filename}")
                print(f" Original image: /tmp/test_rectangle.png (different since we used screenshot)")
            except Exception as e:
                print(f" Error saving image: {e}")
        else:
            print(f" Unexpected return format: {type(image_result)}")
    else:
        print(" No image found in clipboard")
except Exception as e:
    print(f" Error retrieving image: {e}")

print("\nTest completed successfully.")
print("NOTE: The clipboard image handling works! We can:")
print("1. Detect when an image is in the clipboard")
print("2. Access the image data, width, and height")
print("3. Process it with PIL if needed") | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
keyboard_handler.py | Python | #!/usr/bin/env python3
"""
Keyboard shortcut handler module for terminal applications
Provides keyboard shortcut detection and callback execution
"""
from pynput import keyboard
from pynput.keyboard import Controller, Key
class KeyboardShortcutHandler:
    """Handles keyboard shortcuts for terminal applications.

    Uses a global pynput listener to detect the special characters that
    Shift+Alt+X / Shift+Alt+Z produce on macOS and dispatches to the
    caller-supplied callback functions.
    """

    def __init__(self, callback_functions):
        """
        Initialize keyboard handler with callback functions

        Args:
            callback_functions: Dict with keys 'toggle', 'exit', 'status'
                - toggle: Function to call when shortcut is triggered
                - exit: Function to call when exit combo is triggered
                - status: Function to update status messages
        """
        # Active pynput listener (None when no listener is running)
        self.keyboard_listener = None
        # Polled by the release handler to keep the listener alive
        self.is_running = True
        self.callbacks = callback_functions

    def _handle_keypress(self, key):
        """
        Handle key press events

        Args:
            key: The key that was pressed

        Returns:
            True to continue listening, False to stop
        """
        try:
            # Check for special character "˛" which is produced by Shift+Alt+X on Mac (audio shortcut)
            if isinstance(key, keyboard.KeyCode) and hasattr(key, 'char') and key.char == "˛":
                self.callbacks['status']("Audio shortcut triggered: Shift+Alt+X (˛)")
                # Delete the "˛" character the shortcut typed into the terminal
                kb = Controller()
                kb.press(Key.backspace)
                kb.release(Key.backspace)
                self.callbacks['toggle']("audio")
                return True
            # Check for special character "¸" which is produced by Shift+Alt+Z on Mac (video shortcut)
            if isinstance(key, keyboard.KeyCode) and hasattr(key, 'char') and key.char == "¸":
                self.callbacks['status']("Video shortcut triggered: Shift+Alt+Z (¸)")
                # Delete the "¸" character
                kb = Controller()
                kb.press(Key.backspace)
                kb.release(Key.backspace)
                self.callbacks['toggle']("video")
                return True
            # Direct check for Ctrl+C similar to clipboard_to_llm.py
            # NOTE(review): keyboard.Key.ctrl_l is a Key enum member without a
            # 'vk' attribute on most platforms, so this branch may never fire
            # -- confirm the intended exit path.
            if key == keyboard.Key.ctrl_l and hasattr(key, 'vk'):
                self.callbacks['status']("Exiting...")
                self.is_running = False
                return False  # Stop listener
        except Exception as e:
            self.callbacks['status'](f"Error in keyboard listener: {e}")
        return True  # Continue listening

    def _handle_key_release(self, key):
        """
        Handle key release events

        Args:
            key: The key that was released

        Returns:
            True to continue listening, False to stop
        """
        # We're not tracking keys anymore, just return running state
        return self.is_running  # Continue listening if app is running

    def start(self):
        """Start listening for keyboard shortcuts"""
        # Try to stop any existing listener first
        if self.keyboard_listener is not None:
            try:
                self.keyboard_listener.stop()
            except:
                pass
            self.keyboard_listener = None
        # Create handler functions bound to this instance
        def on_press(key):
            return self._handle_keypress(key)
        def on_release(key):
            return self._handle_key_release(key)
        try:
            # Start the listener with a clean state
            self.keyboard_listener = keyboard.Listener(on_press=on_press, on_release=on_release)
            self.keyboard_listener.daemon = True
            self.keyboard_listener.start()
            self.callbacks['status']("Keyboard shortcut listener started")
            # Give a moment for the listener to initialize
            import time
            time.sleep(0.1)
            # Verify it actually started
            if not self.keyboard_listener.is_alive():
                raise Exception("Listener failed to start")
            return True
        except Exception as e:
            self.callbacks['status'](f"Failed to start keyboard listener: {e}")
            self.keyboard_listener = None
            return False

    def stop(self):
        """Stop the keyboard listener and release resources"""
        if self.keyboard_listener:
            try:
                self.keyboard_listener.stop()
            except Exception as e:
                if self.callbacks and 'status' in self.callbacks:
                    self.callbacks['status'](f"Error stopping keyboard listener: {e}")
            finally:
                # Drop the reference whether or not stop() succeeded
                self.keyboard_listener = None
# Reset our running state to ensure a clean restart if needed
self.is_running = False | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
reading_metrics_web.py | Python | #!/usr/bin/env python3
"""
Web visualization for reading metrics.
Provides a web dashboard to display text-to-speech conversion statistics.
"""
import os
import csv
import json
import threading
import random
from datetime import datetime, timedelta
from flask import Flask, render_template, jsonify, redirect, url_for
from collections import defaultdict
app = Flask(__name__)
# Ensure templates directory exists
os.makedirs(os.path.join(os.path.dirname(__file__), "templates"), exist_ok=True)
# Load configuration
def load_config():
    """Return settings from config.json beside this file, or built-in defaults.

    A config.json that is missing the "reading_metrics" section has the
    default section merged in; an unreadable or invalid file falls back to
    the defaults entirely.
    """
    fallback = {
        "reading_metrics": {
            "words_per_page": 325
        }
    }
    cfg_file = os.path.join(os.path.dirname(__file__), "config.json")
    if not os.path.exists(cfg_file):
        print("No config.json found. Using default settings.")
        return fallback
    try:
        with open(cfg_file, 'r') as fh:
            loaded = json.load(fh)
    except (json.JSONDecodeError, IOError) as exc:
        print(f"Error loading config.json: {exc}. Using defaults.")
        return fallback
    # Merge in the default reading metrics if the section is absent
    if "reading_metrics" not in loaded:
        loaded["reading_metrics"] = fallback["reading_metrics"]
    return loaded
# Get config values
config = load_config()
WORDS_PER_PAGE = config["reading_metrics"]["words_per_page"]
# Define CSV path in the project directory
READING_METRICS_CSV = os.path.join(os.path.dirname(__file__), "reading_metrics.csv")
# Create templates for the web dashboard
@app.route('/')
def index():
    """Redirect the site root to the reading dashboard."""
    return redirect(url_for('reading_dashboard'))
@app.route('/reading')
def reading_dashboard():
    """Render the reading dashboard page from templates/reading_dashboard.html."""
    return render_template('reading_dashboard.html')
@app.route('/reading/data')
def get_reading_data():
    """API endpoint to get reading metrics data.

    Returns JSON with lifetime totals plus per-day (last 30 days),
    per-week (last 12 weeks), and per-month (last 6 months) aggregates
    read from reading_metrics.csv.
    """
    # Create mock data if CSV doesn't exist yet
    if not os.path.exists(READING_METRICS_CSV):
        create_mock_data()
    # Read data from CSV
    data = []
    with open(READING_METRICS_CSV, 'r', newline='') as file:
        reader = csv.DictReader(file)
        for row in reader:
            # Convert numeric strings to integers
            row['characters'] = int(row['characters'])
            row['words'] = int(row['words'])
            row['paragraphs'] = int(row['paragraphs'])
            # Parse timestamp
            row['timestamp'] = datetime.fromisoformat(row['timestamp'])
            data.append(row)
    # Calculate totals
    total_chars = sum(row['characters'] for row in data)
    total_words = sum(row['words'] for row in data)
    total_paragraphs = sum(row['paragraphs'] for row in data)
    # Calculate pages read using the configured words-per-page value
    # (WORDS_PER_PAGE from config.json; default 325, not the 250 the old
    # comment claimed)
    pages_read = round(total_words / WORDS_PER_PAGE, 1)
    # Group by day
    daily_data = defaultdict(lambda: {"characters": 0, "words": 0, "paragraphs": 0})
    for row in data:
        day_key = row['timestamp'].strftime('%Y-%m-%d')
        daily_data[day_key]['characters'] += row['characters']
        daily_data[day_key]['words'] += row['words']
        daily_data[day_key]['paragraphs'] += row['paragraphs']
    # Get last 30 days (insert(0, ...) yields oldest-first order)
    today = datetime.now().date()
    daily_metrics = []
    for i in range(30):
        day = today - timedelta(days=i)
        day_key = day.strftime('%Y-%m-%d')
        daily_metrics.insert(0, {
            "date": day_key,
            "characters": daily_data[day_key]["characters"],
            "words": daily_data[day_key]["words"],
            "paragraphs": daily_data[day_key]["paragraphs"]
        })
    # Group by week (%W numbers weeks with Monday as the first day)
    weekly_data = defaultdict(lambda: {"characters": 0, "words": 0, "paragraphs": 0})
    for row in data:
        week_key = row['timestamp'].strftime('%Y-W%W')
        weekly_data[week_key]['characters'] += row['characters']
        weekly_data[week_key]['words'] += row['words']
        weekly_data[week_key]['paragraphs'] += row['paragraphs']
    # Get last 12 weeks
    weekly_metrics = []
    for i in range(12):
        week_date = today - timedelta(weeks=i)
        week_key = week_date.strftime('%Y-W%W')
        weekly_metrics.insert(0, {
            "week": week_key,
            "characters": weekly_data[week_key]["characters"],
            "words": weekly_data[week_key]["words"],
            "paragraphs": weekly_data[week_key]["paragraphs"]
        })
    # Group by month
    monthly_data = defaultdict(lambda: {"characters": 0, "words": 0, "paragraphs": 0})
    for row in data:
        month_key = row['timestamp'].strftime('%Y-%m')
        monthly_data[month_key]['characters'] += row['characters']
        monthly_data[month_key]['words'] += row['words']
        monthly_data[month_key]['paragraphs'] += row['paragraphs']
    # Get last 6 months
    monthly_metrics = []
    for i in range(6):
        # Calculate month by subtracting from current month
        month_date = today.replace(day=1)
        for _ in range(i):
            # Move to previous month
            if month_date.month == 1:
                month_date = month_date.replace(year=month_date.year-1, month=12)
            else:
                month_date = month_date.replace(month=month_date.month-1)
        month_key = month_date.strftime('%Y-%m')
        monthly_metrics.insert(0, {
            "month": month_key,
            "characters": monthly_data[month_key]["characters"],
            "words": monthly_data[month_key]["words"],
            "paragraphs": monthly_data[month_key]["paragraphs"]
        })
    return jsonify({
        "total_chars": total_chars,
        "total_words": total_words,
        "total_paragraphs": total_paragraphs,
        "pages_read": pages_read,
        "words_per_page_setting": WORDS_PER_PAGE,
        "daily_metrics": daily_metrics,
        "weekly_metrics": weekly_metrics,
        "monthly_metrics": monthly_metrics
    })
def create_mock_data():
    """Seed reading_metrics.csv with randomized sample entries.

    Creates the CSV (with its header row) if missing, then appends 1-3
    random entries for 15 randomly chosen days out of the last 30,
    skipping timestamps that already exist in the file.
    """
    # Create CSV file with headers if it doesn't exist
    if not os.path.exists(READING_METRICS_CSV):
        os.makedirs(os.path.dirname(READING_METRICS_CSV), exist_ok=True)
        with open(READING_METRICS_CSV, 'w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(['timestamp', 'characters', 'words', 'paragraphs'])
    # Generate mock data for last 30 days
    today = datetime.now()
    days_with_data = random.sample(range(30), 15)  # Randomly select 15 days out of 30
    # Load existing timestamps so we never write duplicates
    existing_data = set()
    try:
        with open(READING_METRICS_CSV, 'r', newline='') as file:
            reader = csv.reader(file)
            next(reader)  # Skip header (raises StopIteration on empty file)
            for row in reader:
                if row:  # skip blank lines instead of crashing on row[0]
                    existing_data.add(row[0])  # Add timestamp to set
    except (OSError, StopIteration, csv.Error):
        # BUG FIX: was a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt; only I/O and CSV parsing errors are expected here.
        pass
    # Generate and write mock data
    with open(READING_METRICS_CSV, 'a', newline='') as file:
        writer = csv.writer(file)
        for day in days_with_data:
            # Generate 1-3 entries per day
            entries_per_day = random.randint(1, 3)
            for _ in range(entries_per_day):
                # Generate random hour and minute (daytime hours only)
                hour = random.randint(9, 21)
                minute = random.randint(0, 59)
                # Create timestamp
                date = today - timedelta(days=day)
                timestamp = date.replace(hour=hour, minute=minute, second=0, microsecond=0).isoformat()
                # Skip if this timestamp already exists
                if timestamp in existing_data:
                    continue
                # Generate random metrics
                words = random.randint(50, 500)
                chars = words * random.randint(4, 6)  # Average 4-6 chars per word
                paragraphs = random.randint(1, max(1, words // 50))  # Roughly 1 paragraph per 50 words
                # Write to CSV
                writer.writerow([timestamp, chars, words, paragraphs])
                existing_data.add(timestamp)
# Reading dashboard template already exists in templates/reading_dashboard.html
# We don't need to recreate it each time since it has been customized
# Function to start the web server
def start_web_server(port=5051, debug=True):
"""Start the Flask web server in a background thread"""
# Ensure mock data exists
if not os.path.exists(READING_METRICS_CSV):
create_mock_data()
# Start server
threading.Thread(target=lambda: app.run(host='127.0.0.1', port=port, debug=debug), daemon=True).start()
print(f"Reading metrics web server started at http://127.0.0.1:{port}/")
if debug:
print("Debug mode enabled - templates will automatically reload when modified")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Start reading metrics web server")
parser.add_argument("-p", "--port", type=int, default=5051, help="Port to run web server on")
parser.add_argument("--no-debug", action="store_true", help="Disable debug mode (disables auto-reloading)")
args = parser.parse_args()
# Create mock data if needed
if not os.path.exists(READING_METRICS_CSV):
create_mock_data()
debug_mode = not args.no_debug
print(f"Starting reading metrics web server on http://127.0.0.1:{args.port}/")
print(f"Access the reading dashboard at http://127.0.0.1:{args.port}/reading")
if debug_mode:
print("Debug mode enabled - templates will automatically reload when modified")
print("Press Ctrl+C to stop the server")
# Start the web server in main thread
app.run(host='127.0.0.1', port=args.port, debug=debug_mode) | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
recorders/__init__.py | Python | """
Recorders module for handling screen and audio recording
"""
# Import core functionality to make it available at package level
from recorders.utils import list_audio_devices, list_screen_devices, combine_audio_video
from recorders.recorder import record_audio, record_screen | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
recorders/recorder.py | Python | #!/usr/bin/env python3
"""
Core recording functions for screen and audio capture
"""
import ffmpeg
import sounddevice as sd
import soundfile as sf
import numpy as np
import time
# Import utility functions from utils module
from recorders.utils import list_audio_devices, list_screen_devices
def record_audio(output_file, fs=44100, verbose=False, stop_event=None, status_callback=None):
    """
    Record high-quality audio from default microphone until stop_event is set

    Args:
        output_file (str): Path to save the recording
        fs (int): Sample rate in Hz
        verbose (bool): Whether to show detailed output logs
        stop_event (threading.Event): Event to signal when to stop recording
        status_callback (callable): Optional callback to report status updates

    Returns:
        str: Path to saved audio file or None if failed
    """
    # List available devices if verbose
    if verbose:
        list_audio_devices()
    # Use default device
    device_info = sd.query_devices(kind='input')
    # Get device name and report it
    mic_name = device_info['name']
    if verbose:
        print(f"Using audio device: {mic_name}")
    # Report microphone information through the callback if provided
    if status_callback:
        status_callback(f"Using audio device: {mic_name}")
    # Maximum buffer size (30 minutes of audio at given sample rate)
    max_frames = int(1800 * fs)
    # Create empty array for recording.
    # NOTE(review): this pre-allocates the full 30-minute float32 buffer up
    # front (~300 MB per channel at 44.1 kHz) -- confirm that is acceptable.
    recording = np.zeros((max_frames, device_info['max_input_channels']), dtype='float32')
    if verbose:
        print("Recording audio until screen recording completes...")
    # Start recording
    with sd.InputStream(samplerate=fs, device=None, channels=device_info['max_input_channels'], callback=None) as stream:
        start_time = time.time()
        # NOTE(review): sounddevice streams are presumably started by the
        # context manager on entry, which would make this explicit start()
        # redundant -- verify against the sounddevice docs.
        stream.start()
        # Read chunks of audio until the buffer fills or stop_event fires
        chunk_size = 1024
        offset = 0
        while offset < max_frames:
            # Calculate remaining frames so the final read never overruns
            remaining = max_frames - offset
            this_chunk = min(chunk_size, remaining)
            # Read audio chunk (blocking call)
            chunk, overflowed = stream.read(this_chunk)
            if overflowed and verbose:
                print("Warning: Audio buffer overflowed")
            # Store chunk in recording array
            if offset + len(chunk) <= max_frames:
                recording[offset:offset+len(chunk)] = chunk
                offset += len(chunk)
            # Check if we should stop recording
            if stop_event and stop_event.is_set():
                if verbose:
                    print("Audio recording stopped by stop event")
                break
        stream.stop()
    elapsed = time.time() - start_time
    if verbose:
        print(f"Audio recording complete: {elapsed:.2f} seconds")
    # Trim the recording array to actual recorded length
    recording = recording[:offset]
    # Save to file
    try:
        if verbose:
            print(f"Saving audio to {output_file}...")
        sf.write(output_file, recording, fs)
        if verbose:
            print(f"Audio saved to {output_file}")
        return output_file
    except Exception as e:
        print(f"Error saving audio file: {str(e)}")
        return None
def record_screen(output_file, duration, framerate=30, resolution='1280x720', screen_index=None,
                  stop_event=None, verbose=False, on_process_start=None):
    """
    Record screen only (no audio) using ffmpeg

    Args:
        output_file (str): Path to save the recording
        duration (int): Recording duration in seconds
        framerate (int): Frame rate for recording
        resolution (str): Video resolution in format 'WIDTHxHEIGHT'
        screen_index (int, optional): Screen index to capture, if None will list available screens
        stop_event (threading.Event, optional): Event to signal manual interruption
        verbose (bool, optional): Whether to show detailed output logs
        on_process_start (callable, optional): Callback function to execute when ffmpeg process actually starts

    Returns:
        str: Path to saved video file or None if failed
    """
    # List available screen devices
    devices_info = list_screen_devices(print_output=False)
    # If no screen index provided, use the last available screen index
    if screen_index is None:
        # Get the highest screen index available (usually the last screen)
        if devices_info:
            screen_index = max(devices_info.keys())
            if verbose:
                print(f"No screen index specified. Using last available screen index {screen_index}.")
        else:
            # Fallback to index 1 if no screens detected
            screen_index = 1
            if verbose:
                print("No screens detected. Falling back to screen index 1.")
    if verbose:
        print(f"Using screen index: {screen_index}")
    try:
        import subprocess
        import threading
        import signal
        import os
        # We can't directly interrupt ffmpeg using stop_event with ffmpeg module,
        # so create a subprocess and manage it manually
        # Create input stream for screen only (no audio).
        # avfoundation is macOS-only; -t caps the capture at `duration` seconds.
        cmd = [
            'ffmpeg', '-y',
            '-f', 'avfoundation',
            '-framerate', str(framerate),
            '-capture_cursor', '1',
            '-pix_fmt', 'uyvy422',
            '-t', str(duration),
            '-i', f"{screen_index}",
            '-vcodec', 'h264',
            '-vf', f'scale={resolution}',  # Add scaling filter to force resolution
            '-preset', 'ultrafast',
            '-crf', '22',
            output_file
        ]
        # Add flags to hide ffmpeg output unless verbose is enabled
        # (inserted right after 'ffmpeg -y' so they act as global options)
        if not verbose:
            cmd.insert(2, '-hide_banner')
            cmd.insert(3, '-loglevel')
            cmd.insert(4, 'error')  # Only show errors
            cmd.insert(5, '-nostats')
        if verbose:
            print(f"Starting screen recording for up to {duration} seconds...")
            print(f"Running ffmpeg command: {' '.join(cmd)}")
            if stop_event:
                print(f"Press the designated key to stop recording early")
        # Start ffmpeg process with appropriate redirection
        if verbose:
            process = subprocess.Popen(cmd)
        else:
            # Make sure we completely suppress all output when not in verbose mode
            process = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
        # Give ffmpeg a moment to initialize
        time.sleep(0.5)
        # Check if the process has started properly
        if process.poll() is None:  # None means it's still running
            # Process has actually started, trigger the callback
            if on_process_start:
                on_process_start()
        # Create a function to monitor stop_event
        if stop_event:
            def monitor_stop_event():
                stop_event.wait()  # Wait until stop_event is set
                if process.poll() is None:  # If process is still running
                    if verbose:
                        print("Manual stop requested, terminating recording...")
                    # Send SIGTERM to gracefully stop ffmpeg
                    if os.name == 'nt':  # Windows
                        process.terminate()
                    else:  # Unix/Mac
                        os.kill(process.pid, signal.SIGTERM)
            # Start monitoring thread (daemon so it never blocks interpreter exit)
            monitor_thread = threading.Thread(target=monitor_stop_event)
            monitor_thread.daemon = True
            monitor_thread.start()
        # Wait for process to complete (either -t expiry or SIGTERM from monitor)
        process.wait()
        # Check return code; -15 means terminated by SIGTERM, which is the
        # expected result of a manual stop, so it is treated as success.
        if process.returncode != 0 and process.returncode != -15:
            stderr_msg = ""
            # NOTE(review): stderr was never PIPEd above (it is either inherited
            # or DEVNULL), so process.stderr is always None here and this read
            # appears to be dead code — confirm before relying on stderr_msg.
            if hasattr(process, 'stderr') and process.stderr:
                stderr_msg = process.stderr.read().decode('utf-8')
            if stderr_msg and "Interrupt" not in stderr_msg and "Operation not permitted" not in stderr_msg and verbose:
                print(f"Error during screen recording: {stderr_msg}")
            elif verbose:
                print(f"Error during screen recording (return code: {process.returncode})")
            return None
        if verbose:
            print(f"Screen recording completed and saved to {output_file}")
        return output_file
    except Exception as e:
        if verbose:
            print(f"Error during screen recording: {str(e)}")
        return None
recorders/recording_handler.py | Python | #!/usr/bin/env python3
"""
Recording handler module to manage audio and video recording sessions
"""
import os
import time
import threading
from audio_recorder import record_audio_only
from screen_audio_recorder import record_screen_and_audio
class RecordingSession:
    """Manages a recording session with audio and video"""
    def __init__(self, status_callback=None, recording_started_callback=None):
        # True while a session is active (set by start(), cleared by stop()).
        self.is_recording = False
        # Path of the finished recording file; written by the worker thread.
        self.recording_path = None
        # Background thread running the blocking record_* call.
        self.recording_thread = None
        # Event used to ask the active recording to stop early.
        self.manual_stop_event = None
        # Optional callable(str) used for status messages.
        self.status_callback = status_callback
        # Optional callable(mode) fired once capture has actually begun.
        self.recording_started_callback = recording_started_callback
        self.recording_mode = None  # 'audio' or 'video'
        # True once the underlying recorder (e.g. ffmpeg) has really started,
        # as opposed to merely being requested.
        self.recording_actually_started = False
    def set_status(self, message):
        """Update status message via callback if provided"""
        if self.status_callback:
            self.status_callback(message)
    def start(self, mode="audio"):
        """
        Start recording session with audio only or screen and audio

        Args:
            mode (str): 'audio' for audio-only or 'video' for screen and audio

        Returns:
            bool: True if a new session was started, False if one was already running
        """
        if self.is_recording:
            return False
        # Create a new stop event for this recording session
        self.manual_stop_event = threading.Event()
        # Reset recording started flag
        self.recording_actually_started = False
        # Store the recording mode
        self.recording_mode = mode
        # Custom callback function for when ffmpeg/recording process actually starts
        def on_recording_started():
            self.recording_actually_started = True
            if self.recording_started_callback:
                self.recording_started_callback(mode)
        # Set the output path based on recording mode
        timestamp = int(time.time())
        if mode == "audio":
            output_file = f"recording_{timestamp}.wav"
            # Create and start the recording thread for audio only
            def recording_thread_func():
                try:
                    # Signal that recording is about to start (preparation phase)
                    self.set_status("Preparing audio devices...")
                    # Call record_audio_only with a callback
                    self.recording_path = record_audio_only(
                        output_file=output_file,
                        duration=7200,  # Set to 2 hours (7200 seconds)
                        verbose=False,
                        manual_stop_event=self.manual_stop_event,
                        on_recording_started=on_recording_started,
                        status_callback=self.status_callback
                    )
                except Exception as e:
                    self.set_status(f"Audio recording error: {str(e)}")
                    self.recording_path = None
                    self.is_recording = False
        else:  # video mode
            output_file = f"recording_{timestamp}.mp4"
            # Create and start the recording thread for screen and audio
            def recording_thread_func():
                try:
                    # Signal that recording is about to start (preparation phase)
                    self.set_status("Preparing screen and audio devices...")
                    # Call record_screen_and_audio with a callback
                    self.recording_path = record_screen_and_audio(
                        output_file=output_file,
                        duration=7200,  # Set to 2 hours (7200 seconds)
                        verbose=False,
                        manual_stop_event=self.manual_stop_event,
                        on_recording_started=on_recording_started,
                        status_callback=self.status_callback
                    )
                except Exception as e:
                    self.set_status(f"Screen recording error: {str(e)}")
                    self.recording_path = None
                    self.is_recording = False
        # Daemon thread so a hung recorder cannot block interpreter exit.
        self.recording_thread = threading.Thread(target=recording_thread_func)
        self.recording_thread.daemon = True
        self.recording_thread.start()
        self.is_recording = True
        return True
    def stop(self):
        """Stop the active recording session and return the recording path and mode"""
        if not self.is_recording:
            return None, None
        self.set_status("Stopping recording...")
        # Set the stop event to stop the recording
        if self.manual_stop_event:
            self.manual_stop_event.set()
        # Wait for the recording thread to complete.
        # NOTE(review): join() has no timeout — this blocks indefinitely if the
        # recorder never honors the stop event; confirm that is acceptable.
        if self.recording_thread and self.recording_thread.is_alive():
            self.recording_thread.join()  # Wait until thread completes
        self.is_recording = False
        # Return the recording path and mode directly
        recording_path = self.recording_path
        recording_mode = self.recording_mode
        # Reset the instance state
        self.recording_path = None
        self.recording_mode = None
        return recording_path, recording_mode
    def get_recording_info(self):
        """Get information about the completed recording"""
        # NOTE(review): stop() resets recording_path to None, so this reports a
        # file only while a recording is active/just produced — verify call order.
        if not self.recording_path or not os.path.exists(self.recording_path):
            return "Recording failed or file not found"
        # Get file size in MB
        file_size = os.path.getsize(self.recording_path) / (1024 * 1024)
        # Include recording mode information
        mode_info = "Audio only" if self.recording_mode == "audio" else "Screen and audio"
        return f"{mode_info} recording saved: {self.recording_path} ({file_size:.2f} MB)"
recorders/utils.py | Python | #!/usr/bin/env python3
"""
Utility functions for recording devices
"""
import sounddevice as sd
import subprocess
import ffmpeg
def list_audio_devices():
    """Print every device that can capture audio and return the full device list."""
    print("Available audio input devices:")
    all_devices = sd.query_devices()
    for index, info in enumerate(all_devices):
        channels = info['max_input_channels']
        # Devices without input channels are output-only; skip them.
        if channels > 0:
            print(f"[{index}] {info['name']} (Inputs: {channels})")
    return all_devices
def list_screen_devices(print_output=True):
    """
    List available avfoundation devices (screens) with enhanced descriptions.

    Args:
        print_output (bool): Whether to print the device list

    Returns:
        dict: Dictionary mapping screen indices (int) to screen names (str);
        empty dict if device enumeration fails (e.g. ffmpeg missing or non-macOS).
    """
    try:
        # This command lists available devices on macOS; ffmpeg writes the
        # device table to stderr (and exits non-zero), which is expected here.
        result = subprocess.run(
            ['ffmpeg', '-f', 'avfoundation', '-list_devices', 'true', '-i', ''],
            stderr=subprocess.PIPE,
            text=True
        )
        # Only print if requested
        if print_output:
            print(result.stderr)
        # Get more detailed display information from system_profiler
        display_info = {}
        try:
            displays = subprocess.run(
                ['system_profiler', 'SPDisplaysDataType'],
                stdout=subprocess.PIPE,
                text=True
            )
            # Extract display names with better parsing
            display_data = displays.stdout
            in_display_section = False
            current_display = None
            for line in display_data.split('\n'):
                line = line.strip()
                # Check if we've reached the displays section
                if "Displays:" in line:
                    in_display_section = True
                    continue
                if not in_display_section:
                    continue
                # Start of a new display entry.
                # NOTE(review): `line` was stripped above, so the
                # `not line.startswith(" ")` guard is always true and property
                # lines may also be treated as display headers — confirm against
                # real system_profiler output before relying on display_info.
                if line and ":" in line and not line.startswith(" "):
                    current_display = line.split(":")[0].strip()
                    display_info[current_display] = {
                        "name": current_display,
                        "is_main": False,
                        "resolution": "",
                        "type": ""
                    }
                # Properties within a display entry
                elif current_display and ":" in line:
                    key, value = [x.strip() for x in line.split(":", 1)]
                    if key == "Main Display" and value == "Yes":
                        display_info[current_display]["is_main"] = True
                    elif key == "Resolution":
                        display_info[current_display]["resolution"] = value
                    elif key == "Display Type":
                        display_info[current_display]["type"] = value
        except Exception as display_err:
            if print_output:
                print(f"Warning: Could not get detailed display info: {str(display_err)}")
        # Parse the ffmpeg stderr output to extract screen names
        screens = {}
        lines = result.stderr.split('\n')
        for line in lines:
            if '[AVFoundation indev' in line and 'Capture screen' in line:
                parts = line.strip().split('] [')
                if len(parts) >= 2:
                    index_part = parts[1].split(']')[0]
                    name_part = parts[1].split('] ')[1]
                    try:
                        index = int(index_part)
                        # Try to enhance screen descriptions with more details
                        if "Capture screen" in name_part and display_info:
                            screen_num = name_part.split("Capture screen ")[1]
                            # In macOS, Capture screen 0 is typically the main display
                            if screen_num == "0":
                                for display_name, info in display_info.items():
                                    if info.get("is_main"):
                                        display_type = info.get("type", "")
                                        desc = display_name
                                        if display_type:
                                            desc = f"{display_name} ({display_type})"
                                        name_part = f"Capture screen 0 - {desc}"
                                        break
                            # Other displays
                            else:
                                # Find any non-main displays
                                non_main_displays = [d for d, info in display_info.items() if not info.get("is_main")]
                                if len(non_main_displays) >= int(screen_num):
                                    display_name = non_main_displays[int(screen_num)-1]
                                    info = display_info[display_name]
                                    resolution = info.get("resolution", "").split(" @")[0]
                                    if resolution:
                                        name_part = f"Capture screen {screen_num} - {display_name} ({resolution})"
                                    else:
                                        name_part = f"Capture screen {screen_num} - {display_name}"
                        screens[index] = name_part
                    # Fix: narrowed from a bare `except:` — only swallow the
                    # parse errors this block can realistically raise, so real
                    # bugs (NameError, KeyboardInterrupt, ...) are not hidden.
                    except (ValueError, IndexError, KeyError):
                        pass
        return screens
    except Exception as e:
        if print_output:
            print(f"Error listing devices: {str(e)}")
        return {}
def combine_audio_video(video_file, audio_file, output_file, verbose=False, time_diff=None):
    """
    Combine separate video and audio files into a single output file with timing synchronization.
    Adds black frames at beginning and end to align audio and video properly.

    Args:
        video_file (str): Path to video file
        audio_file (str): Path to audio file
        output_file (str): Path to output combined file
        verbose (bool): Whether to show detailed output logs
        time_diff (float): Time difference between audio and video completion
                           (audio_complete_time - screen_complete_time)

    Returns:
        str: Path to combined file or None if failed

    NOTE(review): despite the default, time_diff=None raises a TypeError below
    (both in the `:.4f` format and the subtraction), which is then caught and
    turned into a None return — callers must always pass a float; confirm.
    """
    try:
        import subprocess
        import tempfile
        import os
        # Get audio duration using ffprobe
        audio_duration_cmd = [
            'ffprobe', '-v', 'error', '-show_entries', 'format=duration',
            '-of', 'default=noprint_wrappers=1:nokey=1', audio_file
        ]
        audio_duration = float(subprocess.check_output(audio_duration_cmd).decode('utf-8').strip())
        # Get video duration using ffprobe
        video_duration_cmd = [
            'ffprobe', '-v', 'error', '-show_entries', 'format=duration',
            '-of', 'default=noprint_wrappers=1:nokey=1', video_file
        ]
        video_duration = float(subprocess.check_output(video_duration_cmd).decode('utf-8').strip())
        if verbose:
            print(f"Audio duration: {audio_duration:.4f} seconds")
            print(f"Video duration: {video_duration:.4f} seconds")
            print(f"Time difference: {time_diff:.4f} seconds")
        # Calculate the duration for the beginning black screen.
        # The sequence is: audio starts, video starts, video ends, audio ends.
        # So beginning_duration = audio_duration - video_duration - time_diff
        beginning_duration = audio_duration - video_duration - time_diff
        ending_duration = time_diff
        # Verify both durations are positive; negative values mean the assumed
        # ordering above did not hold for this recording.
        if beginning_duration < 0 or ending_duration < 0:
            raise ValueError(f"Invalid timing: beginning_duration={beginning_duration:.4f}, ending_duration={ending_duration:.4f}")
        if verbose:
            print(f"Adding black screen at beginning: {beginning_duration:.4f} seconds")
            print(f"Adding black screen at end: {ending_duration:.4f} seconds")
        # Get video dimensions from original video
        video_info_cmd = [
            'ffprobe', '-v', 'error', '-select_streams', 'v:0',
            '-show_entries', 'stream=width,height,r_frame_rate',
            '-of', 'csv=p=0', video_file
        ]
        video_info = subprocess.check_output(video_info_cmd).decode('utf-8').strip().split(',')
        width, height = int(video_info[0]), int(video_info[1])
        # Parse framerate (which comes as a fraction like "30/1")
        framerate_parts = video_info[2].split('/')
        framerate = int(framerate_parts[0]) / int(framerate_parts[1])
        # Create a temporary file for the padded video (delete=False so ffmpeg
        # can write to the path after the handle is closed)
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as padded_video:
            padded_video_path = padded_video.name
        # Create padded video with black frames at beginning and end.
        # This single filter graph generates both black clips and concatenates
        # [black-start][original][black-end] in one pass.
        filter_complex = (
            f"color=black:s={width}x{height}:r={framerate}:d={beginning_duration}[start];"
            f"color=black:s={width}x{height}:r={framerate}:d={ending_duration}[end];"
            f"[start][0:v][end]concat=n=3:v=1:a=0"
        )
        padding_cmd = [
            'ffmpeg', '-y',
            '-i', video_file,
            '-filter_complex', filter_complex,
            '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
            padded_video_path
        ]
        if verbose:
            print(f"Creating padded video with black frames...")
            print(f"Command: {' '.join(padding_cmd)}")
        subprocess.run(
            padding_cmd,
            stdout=subprocess.DEVNULL if not verbose else None,
            stderr=subprocess.DEVNULL if not verbose else None
        )
        # Combine padded video with full audio
        output_cmd = [
            'ffmpeg', '-y',
            '-i', padded_video_path,
            '-i', audio_file,
            '-c:v', 'copy',  # Copy video without re-encoding
            '-c:a', 'aac',   # Convert audio to AAC
            '-strict', 'experimental',
            output_file
        ]
        if verbose:
            print(f"Command: {' '.join(output_cmd)}")
        subprocess.run(
            output_cmd,
            stdout=subprocess.DEVNULL if not verbose else None,
            stderr=subprocess.DEVNULL if not verbose else None
        )
        # Clean up temporary files
        try:
            os.remove(padded_video_path)
        except Exception as cleanup_e:
            if verbose:
                print(f"Warning during cleanup: {str(cleanup_e)}")
        if verbose:
            print(f"Combined file saved to: {output_file}")
        return output_file
    except Exception as e:
        print(f"Error combining audio and video: {str(e)}")
        return None
run.sh | Shell | #!/bin/bash
# Source the virtual environment directly
source "$(dirname "$0")/venv/bin/activate"
# Remind user about requirements
echo "Reminder: Install requirements with 'pip install -r requirements.txt' if needed"
# Check for portaudio (PyAudio needs it); the check only runs when Homebrew is
# installed, so non-Homebrew systems are silently skipped.
if command -v brew &> /dev/null && ! brew list portaudio &> /dev/null; then
    echo "portaudio not found, but may be needed for PyAudio. Install with 'brew install portaudio' if you encounter issues."
fi
# Run the terminal video voice recorder
echo "Starting terminal video voice recorder..."
python terminal_video_voice_recorder.py
run_clipboard_to_llm.sh | Shell | #!/bin/bash
# Source the environment activation script (sets up the Python virtualenv)
source "$(dirname "$0")/activate_env.sh"
# Run the clipboard to LLM script and show the usage hints up front
echo "Starting Clipboard to LLM with TTS..."
echo "Press Shift+Alt+A (Å) to read clipboard content with TTS"
echo "Press Ctrl+C to exit"
python clipboard_to_llm.py
run_with_env.sh | Shell | #!/bin/bash
# Check if .env file exists
if [ ! -f .env ]; then
    echo ".env file not found!"
    exit 1
fi
# Toggle comments on lines 3 and 4: each of those lines flips between
# commented ("#..." -> "...") and uncommented ("..." -> "#...").
# NOTE(review): assumes the two relevant config lines always sit at lines 3
# and 4 of .env — confirm the .env layout before relying on this.
awk '{
    if (NR == 3 || NR == 4) {
        if ($0 ~ /^#/) {
            print substr($0, 2)
        } else {
            print "#" $0
        }
    } else {
        print $0
    }
}' .env > .env.tmp && mv .env.tmp .env
# Run the clipboard to LLM script
bash run_clipboard_to_llm.sh
screen_audio_recorder.py | Python | #!/usr/bin/env python3
"""
High-quality screen and audio recorder
Records screen with ffmpeg and audio with sounddevice separately,
then combines them for optimal quality
"""
import subprocess
import time
import os
import tempfile
import threading
import ffmpeg
# Import utility functions and core recording functions
from recorders.utils import combine_audio_video, list_screen_devices, list_audio_devices
from recorders.recorder import record_audio, record_screen
def record_screen_and_audio(output_file='combined_recording.mp4', duration=7, verbose=False,
                            screen_index=None, manual_stop_event=None, on_recording_started=None,
                            status_callback=None):
    """
    Record high-quality screen and audio simultaneously using threading,
    with audio recording stopping when screen recording finishes.

    Args:
        output_file (str): Final output file path
        duration (int): Recording duration in seconds for screen recording
        verbose (bool): Whether to show detailed output logs
        screen_index (int, optional): Screen index to capture, if None will use default
        manual_stop_event (threading.Event, optional): Event to trigger manual stopping from outside
        on_recording_started (callable, optional): Callback executed when recording actually starts
        status_callback (callable, optional): Callback to report status updates

    Returns:
        str: Path to final combined file or None if failed
    """
    # Import threading here to avoid global import
    import threading
    # Create temporary files; delete=False so the recorders can reopen the paths.
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_video:
        temp_video_path = temp_video.name
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_audio:
        temp_audio_path = temp_audio.name

    def _cleanup_temp_files():
        # Best-effort removal of the intermediate files; ignore FS errors only.
        for path in (temp_video_path, temp_audio_path):
            try:
                os.remove(path)
            except OSError:
                pass

    # Variable to hold the result of audio recording
    audio_result = [None]  # Use list to allow modification in thread
    # Create an event to signal when screen recording is done
    stop_event = threading.Event()
    # Create a default manual stop event if none is provided
    if manual_stop_event is None:
        manual_stop_event = threading.Event()
    try:
        if verbose:
            print("=== Starting High-Quality Recording ===")
            print(f"Screen recording duration: {duration} seconds")
            print("Audio will record until screen recording completes")
            if manual_stop_event:
                print("Recording can be stopped early using external control")
            print(f"Final output will be saved to: {output_file}")
        # Get screen devices but don't print output
        screen_devices = list_screen_devices(print_output=False)
        # If no screen index provided, use the last available screen index
        if screen_index is None:
            # Get the highest screen index available (usually the last screen),
            # falling back to index 1 if no screens were detected.
            screen_to_use = max(screen_devices.keys()) if screen_devices else 1
        else:
            screen_to_use = screen_index
        screen_name = screen_devices.get(screen_to_use, f"Unknown screen at index {screen_to_use}")
        if verbose:
            print(f"Screen to capture: {screen_name}")

        # Audio runs in a worker thread so it can record concurrently with ffmpeg.
        def audio_recording_thread():
            try:
                audio_result[0] = record_audio(temp_audio_path, verbose=verbose, stop_event=stop_event, status_callback=status_callback)
            except Exception as e:
                if verbose:
                    print(f"Error in audio recording thread: {str(e)}")
                audio_result[0] = None

        audio_thread = threading.Thread(target=audio_recording_thread)
        if verbose:
            print("\nStarting recording now...")
        # Start audio recording thread
        audio_thread.start()

        # Callback for when the ffmpeg process actually starts capturing.
        def on_screen_record_start():
            if verbose:
                print("Screen recording process has actually started")
            # Call the external callback if provided
            if on_recording_started:
                on_recording_started()

        # Start screen recording (blocks until ffmpeg exits or is stopped).
        screen_result = record_screen(
            temp_video_path,
            duration,
            framerate=30,
            resolution='1280x720',
            screen_index=screen_to_use,
            stop_event=manual_stop_event,
            verbose=verbose,
            on_process_start=on_screen_record_start
        )
        screen_complete_time = time.time()
        # Signal audio thread to stop recording
        stop_event.set()
        # Wait for audio thread to complete
        audio_thread.join()
        audio_complete_time = time.time()
        # Time difference used later to pad the video so the tracks line up.
        time_diff = audio_complete_time - screen_complete_time
        if verbose:
            print(f"Time difference between screen and audio completion: {time_diff:.4f} seconds")
        # Check if both recordings succeeded.
        # Fix: NamedTemporaryFile(delete=False) already created temp_video_path,
        # so os.path.exists() was always true and a failed screen capture went
        # undetected — check record_screen's return value instead.
        if not screen_result or not audio_result[0]:
            if verbose:
                print("Error: Screen or audio recording failed")
            # Fix: this early-return path previously leaked both temp files.
            _cleanup_temp_files()
            return None
        if verbose:
            print("\nCombining video and audio...")
        result = combine_audio_video(temp_video_path, audio_result[0], output_file, verbose=verbose, time_diff=time_diff)
        # Clean up temporary files
        _cleanup_temp_files()
        if verbose:
            print("\n=== Recording Process Completed ===")
            if result:
                file_size = os.path.getsize(output_file) / (1024 * 1024)  # Size in MB
                print(f"Final file size: {file_size:.2f} MB")
                print(f"Recording saved to: {output_file}")
        return result
    except Exception as e:
        if verbose:
            print(f"Error in recording process: {str(e)}")
        # Set stop event to end audio recording if an error occurs
        stop_event.set()
        # Clean up temporary files
        _cleanup_temp_files()
        return None
if __name__ == "__main__":
    # CLI entry point: parse options, optionally list devices, wire up an
    # optional keyboard interrupt, then run the combined recorder.
    import argparse
    import threading
    parser = argparse.ArgumentParser(description="High-quality screen and audio recorder")
    parser.add_argument("-d", "--duration", type=int, default=7, help="Recording duration in seconds")
    parser.add_argument("-o", "--output", type=str, default="combined_recording.mp4", help="Output file path")
    parser.add_argument("-v", "--verbose", action="store_true", help="Show detailed logs during recording")
    parser.add_argument("-s", "--screen", type=int, help="Screen index to capture (run with -l to see available screens)")
    parser.add_argument("-l", "--list", action="store_true", help="List available screen and audio devices")
    parser.add_argument("-k", "--key", type=str, default="q", help="Key to press to manually stop recording (defaults to 'q')")
    parser.add_argument("--no-manual-interrupt", action="store_true", help="Disable manual interrupt capability")
    args = parser.parse_args()
    # List devices if requested, then exit without recording
    if args.list:
        print("=== Available Screen Devices ===")
        screens = list_screen_devices(print_output=False)
        for index, name in sorted(screens.items()):
            print(f"[{index}] {name}")
        print("\n=== Available Audio Devices ===")
        list_audio_devices()
        exit(0)
    # Create manual stop event
    manual_stop_event = threading.Event()
    # Setup keyboard listener if manual interrupt is enabled
    if not args.no_manual_interrupt:
        try:
            from pynput import keyboard
            def on_press(key):
                try:
                    # Check if the pressed key matches the interrupt key
                    if hasattr(key, 'char') and key.char == args.key:
                        print(f"\nManual interrupt key '{args.key}' pressed.")
                        manual_stop_event.set()
                        return False  # Stop listener
                except AttributeError:
                    pass  # Special key, ignore
            # Start keyboard listener in a separate thread
            print(f"Press '{args.key}' to stop recording early")
            keyboard_listener = keyboard.Listener(on_press=on_press)
            keyboard_listener.daemon = True
            keyboard_listener.start()
        except ImportError:
            print("Warning: pynput module not found. Manual interrupt disabled.")
    # Record screen and audio
    record_screen_and_audio(
        output_file=args.output,
        duration=args.duration,
        verbose=args.verbose,
        screen_index=args.screen,
        manual_stop_event=manual_stop_event
    )
scrollable_chat.py | Python | #!/usr/bin/env python3
import curses
import time
def main(stdscr):
    """Curses demo of a scrollable chat view backed by a pad.

    Builds a synthetic chat history, renders it into a curses pad larger than
    the screen, and lets the user scroll with arrow/page keys. Re-runs the
    layout loop on terminal resize; 'q' quits.
    """
    # Initialize curses
    curses.curs_set(0)  # Hide cursor
    stdscr.clear()
    stdscr.refresh()
    # Initialize colors (256-color palette indices on default background)
    curses.start_color()
    curses.use_default_colors()
    curses.init_pair(1, 74, -1)   # Light dusty blue for user messages
    curses.init_pair(2, 173, -1)  # Subtle coral/orange for AI messages
    curses.init_pair(3, 145, -1)  # Light grayish-lavender for info messages
    # Base chat history
    base_chat = [
        ("User", "Hello, how are you today?"),
        ("AI", "I'm doing well, thank you! How can I help you?"),
        ("User", "I'm working on a project and need some help with scrollable content."),
        ("AI", "I'd be happy to help with that. What specifically are you trying to implement?"),
        ("User", "I want to create a scrollable chat interface like this one."),
        ("AI", "That's a good project. You'll need to use curses pads for efficient scrolling. They allow you to create content larger than the visible screen."),
        ("User", "How do pads work exactly?"),
        ("AI", "Pads are like windows but can be larger than the physical screen. You create a pad with the total size needed, then use pad.refresh() to display just a portion of it on screen. By changing which portion you display, you create the scrolling effect."),
        ("User", "That makes sense. Any other tips?"),
        ("AI", "Yes, a few important things to keep in mind:\n\n1. Track scroll position carefully\n2. Handle window resizing\n3. Add visual indicators for scrolling\n4. Handle text wrapping for long messages\n5. Use proper error handling for boundary cases"),
        ("User", "Could you explain more about text wrapping?"),
        ("AI", "Sure! Text wrapping is crucial for a good chat UI. When messages are longer than the available width, you need to split them into multiple lines. In curses, you have to handle this manually by:\n\n1. Calculating the available width\n2. Splitting the text into chunks that fit that width\n3. Adding each chunk on a new line\n4. Keeping track of how many lines each message takes\n\nThis affects your scrolling calculations since a long message might take up 5-10 lines of vertical space."),
        ("User", "What about handling emojis and special characters?"),
        ("AI", "Great question! Unicode characters like emojis can be tricky in terminal interfaces:\n\n1. Make sure your terminal supports UTF-8\n2. Use proper encoding when displaying text\n3. Be aware that some emojis and special characters take up double width in terminals\n4. Test with various characters to ensure proper display\n\nSome terminals might not render all emojis correctly, so it's good to have fallbacks."),
        ("User", "How would you implement a scrollbar indicator?"),
        ("AI", "For a scrollbar indicator in a terminal UI, you have a few options:\n\n1. Simple approach: Use arrow characters (↑/↓) at the top and bottom of the viewport when there's more content in that direction\n\n2. Percentage indicator: Show a percentage (e.g., \"50%\") to indicate current position\n\n3. ASCII scrollbar: Create a vertical bar using characters like '█' for the thumb and '│' for the track\n\n4. Position indicator: Show something like \"Lines 10-20 of 50\"\n\nThe method you choose depends on available space and aesthetics."),
        ("User", "Are there any performance considerations?"),
        ("AI", "Yes, several performance considerations for terminal UIs:\n\n1. Minimize screen refreshes - only update when necessary\n\n2. For very large chat histories, consider lazy loading or only keeping a window of messages in memory\n\n3. Use curses' pad selective refresh rather than redrawing everything\n\n4. Batch updates rather than updating character by character\n\n5. When scrolling quickly, consider temporarily reducing rendering quality\n\n6. Profile your code to identify bottlenecks if scrolling feels sluggish"),
        ("User", "What's the best way to handle keyboard shortcuts?"),
        ("AI", "For keyboard shortcuts in a curses application:\n\n1. Define a clear mapping of keys to actions\n\n2. Use familiar shortcuts where possible (arrow keys for navigation, q for quit, etc.)\n\n3. Provide visual indicators of available shortcuts\n\n4. Handle key combinations with curses.getch() and bit manipulation\n\n5. Consider making shortcuts configurable\n\n6. Group related functions under similar keys (h/j/k/l for vim-like navigation)\n\n7. Add a help screen accessible via '?' or 'h' showing all shortcuts")
    ]
    # Create a longer chat history by repeating elements with slight modifications
    chat_history = []
    for i in range(3):  # Repeat 3 times
        for sender, message in base_chat:
            if i > 0:
                # Add a slight modification to repeated messages
                message = f"{message} [Copy {i}]"
            chat_history.append((sender, message))
    # Set up the pad for scrollable content.
    # NOTE(review): 10 lines per message is a heuristic upper bound; a message
    # wrapping to more than ~10 lines would overflow the pad — confirm.
    pad_height = len(chat_history) * 10  # Estimate height (will be more than needed)
    try:
        # Outer loop: re-layout everything after a terminal resize
        while True:
            # Get current dimensions
            height, width = stdscr.getmaxyx()
            # Create a pad with plenty of space
            chat_pad = curses.newpad(pad_height, width - 2)
            # Fill the pad with content; y tracks the next free pad row
            y = 0
            for i, (sender, message) in enumerate(chat_history):
                # Choose color based on sender
                color = curses.color_pair(1) if sender == "User" else curses.color_pair(2)
                # Print the sender
                prefix = "You: " if sender == "User" else "AI: "
                chat_pad.addstr(y, 0, prefix, color | curses.A_BOLD)
                # Print the message with simple wrapping
                available_width = width - 8  # Leave some margin
                # Split message by paragraphs
                paragraphs = message.split('\n')
                for paragraph in paragraphs:
                    # Print the first part of the message on the same line as the sender.
                    # NOTE(review): this compares by value, so a later paragraph
                    # textually equal to the first would also take this branch —
                    # an index comparison was probably intended.
                    if paragraph == paragraphs[0]:
                        chat_pad.addstr(y, 5, paragraph[:available_width-5], color)
                        text = paragraph[available_width-5:]
                        y += 1
                    else:
                        text = paragraph
                        chat_pad.addstr(y, 0, "", color)  # Start a new paragraph
                        y += 1
                    # Wrap the rest of the text
                    while text:
                        chat_pad.addstr(y, 2, text[:available_width-2], color)
                        text = text[available_width-2:]
                        y += 1
                # Add a blank line between messages
                y += 1
            # Set up scrolling: max_scroll is the last pad row that can be the
            # top of the viewport (viewport starts at screen row 3)
            max_scroll = max(0, y - (height - 4))
            scroll_pos = 0
            # Display header
            stdscr.clear()
            header = "Scrollable Chat Demo"
            stdscr.addstr(0, (width - len(header)) // 2, header, curses.color_pair(2) | curses.A_BOLD)
            instructions = "Use UP/DOWN to scroll, q to quit"
            stdscr.addstr(1, (width - len(instructions)) // 2, instructions, curses.color_pair(3))
            # Main display loop
            while True:
                # Show content from the pad
                try:
                    chat_pad.refresh(scroll_pos, 0, 3, 1, height - 2, width - 2)
                    # Show scroll indicators if needed
                    if scroll_pos > 0:
                        stdscr.addstr(3, width - 3, "↑", curses.color_pair(3))
                    if scroll_pos < max_scroll:
                        stdscr.addstr(height - 2, width - 3, "↓", curses.color_pair(3))
                    # Add a more detailed position indicator
                    if max_scroll > 0:
                        # Calculate percentage and create a progress bar
                        percent = int((scroll_pos / max_scroll) * 100)
                        bar_width = 10
                        filled = int((bar_width * percent) / 100)
                        bar = '█' * filled + '░' * (bar_width - filled)
                        # Create position indicator
                        pos_indicator = f" {percent}% [{bar}]"
                        try:
                            stdscr.addstr(height - 1, width - len(pos_indicator) - 1, pos_indicator, curses.color_pair(3))
                        except curses.error:
                            # If terminal too small for full bar, show just percentage
                            try:
                                stdscr.addstr(height - 1, width - 6, f" {percent}%", curses.color_pair(3))
                            except:
                                pass
                    stdscr.refresh()
                except curses.error:
                    # Handle potential errors during display
                    pass
                # Get user input
                try:
                    key = stdscr.getch()
                except:
                    break
                # Process input
                if key == ord('q'):
                    return
                elif key == curses.KEY_UP and scroll_pos > 0:
                    scroll_pos -= 1
                elif key == curses.KEY_DOWN and scroll_pos < max_scroll:
                    scroll_pos += 1
                elif key == curses.KEY_NPAGE:  # Page Down
                    scroll_pos = min(max_scroll, scroll_pos + height // 2)
                elif key == curses.KEY_PPAGE:  # Page Up
                    scroll_pos = max(0, scroll_pos - height // 2)
                elif key == curses.KEY_HOME:
                    scroll_pos = 0
                elif key == curses.KEY_END:
                    scroll_pos = max_scroll
                elif key == curses.KEY_RESIZE:
                    # Terminal was resized, restart the inner loop
                    break
    except Exception as e:
        # Exit gracefully on error
        curses.endwin()
        print(f"An error occurred: {e}")
        return
if __name__ == "__main__":
    # curses.wrapper initializes the screen and restores the terminal on exit/error
    curses.wrapper(main)
show_files.py | Python | #!/usr/bin/env python3
"""
Show contents of multiple files with clear formatting.
Usage:
python show_files.py file1.py file2.py file3.py | pbcopy
"""
import sys
import os
def show_file_content(file_path):
    """Print a formatted header and the contents of *file_path* to stdout.

    Args:
        file_path: Path to the file to display.

    Returns:
        bool: True if the file was read and printed, False otherwise
        (missing file or read error; diagnostics go to stderr so they are
        excluded when stdout is piped to pbcopy).
    """
    try:
        # Report (not raise) a missing file so the caller can continue
        # with the remaining files in the batch.
        if not os.path.exists(file_path):
            print(f"Error: File not found: {file_path}", file=sys.stderr)
            return False
        # Absolute path makes the header unambiguous when output is copied.
        abs_path = os.path.abspath(file_path)
        # Print file header
        print(f"\n{'=' * 80}")
        print(f"FILE: {abs_path}")
        print(f"{'=' * 80}\n")
        # Read as UTF-8 regardless of locale; replace undecodable bytes
        # instead of aborting midway through a batch of files.
        with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
            print(f.read())
        return True
    except Exception as e:
        print(f"Error reading file {file_path}: {e}", file=sys.stderr)
        return False
def main():
    """Validate arguments and dump every requested file to stdout."""
    args = sys.argv[1:]
    if not args:
        prog = sys.argv[0]
        print(f"Usage: python {prog} file1 [file2 file3 ...]", file=sys.stderr)
        print(f"Example to copy to clipboard: python {prog} file1.py file2.py | pbcopy", file=sys.stderr)
        sys.exit(1)
    # Count successes so the summary (stderr only, so it won't reach pbcopy)
    # reflects partial failures.
    success_count = sum(1 for file_path in args if show_file_content(file_path))
    print(f"\nProcessed {success_count} of {len(args)} files successfully", file=sys.stderr)
if __name__ == "__main__":
main() | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
speed_up_audio.py | Python | #!/usr/bin/env python3
import argparse
import librosa
import soundfile as sf
import os
def speed_up_audio(input_file, output_file=None, speed_factor=1.5, preserve_pitch=True):
    """
    Speed up an audio file, optionally preserving the original pitch.

    Args:
        input_file (str): Path to the input audio file
        output_file (str, optional): Path to save the output audio file. If None,
                                    creates a file with "_speedup" suffix.
        speed_factor (float): Speed factor (1.0 = original speed, 2.0 = twice as fast)
        preserve_pitch (bool): Whether to preserve pitch (True) or not (False)

    Returns:
        str: Path to the output audio file

    Raises:
        FileNotFoundError: If input_file does not exist.
        ValueError: If speed_factor is not positive.
    """
    if not os.path.exists(input_file):
        raise FileNotFoundError(f"Input file not found: {input_file}")
    if speed_factor <= 0:
        raise ValueError(f"speed_factor must be positive, got {speed_factor}")
    # Create output filename if not provided
    if output_file is None:
        base, ext = os.path.splitext(input_file)
        output_file = f"{base}_speedup{ext}"
    # Load the audio file
    print(f"Loading audio file: {input_file}")
    y, sr = librosa.load(input_file, sr=None)  # sr=None preserves original sample rate
    # Get original duration
    original_duration = librosa.get_duration(y=y, sr=sr)
    if preserve_pitch:
        # Phase-vocoder time-stretch: shorter duration, unchanged pitch.
        print(f"Processing audio at {speed_factor}x speed (preserving pitch)...")
        y_fast = librosa.effects.time_stretch(y, rate=speed_factor)
    else:
        # BUG FIX: the previous implementation called librosa.effects.pitch_shift,
        # which keeps duration unchanged, so the audio was never actually sped up
        # (and 12*hz_to_midi(speed_factor) is not a meaningful semitone count).
        # Resampling to 1/speed_factor of the original length and playing the
        # result back at the original rate speeds the audio up and raises the
        # pitch together (tape-speed effect).
        print(f"Processing audio at {speed_factor}x speed (with pitch change)...")
        y_fast = librosa.resample(y, orig_sr=sr * speed_factor, target_sr=sr)
    # Get processed duration
    processed_duration = librosa.get_duration(y=y_fast, sr=sr)
    # Save the processed audio
    print(f"Saving processed audio to: {output_file}")
    sf.write(output_file, y_fast, sr)
    # Report duration change
    print(f"\nAudio Information:")
    print(f"  Original duration: {original_duration:.2f} seconds")
    print(f"  Processed duration: {processed_duration:.2f} seconds")
    print(f"  Time saved: {original_duration - processed_duration:.2f} seconds ({speed_factor:.1f}x faster)")
    return output_file
def main():
    """Parse CLI arguments and run the speed-up pipeline."""
    parser = argparse.ArgumentParser(description="Speed up audio without changing pitch")
    parser.add_argument("input_file", help="Path to the input audio file")
    parser.add_argument("-o", "--output-file", help="Path to save the output audio file")
    parser.add_argument("-s", "--speed", type=float, default=1.5,
                        help="Speed factor (1.0 = original speed, 2.0 = twice as fast)")
    parser.add_argument("--change-pitch", action="store_true",
                        help="Allow pitch to change with speed (faster = higher pitch)")
    args = parser.parse_args()
    try:
        result = speed_up_audio(
            args.input_file,
            args.output_file,
            args.speed,
            preserve_pitch=not args.change_pitch,
        )
    except Exception as exc:
        print(f"Error: {exc}")
    else:
        print(f"Successfully processed audio: {result}")
if __name__ == "__main__":
main() | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
start_dashboards.sh | Shell | #!/bin/bash
# Script to launch both typing and reading metrics web dashboards in parallel
# This starts the typing metrics server on port 5050 and reading metrics on port 5051
# Determine the script directory so relative paths work no matter where the
# script is invoked from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
# Activate the virtual environment if it exists: prefer ./venv, otherwise
# fall back to the project's activate_env.sh helper; with neither present,
# the system python is used as-is
if [ -f "./venv/bin/activate" ]; then
    echo "Activating virtual environment..."
    source ./venv/bin/activate
elif [ -f "./activate_env.sh" ]; then
    echo "Activating environment..."
    source ./activate_env.sh
fi
# Open the given URL in the default browser, dispatching on the host OS.
open_browser() {
    url=$1
    os_name="$(uname -s)"
    if [[ "$os_name" == Darwin* ]]; then
        # macOS
        open "$url"
    elif [[ "$os_name" == Linux* ]]; then
        # Linux: try the common openers in order of preference
        if command -v xdg-open > /dev/null; then
            xdg-open "$url"
        elif command -v gnome-open > /dev/null; then
            gnome-open "$url"
        else
            echo "Could not detect the web browser to use."
        fi
    elif [[ "$os_name" == CYGWIN* || "$os_name" == MINGW* || "$os_name" == MSYS* ]]; then
        # Windows
        start "$url"
    else
        echo "Unknown operating system. Please open $url manually."
    fi
}
# Start typing metrics web dashboard in background.
# NOTE: a shell function's `return` status is limited to 0-255, so a PID
# cannot be passed back that way (the old `return $pid` / `$?` pattern
# truncated the PID and made the trap below kill the wrong process).
# The PID is instead published via the global variable typing_pid.
start_typing_dashboard() {
    echo "Starting typing metrics web dashboard on http://127.0.0.1:5050/"
    python typing_metrics_web.py &
    typing_pid=$!
    sleep 0.5
    open_browser "http://127.0.0.1:5050/"
    echo "Typing metrics dashboard started with PID: $typing_pid"
}
# Start reading metrics web dashboard in background (PID published via
# the global variable reading_pid, see note above).
start_reading_dashboard() {
    echo "Starting reading metrics web dashboard on http://127.0.0.1:5051/reading"
    python reading_metrics_web.py -p 5051 &
    reading_pid=$!
    # Don't automatically open the reading dashboard in browser
    echo "Reading metrics dashboard started with PID: $reading_pid"
}
# Start both dashboards
echo "Launching both dashboards..."
start_typing_dashboard
start_reading_dashboard
echo "Both dashboards are running"
echo "Press Ctrl+C to stop all servers"
# Kill both servers on Ctrl+C / termination; PIDs expand now, after both
# functions have set them.
trap "kill $typing_pid $reading_pid 2>/dev/null; echo 'Shutting down dashboards...'" INT TERM
wait
# Deactivate virtual environment on exit ("deactivate" is a shell function
# that only exists after a virtualenv activate script has been sourced)
if type deactivate &>/dev/null; then
    deactivate
fi | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
start_reading_metrics_web.sh | Shell | #!/bin/bash
# Script to launch the reading metrics web dashboard
# This starts the web server on port 5051
# Determine the script directory so relative paths work no matter where the
# script is invoked from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
# Activate the virtual environment if it exists: prefer ./venv, otherwise
# fall back to the project's activate_env.sh helper
if [ -f "./venv/bin/activate" ]; then
    echo "Activating virtual environment..."
    source ./venv/bin/activate
elif [ -f "./activate_env.sh" ]; then
    echo "Activating environment..."
    source ./activate_env.sh
fi
# Open the given URL in the default browser, dispatching on the host OS.
open_browser() {
    url=$1
    os_name="$(uname -s)"
    if [[ "$os_name" == Darwin* ]]; then
        # macOS
        open "$url"
    elif [[ "$os_name" == Linux* ]]; then
        # Linux: try the common openers in order of preference
        if command -v xdg-open > /dev/null; then
            xdg-open "$url"
        elif command -v gnome-open > /dev/null; then
            gnome-open "$url"
        else
            echo "Could not detect the web browser to use."
        fi
    elif [[ "$os_name" == CYGWIN* || "$os_name" == MINGW* || "$os_name" == MSYS* ]]; then
        # Windows
        start "$url"
    else
        echo "Unknown operating system. Please open $url manually."
    fi
}
# Launch the reading metrics web dashboard
echo "Starting reading metrics web dashboard on http://127.0.0.1:5051/"
echo "Press Ctrl+C to stop the server"
# Open the browser after a minimal delay to ensure the server has started
# (runs in a background subshell so it doesn't block the server launch)
(sleep 0.5 && open_browser "http://127.0.0.1:5051/reading") &
# Start the server (blocks until Ctrl+C)
python reading_metrics_web.py -p 5051
# Deactivate virtual environment on exit (only defined if one was activated)
if type deactivate &>/dev/null; then
    deactivate
fi | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
start_typing_metrics_web.sh | Shell | #!/bin/bash
# Script to launch the typing metrics web dashboard
# This starts the web server on port 5050
# Determine the script directory so relative paths work no matter where the
# script is invoked from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
# Activate the virtual environment if it exists: prefer ./venv, otherwise
# fall back to the project's activate_env.sh helper
if [ -f "./venv/bin/activate" ]; then
    echo "Activating virtual environment..."
    source ./venv/bin/activate
elif [ -f "./activate_env.sh" ]; then
    echo "Activating environment..."
    source ./activate_env.sh
fi
# Open the given URL in the default browser, dispatching on the host OS.
open_browser() {
    url=$1
    os_name="$(uname -s)"
    if [[ "$os_name" == Darwin* ]]; then
        # macOS
        open "$url"
    elif [[ "$os_name" == Linux* ]]; then
        # Linux: try the common openers in order of preference
        if command -v xdg-open > /dev/null; then
            xdg-open "$url"
        elif command -v gnome-open > /dev/null; then
            gnome-open "$url"
        else
            echo "Could not detect the web browser to use."
        fi
    elif [[ "$os_name" == CYGWIN* || "$os_name" == MINGW* || "$os_name" == MSYS* ]]; then
        # Windows
        start "$url"
    else
        echo "Unknown operating system. Please open $url manually."
    fi
}
# Launch the typing metrics web dashboard
echo "Starting typing metrics web dashboard on http://127.0.0.1:5050/"
echo "Press Ctrl+C to stop the server"
# Open the browser after a minimal delay to ensure the server has started
# (runs in a background subshell so it doesn't block the server launch)
(sleep 0.5 && open_browser "http://127.0.0.1:5050/") &
# Start the server (blocks until Ctrl+C)
python typing_metrics_web.py
# Deactivate virtual environment on exit (only defined if one was activated)
if type deactivate &>/dev/null; then
    deactivate
fi | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
templates/dashboard.html | HTML | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Typing Time Saved</title>
<link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>📈</text></svg>">
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
margin: 0;
padding: 20px;
background-color: #f5f5f7;
color: #333;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
.header {
text-align: center;
margin-bottom: 30px;
}
.header h1 {
margin-bottom: 5px;
color: #2c3e50;
}
.stats-container {
display: flex;
justify-content: space-around;
flex-wrap: wrap;
margin-bottom: 30px;
}
.stat-card {
background-color: white;
border-radius: 8px;
padding: 20px;
text-align: center;
margin: 10px;
flex: 1;
min-width: 200px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
}
.stat-card h2 {
margin: 0;
font-size: 16px;
font-weight: 500;
color: #666;
}
.stat-card .value {
font-size: 36px;
font-weight: 700;
color: #3498db;
margin: 10px 0;
}
.chart-container {
background-color: white;
border-radius: 8px;
padding: 20px;
margin-bottom: 30px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
}
.chart-container h2 {
margin-top: 0;
color: #2c3e50;
}
.time-period-nav {
display: flex;
justify-content: center;
margin-bottom: 20px;
}
.time-period-nav button {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
padding: 8px 16px;
margin: 0 4px;
cursor: pointer;
border-radius: 4px;
font-weight: 500;
transition: all 0.2s;
}
.time-period-nav button.active {
background-color: #3498db;
color: white;
border-color: #3498db;
}
.time-period-nav button:hover:not(.active) {
background-color: #e9ecef;
}
.display-toggle {
display: flex;
}
.display-toggle button {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
padding: 6px 14px;
margin: 0 2px;
cursor: pointer;
border-radius: 4px;
font-weight: 500;
transition: all 0.2s;
}
.display-toggle button.active {
background-color: #3498db;
color: white;
border-color: #3498db;
}
.display-toggle button:hover:not(.active) {
background-color: #e9ecef;
}
.chart-wrapper {
height: 300px;
position: relative;
}
.refresh-note {
text-align: center;
color: #888;
font-size: 14px;
margin-top: 30px;
}
.tooltip {
position: relative;
display: inline-block;
cursor: help;
}
.tooltip .tooltiptext {
visibility: hidden;
width: 200px;
background-color: #555;
color: #fff;
text-align: center;
border-radius: 6px;
padding: 5px;
position: absolute;
z-index: 1;
bottom: 125%;
left: 50%;
margin-left: -100px;
opacity: 0;
transition: opacity 0.3s;
}
.tooltip:hover .tooltiptext {
visibility: visible;
opacity: 1;
}
.dashboard-links {
text-align: center;
margin-top: 30px;
padding: 10px;
}
.dashboard-links a {
display: inline-block;
color: #2c3e50;
text-decoration: none;
padding: 8px 16px;
margin: 0 8px;
border-radius: 4px;
border: 1px solid #dee2e6;
transition: all 0.2s;
}
.dashboard-links a:hover {
background-color: #f8f9fa;
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>Typing Time Saved</h1>
<p>Statistics on time saved using transcription</p>
</div>
<div class="stats-container">
<div class="stat-card">
<h2>
Total Estimated Time Saved
<span class="tooltip">ⓘ
<span class="tooltiptext">Based on <span id="wpm-setting"></span> WPM typing speed</span>
</span>
</h2>
<div class="value" id="time-saved"></div>
<div>hours:minutes</div>
</div>
<div class="stat-card">
<h2>Characters Transcribed</h2>
<div class="value" id="total-chars"></div>
<div>characters</div>
</div>
<div class="stat-card">
<h2>Words Transcribed</h2>
<div class="value" id="total-words"></div>
<div>words</div>
</div>
<div class="stat-card">
<h2>
Pages Equivalent
<span class="tooltip">ⓘ
<span class="tooltiptext">Based on <span id="words-per-page"></span> words per page</span>
</span>
</h2>
<div class="value" id="total-pages"></div>
<div>pages</div>
</div>
</div>
<div class="chart-container">
<div style="display: flex; justify-content: space-between; align-items: center;">
<h2>Typing Metrics Over Time</h2>
<div class="display-toggle">
<button class="toggle-btn active" data-display="minutes">Minutes</button>
<button class="toggle-btn" data-display="pages">Pages</button>
</div>
</div>
<div class="time-period-nav">
<button class="period-btn active" data-period="daily">Daily</button>
<button class="period-btn" data-period="weekly">Weekly</button>
<button class="period-btn" data-period="monthly">Monthly</button>
</div>
<div class="chart-wrapper">
<canvas id="timeChart"></canvas>
</div>
</div>
<div class="dashboard-links">
<a href="http://127.0.0.1:5051/reading">Reading Metrics Dashboard</a>
</div>
<div class="refresh-note">
Data refreshes automatically when you open this page. Close and reopen to see the latest metrics.
</div>
</div>
<script>
// Chart.js initialization
let timeChart;
let currentPeriod = 'daily';
let currentDisplay = 'minutes'; // Default display mode
// Fetch data and initialize
fetchDataAndUpdateUI();
// Add event listeners to time period buttons
document.querySelectorAll('.period-btn').forEach(button => {
button.addEventListener('click', () => {
document.querySelectorAll('.period-btn').forEach(btn => btn.classList.remove('active'));
button.classList.add('active');
currentPeriod = button.getAttribute('data-period');
updateChart();
});
});
// Add event listeners to display toggle buttons
document.querySelectorAll('.toggle-btn').forEach(button => {
button.addEventListener('click', () => {
document.querySelectorAll('.toggle-btn').forEach(btn => btn.classList.remove('active'));
button.classList.add('active');
currentDisplay = button.getAttribute('data-display');
updateChart();
});
});
function fetchDataAndUpdateUI() {
fetch('/data')
.then(response => response.json())
.then(data => {
// Update summary statistics
document.getElementById('time-saved').textContent = formatTime(data.time_saved_minutes);
document.getElementById('total-chars').textContent = formatNumber(data.total_chars);
document.getElementById('total-words').textContent = formatNumber(data.total_words);
document.getElementById('total-pages').textContent = formatNumber(data.total_pages);
document.getElementById('words-per-page').textContent = formatNumber(data.words_per_page);
document.getElementById('wpm-setting').textContent = formatNumber(data.wpm_setting);
// Store data globally for chart updates
window.metricsData = data;
// Initialize chart
updateChart();
})
.catch(error => console.error('Error fetching data:', error));
}
function updateChart() {
const data = window.metricsData;
if (!data) return;
let chartData;
let labels;
let values;
const wordsPerPage = data.words_per_page;
// Select appropriate data based on current time period
if (currentPeriod === 'daily') {
chartData = data.daily_metrics;
labels = chartData.map(item => formatDate(item.date));
values = currentDisplay === 'minutes'
? chartData.map(item => item.time_saved_minutes)
: chartData.map(item => item.pages);
} else if (currentPeriod === 'weekly') {
chartData = data.weekly_metrics;
labels = chartData.map(item => formatWeek(item.week));
values = currentDisplay === 'minutes'
? chartData.map(item => item.time_saved_minutes)
: chartData.map(item => item.pages);
} else {
chartData = data.monthly_metrics;
labels = chartData.map(item => formatMonth(item.month));
values = currentDisplay === 'minutes'
? chartData.map(item => item.time_saved_minutes)
: chartData.map(item => item.pages);
}
// Determine label and y-axis text based on display mode
const displayLabel = currentDisplay === 'minutes' ? 'Time Saved (minutes)' : 'Pages Typed';
const yAxisTitle = currentDisplay === 'minutes' ? 'Minutes' : 'Pages';
// Destroy previous chart instance if it exists
if (timeChart) {
timeChart.destroy();
}
// Create new chart
const ctx = document.getElementById('timeChart').getContext('2d');
timeChart = new Chart(ctx, {
type: 'bar',
data: {
labels: labels,
datasets: [{
label: displayLabel,
data: values,
backgroundColor: 'rgba(52, 152, 219, 0.7)',
borderColor: 'rgba(52, 152, 219, 1)',
borderWidth: 1
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: yAxisTitle
}
}
}
}
});
}
// Helper formatting functions
function formatNumber(num) {
return num.toLocaleString();
}
function formatTime(minutes) {
const hours = Math.floor(minutes / 60);
const mins = Math.round(minutes % 60);
return `${hours}:${mins.toString().padStart(2, '0')}`;
}
function formatDate(dateStr) {
const date = new Date(dateStr);
return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' });
}
function formatWeek(weekStr) {
// Extract year and week number from YYYY-WW format
const [year, weekNum] = weekStr.split('-W');
return `Week ${weekNum}`;
}
function formatMonth(monthStr) {
const date = new Date(monthStr + '-01');
return date.toLocaleDateString(undefined, { month: 'short', year: 'numeric' });
}
</script>
</body>
</html> | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
templates/reading_dashboard.html | HTML | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Reading Time Saved</title>
<link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>📚</text></svg>">
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
margin: 0;
padding: 20px;
background-color: #f5f5f7;
color: #333;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
.header {
text-align: center;
margin-bottom: 30px;
}
.header h1 {
margin-bottom: 5px;
color: #2c3e50;
}
.stats-container {
display: flex;
justify-content: space-around;
flex-wrap: wrap;
margin-bottom: 30px;
}
.stat-card {
background-color: white;
border-radius: 8px;
padding: 20px;
text-align: center;
margin: 10px;
flex: 1;
min-width: 200px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
}
.stat-card h2 {
margin: 0;
font-size: 16px;
font-weight: 500;
color: #666;
}
.stat-card .value {
font-size: 36px;
font-weight: 700;
color: #e67e22;
margin: 10px 0;
}
.chart-container {
background-color: white;
border-radius: 8px;
padding: 20px;
margin-bottom: 30px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
}
.chart-container h2 {
margin-top: 0;
color: #2c3e50;
}
.time-period-nav {
display: flex;
justify-content: center;
margin-bottom: 20px;
}
.time-period-nav button {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
padding: 8px 16px;
margin: 0 4px;
cursor: pointer;
border-radius: 4px;
font-weight: 500;
transition: all 0.2s;
}
.time-period-nav button.active {
background-color: #e67e22;
color: white;
border-color: #e67e22;
}
.time-period-nav button:hover:not(.active) {
background-color: #e9ecef;
}
.display-toggle {
display: flex;
}
.display-toggle button {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
padding: 6px 14px;
margin: 0 2px;
cursor: pointer;
border-radius: 4px;
font-weight: 500;
transition: all 0.2s;
}
.display-toggle button.active {
background-color: #e67e22;
color: white;
border-color: #e67e22;
}
.display-toggle button:hover:not(.active) {
background-color: #e9ecef;
}
.chart-wrapper {
height: 300px;
position: relative;
}
.refresh-note {
text-align: center;
color: #888;
font-size: 14px;
margin-top: 30px;
}
.dashboard-links {
text-align: center;
margin-top: 30px;
padding: 10px;
}
.dashboard-links a {
display: inline-block;
color: #2c3e50;
text-decoration: none;
padding: 8px 16px;
margin: 0 8px;
border-radius: 4px;
border: 1px solid #dee2e6;
transition: all 0.2s;
}
.dashboard-links a:hover {
background-color: #f8f9fa;
}
.tooltip {
position: relative;
display: inline-block;
cursor: help;
}
.tooltip .tooltiptext {
visibility: hidden;
width: 200px;
background-color: #555;
color: #fff;
text-align: center;
border-radius: 6px;
padding: 5px;
position: absolute;
z-index: 1;
bottom: 125%;
left: 50%;
margin-left: -100px;
opacity: 0;
transition: opacity 0.3s;
}
.tooltip:hover .tooltiptext {
visibility: visible;
opacity: 1;
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>Reading Metrics Dashboard</h1>
<p>Statistics on text-to-speech conversions</p>
</div>
<div class="stats-container">
<div class="stat-card">
<h2>Words Processed</h2>
<div class="value" id="total-words"></div>
<div>words</div>
</div>
<div class="stat-card">
<h2>Characters Processed</h2>
<div class="value" id="total-chars"></div>
<div>characters</div>
</div>
<div class="stat-card">
<h2>Paragraphs Read</h2>
<div class="value" id="total-paragraphs"></div>
<div>paragraphs</div>
</div>
<div class="stat-card">
<h2>
Pages Read
<span class="tooltip">ⓘ
<span class="tooltiptext">Estimated based on <span id="words-per-page"></span> words per page</span>
</span>
</h2>
<div class="value" id="pages-read"></div>
<div>pages</div>
</div>
</div>
<div class="chart-container">
<div style="display: flex; justify-content: space-between; align-items: center;">
<h2>Reading Volume Over Time</h2>
<div class="display-toggle">
<button class="toggle-btn active" data-display="words">Words</button>
<button class="toggle-btn" data-display="pages">Pages</button>
</div>
</div>
<div class="time-period-nav">
<button class="period-btn active" data-period="daily">Daily</button>
<button class="period-btn" data-period="weekly">Weekly</button>
<button class="period-btn" data-period="monthly">Monthly</button>
</div>
<div class="chart-wrapper">
<canvas id="readingChart"></canvas>
</div>
</div>
<div class="dashboard-links">
<a href="http://127.0.0.1:5050/">Typing Metrics Dashboard</a>
</div>
<div class="refresh-note">
Data refreshes automatically when you open this page. Close and reopen to see the latest metrics.
</div>
</div>
<script>
// Chart.js initialization
let readingChart;
let currentPeriod = 'daily';
let currentDisplay = 'words'; // Default display mode
// Fetch data and initialize
fetchDataAndUpdateUI();
// Add event listeners to time period buttons
document.querySelectorAll('.period-btn').forEach(button => {
button.addEventListener('click', () => {
document.querySelectorAll('.period-btn').forEach(btn => btn.classList.remove('active'));
button.classList.add('active');
currentPeriod = button.getAttribute('data-period');
updateChart();
});
});
// Add event listeners to display toggle buttons
document.querySelectorAll('.toggle-btn').forEach(button => {
button.addEventListener('click', () => {
document.querySelectorAll('.toggle-btn').forEach(btn => btn.classList.remove('active'));
button.classList.add('active');
currentDisplay = button.getAttribute('data-display');
updateChart();
});
});
function fetchDataAndUpdateUI() {
fetch('/reading/data')
.then(response => response.json())
.then(data => {
// Update summary statistics
document.getElementById('pages-read').textContent = formatNumber(data.pages_read);
document.getElementById('total-words').textContent = formatNumber(data.total_words);
document.getElementById('total-chars').textContent = formatNumber(data.total_chars);
document.getElementById('total-paragraphs').textContent = formatNumber(data.total_paragraphs);
document.getElementById('words-per-page').textContent = formatNumber(data.words_per_page_setting);
// Store data globally for chart updates
window.metricsData = data;
// Initialize chart
updateChart();
})
.catch(error => console.error('Error fetching data:', error));
}
function updateChart() {
const data = window.metricsData;
if (!data) return;
let chartData;
let labels;
let values;
const wordsPerPage = data.words_per_page_setting;
// Select appropriate data based on current time period
if (currentPeriod === 'daily') {
chartData = data.daily_metrics;
labels = chartData.map(item => formatDate(item.date));
values = currentDisplay === 'words'
? chartData.map(item => item.words)
: chartData.map(item => parseFloat((item.words / wordsPerPage).toFixed(1)));
} else if (currentPeriod === 'weekly') {
chartData = data.weekly_metrics;
labels = chartData.map(item => formatWeek(item.week));
values = currentDisplay === 'words'
? chartData.map(item => item.words)
: chartData.map(item => parseFloat((item.words / wordsPerPage).toFixed(1)));
} else {
chartData = data.monthly_metrics;
labels = chartData.map(item => formatMonth(item.month));
values = currentDisplay === 'words'
? chartData.map(item => item.words)
: chartData.map(item => parseFloat((item.words / wordsPerPage).toFixed(1)));
}
// Determine label and y-axis text based on display mode
const displayLabel = currentDisplay === 'words' ? 'Words Read' : 'Pages Read';
const yAxisTitle = currentDisplay === 'words' ? 'Words' : 'Pages';
// Destroy previous chart instance if it exists
if (readingChart) {
readingChart.destroy();
}
// Create new chart
const ctx = document.getElementById('readingChart').getContext('2d');
readingChart = new Chart(ctx, {
type: 'bar',
data: {
labels: labels,
datasets: [{
label: displayLabel,
data: values,
backgroundColor: 'rgba(230, 126, 34, 0.7)',
borderColor: 'rgba(230, 126, 34, 1)',
borderWidth: 1
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: yAxisTitle
}
}
}
}
});
}
// Helper formatting functions
function formatNumber(num) {
return num.toLocaleString();
}
function formatDate(dateStr) {
const date = new Date(dateStr);
return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' });
}
function formatWeek(weekStr) {
// Extract year and week number from YYYY-WW format
const [year, weekNum] = weekStr.split('-W');
return `Week ${weekNum}`;
}
function formatMonth(monthStr) {
const date = new Date(monthStr + '-01');
return date.toLocaleDateString(undefined, { month: 'short', year: 'numeric' });
}
</script>
</body>
</html> | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
terminal_ui.py | Python | #!/usr/bin/env python3
"""
Terminal UI module for curses-based interfaces.
Provides reusable UI components and display functions.
"""
import curses
def init_curses(stdscr):
    """Put the terminal into interactive curses mode and set up colors.

    Args:
        stdscr: Screen object from ``curses.initscr()`` / ``curses.wrapper()``.

    Returns:
        The same ``stdscr``, configured for no-echo, cbreak, keypad input.
    """
    curses.noecho()  # Don't echo keypresses
    curses.cbreak()  # React to keys instantly
    stdscr.keypad(True)  # Enable keypad mode
    # Try to enable colors if terminal supports it
    if curses.has_colors():
        curses.start_color()
        curses.use_default_colors()  # Use terminal's default colors for better visibility
        # 256-color palette indices (assumes a 256-color terminal — TODO confirm;
        # init_pair with out-of-range colors raises curses.error on 8-color terminals)
        curses.init_pair(1, 209, -1)  # Title - slightly brighter coral/orange
        curses.init_pair(2, 68, -1)  # Highlight - slightly brighter blue
        curses.init_pair(3, 147, -1)  # Footer - slightly brighter grayish-lavender
    return stdscr
def cleanup_curses(stdscr):
    """Undo init_curses(): restore echo/cooked mode and close the screen.

    Safe to call with None (e.g. when curses was never initialized).
    """
    if not stdscr:
        return
    stdscr.keypad(False)
    curses.echo()
    curses.nocbreak()
    curses.endwin()
def display_screen_template(stdscr, title, content, status_message="", footer_text=None):
    """Render a bordered screen with a centered title, body lines, and footer.

    Args:
        stdscr: Curses window to draw on; the call is a no-op when None.
        title (str): Heading centered between the top border lines.
        content (iterable[str]): Body lines drawn one per row, starting at row 4.
        status_message (str): Optional dimmed status line near the bottom.
        footer_text (str): Optional single footer line; when None, the default
            shortcut hints (⇧⌥X / ⇧⌥Z / Ctrl+C) are shown instead.

    NOTE(review): rows/columns are not clamped, so very small terminals or
    long lines can raise curses.error — callers appear to tolerate this.
    """
    if not stdscr:
        return
    # Clear screen
    stdscr.clear()
    # Get terminal dimensions
    height, width = stdscr.getmaxyx()
    # Display border and title (width-1 avoids writing the last column,
    # which curses treats as an error on some terminals)
    stdscr.addstr(0, 0, "=" * (width-1))
    # Title with color if available
    if curses.has_colors():
        stdscr.addstr(1, 0, title.center(width-1), curses.color_pair(1))
    else:
        stdscr.addstr(1, 0, title.center(width-1))
    stdscr.addstr(2, 0, "=" * (width-1))
    # Display content, one line per row, starting below the title block
    line_num = 4
    for line in content:
        stdscr.addstr(line_num, 0, line)
        line_num += 1
    # Display footer
    footer_line = height - 3
    # Footer with color if available
    if curses.has_colors():
        color = curses.color_pair(3)
    else:
        color = curses.A_NORMAL
    if footer_text:
        stdscr.addstr(footer_line, 0, footer_text, color)
    else:
        # Default hints are spread across several rows around footer_line
        stdscr.addstr(footer_line - 5, 0, "Press ⇧⌥X (Shift+Alt+X) for audio-only recording", color)
        stdscr.addstr(footer_line - 2, 0, "Press ⇧⌥Z (Shift+Alt+Z) for screen+audio recording", color)
        stdscr.addstr(footer_line + 1, 0, "Press Ctrl+C to exit", color)
    # Bottom border
    stdscr.addstr(height-1, 0, "=" * (width-1))
    # Display status message if any
    if status_message:
        msg_y = height - 12  # Move status message much higher to avoid overlapping with any instructions
        stdscr.addstr(msg_y, 0, status_message, curses.A_DIM)
    # Update the screen
stdscr.refresh() | ykdojo/osh | 7 | OSH - Automate 99% of your typing for free | Python | ykdojo | YK | Eventual |
terminal_video_voice_recorder.py | Python | #!/usr/bin/env python3
"""
Terminal-based voice recording handler with simple UI
Uses curses for proper terminal management
"""
import curses
import threading
import time
import os
from keyboard_handler import KeyboardShortcutHandler
from terminal_ui import init_curses, cleanup_curses, display_screen_template
from recorders.recording_handler import RecordingSession
from transcription_handler import TranscriptionHandler
class CursesShortcutHandler:
"""Terminal UI with keyboard shortcut support using curses"""
    def __init__(self):
        """Wire up the recording, transcription, and keyboard subsystems.

        Curses itself is NOT initialized here; call init_curses() before
        entering the UI loop.
        """
        self.is_running = True  # Main-loop flag; cleared by set_exit()
        self.stdscr = None  # Curses screen, created later by init_curses()
        self.status_message = ""  # Last status line shown at the bottom of the UI
        # Initialize recording session handler with both status and recording started callbacks
        self.recording_session = RecordingSession(
            status_callback=self.set_status_message,
            recording_started_callback=self.on_recording_started
        )
        # Initialize transcription handler
        self.transcription_handler = TranscriptionHandler(
            ui_callback=self.display_screen_template,
            status_callback=self.set_status_message
        )
        # Initialize keyboard handler with callbacks
        self.keyboard_handler = KeyboardShortcutHandler({
            'toggle': self.toggle_recording,
            'exit': self.set_exit,
            'status': self.set_status_message
        })
def set_status_message(self, message):
"""Set status message and refresh screen"""
self.status_message = message
self.refresh_screen()
def set_exit(self):
"""Set exit flag"""
self.is_running = False
def on_recording_started(self, mode):
"""
Callback that triggers when recording has actually started
Args:
mode (str): 'audio' for audio-only or 'video' for screen and audio
"""
# Now we know recording has actually started, show the recording screen
self.show_recording_screen(mode)
def init_curses(self):
"""Initialize curses environment"""
self.stdscr = curses.initscr()
init_curses(self.stdscr)
def cleanup_curses(self):
"""Clean up curses on exit"""
cleanup_curses(self.stdscr)
def start_keyboard_listener(self):
"""Start the keyboard shortcut listener"""
self.keyboard_handler.start()
def toggle_recording(self, mode="audio"):
"""
Toggle recording state when shortcut is pressed
Args:
mode (str): 'audio' for audio-only or 'video' for screen and audio
"""
try:
if self.recording_session.is_recording:
recording_path, recording_mode = self.recording_session.stop()
if recording_path:
self.show_recording_done_screen(recording_path, recording_mode)
# Pass directly to transcription
self.transcription_handler.transcribe(recording_path, recording_mode)
else:
self.show_main_screen()
else:
# Display a "preparing to record" screen first
self.show_preparing_screen(mode)
# Start the recording - the on_recording_started callback will
# handle displaying the recording screen when recording actually starts
self.recording_session.start(mode)
except Exception as e:
self.status_message = f"Error in toggle_recording: {e}"
self.refresh_screen()
def refresh_screen(self):
"""Force screen refresh"""
if self.stdscr:
self.stdscr.refresh()
def display_screen_template(self, title, content, footer_text=None):
"""Common screen display template to reduce code duplication"""
display_screen_template(self.stdscr, title, content, self.status_message, footer_text)
def show_main_screen(self):
"""Display the main screen with options"""
content = [
"Status: Ready",
"",
"Recording options:",
"• Audio only (⇧⌥X): Record voice without capturing screen",
"• Screen + Audio (⇧⌥Z): Record both screen and voice"
]
self.display_screen_template("AUDIO/VIDEO RECORDER", content)
def show_recording_screen(self, mode="audio"):
"""
Display recording screen
Args:
mode (str): 'audio' for audio-only or 'video' for screen and audio
"""
# The status message will contain the microphone information (added via callback)
if mode == "audio":
content = ["Voice Recording active...", "Capturing audio only"]
footer = "Press ⇧⌥X (Shift+Alt+X) to stop recording"
self.display_screen_template("VOICE RECORDING IN PROGRESS", content, footer)
else: # video mode
content = ["Screen Recording active...", "Capturing screen and audio"]
footer = "Press ⇧⌥Z (Shift+Alt+Z) to stop recording"
self.display_screen_template("SCREEN RECORDING IN PROGRESS", content, footer)
def show_preparing_screen(self, mode="audio"):
"""
Display a screen showing that recording is being prepared
Args:
mode (str): 'audio' for audio-only or 'video' for screen and audio
"""
if mode == "audio":
content = ["Preparing voice recording...", "Setting up audio device"]
footer = "Press ⇧⌥X (Shift+Alt+X) to cancel"
self.display_screen_template("PREPARING VOICE RECORDING", content, footer)
else: # video mode
content = ["Preparing screen recording...", "Setting up screen capture and audio device"]
footer = "Press ⇧⌥Z (Shift+Alt+Z) to cancel"
self.display_screen_template("PREPARING SCREEN RECORDING", content, footer)
def show_recording_done_screen(self, recording_path, recording_mode):
"""Display recording done screen with recording path info"""
content = [
"Your recording has been completed.",
"",
"Processing recording..."
]
if recording_path:
content.append(f"Recording saved to: {recording_path}")
# Show the processing screen with appropriate title based on recording mode
title = "VOICE RECORDING DONE!" if recording_mode == "audio" else "SCREEN RECORDING DONE!"
self.display_screen_template(title, content)
else:
error_type = "Voice" if recording_mode == "audio" else "Screen"
content.append(f"Error: {error_type} recording failed or was interrupted")
title = "VOICE RECORDING DONE!" if recording_mode == "audio" else "SCREEN RECORDING DONE!"
self.display_screen_template(title, content)
def run(self):
"""Main application loop"""
try:
# Initialize curses
self.init_curses()
# Start keyboard listener
self.start_keyboard_listener()
# Display main screen
self.show_main_screen()
# Track when we last verified the listener
last_listener_check = time.time()
# Keep application running until exit signal
while self.is_running:
# Periodically check if keyboard listener is still active
current_time = time.time()
if current_time - last_listener_check > 5: # Check every 5 seconds
if self.keyboard_handler.keyboard_listener is None or not self.keyboard_handler.keyboard_listener.is_alive():
self.set_status_message("Keyboard listener died - restarting...")
self.start_keyboard_listener()
self.set_status_message("Keyboard listener restarted")
last_listener_check = current_time
time.sleep(0.1) # Small sleep to prevent CPU usage
except KeyboardInterrupt:
self.status_message = "Exiting..."
self.refresh_screen()
finally:
# Properly clean up all resources
try:
# Clean up keyboard handler
if hasattr(self, 'keyboard_handler') and self.keyboard_handler:
self.set_status_message("Cleaning up keyboard listener...")
self.keyboard_handler.stop()
time.sleep(0.1) # Give a moment for keyboard listener to stop cleanly
# Clean up recording session if any is active
if hasattr(self, 'recording_session') and self.recording_session:
if self.recording_session.is_recording:
self.set_status_message("Stopping active recording...")
self.recording_session.stop()
time.sleep(0.1)
# Clean up transcription handler
if hasattr(self, 'transcription_handler') and self.transcription_handler:
self.set_status_message("Cleaning up transcription handler...")
# Add any necessary cleanup for transcription_handler
except Exception as e:
self.set_status_message(f"Error during cleanup: {e}")
finally:
# Always clean up curses at the end
self.cleanup_curses()
if __name__ == "__main__":
    # Run the recorder UI when executed as a script.
    app = CursesShortcutHandler()
    app.run()
test_error_handling.py | Python | #!/usr/bin/env python3
import unittest
from unittest.mock import MagicMock
def test_error_handler():
    """
    Simplified test that mirrors the error handling in recorder.py.

    Exercises both branches of the stderr classification:
    1. a real ffmpeg error    -> the stderr text is reported
    2. an "Interrupt" message -> only the generic return-code line is shown

    Returns:
        list: the messages produced, in order (also printed).
    """
    process = MagicMock()
    process.returncode = 1

    def classify(stderr_bytes, verbose=True):
        """Replicate recorder.py's stderr classification for one error case."""
        process.stderr = MagicMock()
        process.stderr.read.return_value = stderr_bytes
        stderr_msg = ""
        if hasattr(process, 'stderr') and process.stderr:
            stderr_msg = process.stderr.read().decode('utf-8')
        # "Interrupt" / "Operation not permitted" are expected on manual stop,
        # so they fall through to the generic return-code message.
        if stderr_msg and "Interrupt" not in stderr_msg and "Operation not permitted" not in stderr_msg and verbose:
            message = f"Error during screen recording: {stderr_msg}"
        elif verbose:
            message = f"Error during screen recording (return code: {process.returncode})"
        else:
            message = ""
        if message:
            print(message)
        return message

    # Test case 1: Normal error with stderr -> stderr text must be surfaced
    messages = [classify(b"Some ffmpeg error")]
    assert messages[0] == "Error during screen recording: Some ffmpeg error"

    # Test case 2: Interrupt error -> stderr text suppressed, generic message
    messages.append(classify(b"Interrupt by user"))
    assert messages[1] == "Error during screen recording (return code: 1)"

    return messages
if __name__ == "__main__":
    # Allow running this check directly without a test runner.
    test_error_handler()
test_typing_metrics.py | Python | #!/usr/bin/env python3
"""
Test script for typing metrics functionality.
Generates sample data and starts the web server.
"""
import os
import time
import random
from datetime import datetime, timedelta
from typing_metrics import record_transcription
from typing_metrics_web import start_web_server
def generate_sample_data(num_entries=30):
    """
    Generate sample transcription data for testing.

    Args:
        num_entries (int): Number of sample entries to generate
    """
    # Start from a clean slate so the dashboard reflects only sample data.
    csv_path = os.path.join(os.path.dirname(__file__), "typing_metrics.csv")
    if os.path.exists(csv_path):
        os.remove(csv_path)

    # NOTE: record_transcription() timestamps entries at call time, so every
    # sample lands on today's date. (The old "random date within the past 30
    # days" value was computed but never used; backdating would need support
    # in typing_metrics itself.)
    for _ in range(num_entries):
        # Between 50 and 500 words of 4-7 characters each. Building real
        # whitespace-separated words makes record_transcription's word count
        # (len(text.split())) match the intended word_count, instead of the
        # single giant token that 'x' * char_count used to produce.
        word_count = random.randint(50, 500)
        transcription = " ".join("x" * random.randint(4, 7) for _ in range(word_count))
        record_transcription(transcription)
        # Small delay so consecutive entries get distinct timestamps.
        time.sleep(0.1)

    print(f"Generated {num_entries} sample transcription entries")
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Test typing metrics functionality")
    parser.add_argument("--generate", action="store_true", help="Generate sample data")
    parser.add_argument("--port", type=int, default=5050, help="Port for web server")
    args = parser.parse_args()

    # Generate sample data if requested
    if args.generate:
        generate_sample_data()

    # Start the web server
    print(f"Starting web server at http://127.0.0.1:{args.port}/")
    print("Press Ctrl+C to stop the server")
    try:
        # Import Flask app and run it directly (non-daemon) to keep script running
        from typing_metrics_web import app
        app.run(host='127.0.0.1', port=args.port, debug=False)
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to stop; exit quietly.
        print("\nServer stopped")
transcription_handler.py | Python | #!/usr/bin/env python3
"""
Transcription handler for audio and video recordings.
Manages the transcription process and results presentation.
"""
import os
import threading
import time
from audio_transcription import transcribe_audio
from video_transcription import transcribe_video
from type_text import type_text
from typing_metrics import record_transcription
class TranscriptionHandler:
    """Handles transcription of audio and video recordings.

    Runs Gemini transcription on a background thread, saves the result to a
    text file, updates the curses UI via callbacks, and finally types the
    transcription at the user's cursor position.
    """

    def __init__(self, ui_callback=None, status_callback=None):
        """
        Initialize the transcription handler.

        Args:
            ui_callback: Function to call to update UI with content
                (signature: ui_callback(title, content_lines)).
            status_callback: Function to call to update status message.
        """
        self.ui_callback = ui_callback
        self.status_callback = status_callback
        self.transcription = None        # last transcription text, if any
        self.transcription_path = None   # path of the saved transcription file

    def set_status(self, message):
        """Update status via callback (no-op when no callback is wired)."""
        if self.status_callback:
            self.status_callback(message)

    def save_transcription_text(self, text):
        """
        Save the transcription text to a file.

        Args:
            text (str): The text to save

        Returns:
            str: Path to the saved file, or None on failure.
        """
        try:
            # Create a filename with timestamp similar to recordings
            timestamp = int(time.time())
            filename = f"recording_{timestamp}.txt"
            # Explicit UTF-8 so transcriptions with non-ASCII characters
            # round-trip correctly regardless of the platform default encoding.
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(text)
            # Return the relative path, consistent with how we show recording paths
            return filename
        except Exception as e:
            self.set_status(f"Error saving transcription text: {str(e)}")
            return None

    def transcribe(self, recording_path, recording_mode):
        """
        Start transcription process in a separate thread.

        Args:
            recording_path: Path to the recording file
            recording_mode: 'audio' or 'video'

        Returns:
            threading.Thread: the started worker thread.
        """
        # Daemon thread so a hung transcription can't block interpreter exit.
        transcription_thread = threading.Thread(
            target=self._transcribe_thread_func,
            args=(recording_path, recording_mode),
            daemon=True
        )
        transcription_thread.start()
        return transcription_thread

    def _transcribe_thread_func(self, recording_path, recording_mode):
        """Thread function to handle transcription process."""
        try:
            if recording_mode == "audio":
                self.set_status("Transcribing audio with Gemini AI...")
                self.transcription = transcribe_audio(
                    audio_file_path=recording_path,
                    verbose=False
                )
            else:  # video mode
                self.set_status("Transcribing video with Gemini AI...")
                self.transcription = transcribe_video(
                    video_file_path=recording_path,
                    verbose=False
                )
            # Keep all recording files (both audio and video)
            if self.transcription and os.path.exists(recording_path):
                self.set_status("Transcription complete! Recording file preserved.")
            # Save transcription to a file and show results
            self.transcription_path = self.save_transcription_text(self.transcription)
            # Record metrics for successful transcription
            record_transcription(self.transcription)
            self.show_transcription(recording_path)
        except Exception as e:
            self.set_status(f"Transcription error: {str(e)}")
            # Fall back to showing just the recording path if transcription fails
            self.show_recording_path(recording_path, recording_mode)

    def show_transcription(self, recording_path):
        """Display transcription and type it at cursor position."""
        # If transcription failed or is empty, fall back to showing the recording path
        if not self.transcription:
            return
        # Truncate the transcription to around 5 lines for display
        transcription_display = self.transcription[:500]
        if len(self.transcription) > 500:
            transcription_display += "..."
        # Wrap the preview at 60 columns for the curses screen
        display_lines = []
        for i in range(0, len(transcription_display), 60):
            display_lines.append(transcription_display[i:i+60])
        # The transcription will be typed at the cursor position via type_text
        # (type_text handles clipboard operations internally)
        content = [
            "Your recording has been transcribed!",
            "",
            "Transcription preview:",
            ""  # Add a blank line after the preview heading
        ]
        content.extend(display_lines)
        # Add separator and info about typing
        content.append("")
        content.append("")
        content.append("-----")
        content.append("")
        content.append("Full transcription inserted at your cursor position.")
        # Add note about recording file location
        if os.path.exists(recording_path):
            content.append("")
            content.append("")
            content.append(f"Recording file preserved at: {recording_path}")
        # Add note about transcription text file
        if self.transcription_path and os.path.exists(self.transcription_path):
            content.append("")
            content.append("")
            content.append(f"Transcription saved to: {self.transcription_path}")
        # Update UI via callback
        if self.ui_callback:
            self.ui_callback("TRANSCRIPTION COMPLETE!", content)
        # Type the transcription at the cursor position without countdown or verbose output
        type_text(self.transcription, countdown=False, verbose=False)

    def show_recording_path(self, recording_path, recording_mode):
        """Display recording path info when transcription fails."""
        recording_type = "voice" if recording_mode == "audio" else "screen"
        # Check if the recording file still exists
        if recording_path and not os.path.exists(recording_path):
            content = [
                "Your recording has been completed, but the file is no longer available.",
                "",
                "The recording file may have been deleted or moved."
            ]
            if self.ui_callback:
                self.ui_callback("RECORDING UNAVAILABLE", content)
            return
        if recording_path and os.path.exists(recording_path):
            # Get file size in MB
            file_size = os.path.getsize(recording_path) / (1024 * 1024)
            recording_info = f"{recording_type.capitalize()} recording saved: {recording_path} ({file_size:.2f} MB)"
            content = [
                f"Your {recording_type} recording has been completed.",
                "",
                "Recording information:",
                recording_info,
                "",
                "Recording path inserted at your cursor position."
            ]
            # Add note about transcription failure
            content.append("")
            content.append("Note: Transcription was not completed. The recording file is preserved.")
            title = "VOICE RECORDING DONE!" if recording_mode == "audio" else "SCREEN RECORDING DONE!"
            if self.ui_callback:
                self.ui_callback(title, content)
            # Type the recording path at the cursor position without countdown or verbose output
            type_text(recording_path, countdown=False, verbose=False)
        else:
            # Handle case where recording path doesn't exist (path was None/empty)
            content = [
                f"Your {recording_type} recording has been completed, but the file was not saved.",
                "",
                "The recording file may have been deleted or was not created properly."
            ]
            title = "RECORDING UNAVAILABLE"
            if self.ui_callback:
                self.ui_callback(title, content)
transcription_prompts.py | Python | #!/usr/bin/env python3
"""
Shared transcription prompts and utilities for Gemini AI transcription
"""
import os
def load_common_words():
    """
    Load common words from the common_words.txt file.

    Lines that are empty or start with '#' are skipped.

    Returns:
        list: List of common words to incorporate in prompts
    """
    common_words = []
    common_words_path = os.path.join(os.path.dirname(__file__), "common_words.txt")
    if not os.path.exists(common_words_path):
        return common_words
    try:
        # Explicit encoding so the word list reads identically on every platform.
        with open(common_words_path, "r", encoding="utf-8") as f:
            for line in f:
                # Skip comments and empty lines
                line = line.strip()
                if line and not line.startswith("#"):
                    common_words.append(line)
    except (OSError, UnicodeDecodeError) as e:
        # Missing/unreadable list is non-fatal: transcription works without it.
        # (The old `print(...) if __debug__ else None` expression-statement was
        # replaced with a plain print — it silently vanished under `python -O`.)
        print(f"Error loading common words: {e}")
    return common_words
def get_common_words_section():
    """
    Get the formatted common words section for prompts.

    Returns:
        str: Formatted common words section for prompts
    """
    words = load_common_words()
    if not words:
        # No word list available -> prompts get no extra section.
        return ""
    # Bullet-list the preserved terms exactly as they appear in common_words.txt.
    return "\n IMPORTANT TERMS TO PRESERVE EXACTLY:\n - " + "\n - ".join(words) + "\n"
def get_common_instructions():
    """
    Get the common instructions section used in both audio and video transcription prompts.

    Returns:
        str: Formatted common instructions
    """
    # NOTE: this literal IS the behavior — its exact wording steers the model.
    # Edit with care; both the audio and video prompts embed it verbatim.
    return """
    - Clean up speech disfluencies: remove filler words, repetitions, stutters, false starts, self-corrections, and verbal crutches
    - You MUST NOT include phrases like "Here's the transcript:" or any other headers
    - You MUST NOT add timestamps or speaker attributions
    - You MUST NOT include any introductory or concluding remarks
    - You MUST begin immediately with the transcribed content
    - Use punctuation that naturally fits the context - not every phrase needs a period (question marks for questions, colons for introductions, no punctuation for fragments or headers, etc.)
    - Preserve speech tone and emotion - use exclamation marks for excitement, enthusiasm, or strong emotions, even if it's subtle or mild
    - If the speaker uses incomplete sentences or fragments, preserve them when they're intentional
    - Preserve contractions exactly as spoken (e.g., "I'm" stays as "I'm", not expanded to "I am")
    - IMPORTANT: Structure paragraphs properly: all sentences in the same paragraph MUST be together with NO single line breaks between them; use DOUBLE line breaks (empty line) between separate paragraphs
    - CRITICAL: Prioritize semantic meaning over pauses when structuring text:
      * If a speaker pauses mid-sentence and then continues the same thought, format as a single coherent sentence
      * Make paragraph breaks based on topic/subject changes, not based on pauses or timing in speech
      * Never insert line breaks that split a logical thought unit
    - Preserve the original meaning while substantially improving speech clarity
    - ALWAYS maintain the exact capitalization of proper names and terms (e.g., "Claude Code" with both capital Cs)
    - IMPORTANT: Preserve evaluative terms EXACTLY as spoken (e.g., "very good" must not be changed to "pretty good")
    - Format lists properly: one item per line with preserved numbering or bullets
    - Improve sentence flow: avoid starting with "But" or "And" and combine sentences with appropriate conjunctions when needed"""
def get_common_goal():
    """
    Get the common goal statement used in both audio and video transcription prompts.

    Returns:
        str: Formatted common goal statement
    """
    # Shared closing instruction appended to both prompt builders.
    return """
    Your goal is to produce a transcript that reads as if it were written text rather than spoken words.
    Make it concise, clear, and professional - as if it had been carefully edited for publication."""
def get_audio_transcription_prompt():
    """
    Get the complete prompt for audio transcription.

    Returns:
        str: Complete audio transcription prompt with common words
    """
    common_words_section = get_common_words_section()
    common_instructions = get_common_instructions()
    common_goal = get_common_goal()
    # NOTE: the "NO_AUDIO" / "NO_AUDIBLE_SPEECH" sentinels are checked verbatim
    # by typing_metrics.record_transcription — keep them in sync.
    return f"""
    Create a natural, context-appropriate transcription of this audio recording, removing speech disfluencies but preserving the speaker's intent and style.
    IMPORTANT:
    - If there is any audio, attempt to transcribe it even if it seems like background noise
    - Only if there is absolutely no audio at all (complete silence), return exactly "NO_AUDIO"
    - If you've confirmed there is audio but cannot detect any speech, return "NO_AUDIBLE_SPEECH"{common_words_section}
    Critical instructions:
    {common_instructions}
    - Preserve all technical terms, names, and specialized vocabulary accurately
    {common_goal}
    """
def get_video_transcription_prompt():
    """
    Get the complete prompt for video transcription.

    Returns:
        str: Complete video transcription prompt with common words
    """
    common_words_section = get_common_words_section()
    common_instructions = get_common_instructions()
    common_goal = get_common_goal()
    # NOTE: the "NO_AUDIO" / "NO_AUDIBLE_SPEECH" sentinels are checked verbatim
    # by typing_metrics.record_transcription — keep them in sync.
    return f"""
    Create a natural, context-appropriate transcription of this video, removing speech disfluencies while carefully using the visual content as context and preserving the speaker's intent and style.
    IMPORTANT:
    - If there is any audio, attempt to transcribe it even if it seems like background noise
    - Only if there is absolutely no audio at all (complete silence), return exactly "NO_AUDIO"
    - If you've confirmed there is audio but cannot detect any speech, return "NO_AUDIBLE_SPEECH"
    - You MUST return these indicators even if there is visual content/activity on the screen{common_words_section}
    Critical instructions:
    {common_instructions}
    - Pay careful attention to text and names visible on screen (file names, people names, place names)
    - When the speaker refers to on-screen elements, preserve those references accurately
    - Pay special attention to cursor position as it indicates context for insertion points and formatting
    - Note the formatting around the cursor position as it affects how content should be structured
    - Capture technical terms, code, and commands with 100% accuracy
    - Follow the specific capitalization patterns shown on-screen for names, brands, and technical terms
    {common_goal}
    """
type_text.py | Python | #!/usr/bin/env python3
"""
Module for typing text at the cursor position.
Using clipboard-based approach with text-only preservation.
"""
import time
import sys
import platform
import traceback
from pynput.keyboard import Controller, Key, Listener
import copykitten
# Check if we're on macOS — the paste shortcut is Cmd+V there, Ctrl+V elsewhere.
is_macos = platform.system() == 'Darwin'

# Only print system info when running the file directly, not when imported
if __name__ == "__main__":
    print(f"Running on: {platform.system()} {platform.release()}")

# For debugging key events (appended to by the on_press/on_release listeners)
key_events = []
def on_press(key):
    """Monitor key presses for debugging (appends to the module key_events log)."""
    try:
        key_events.append(f"Press: {key}")
    except Exception:
        # Deliberately best-effort: debug logging must never break the listener.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed.
        pass
def on_release(key):
    """Monitor key releases for debugging (appends to the module key_events log)."""
    try:
        key_events.append(f"Release: {key}")
    except Exception:
        # Best-effort logging only; narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        pass
def test_permission(verbose=False):
    """Test if we have accessibility permissions by trying to press and release a harmless key."""
    try:
        if verbose:
            print("Initializing keyboard controller...")
        kb = Controller()
        # Alt on its own has no visible effect, so it is a safe probe key.
        if verbose:
            print("Testing with Alt key press/release...")
        kb.press(Key.alt)
        time.sleep(0.1)
        kb.release(Key.alt)
    except Exception as e:
        if verbose:
            print(f"Permission test failed: {e}")
            traceback.print_exc()
        return False
    else:
        # No exception during the synthetic key event: permissions look OK.
        return True
def type_text(text, countdown=False, verbose=False):
    """
    Type the given text at the current cursor position using clipboard.
    Preserves original text clipboard content.

    The sequence is: save clipboard -> (optional countdown) -> copy text ->
    synthesize the platform paste shortcut -> restore clipboard. The ordering
    is load-bearing; reordering these steps loses the user's clipboard.

    Args:
        text (str): The text to type
        countdown (bool): Whether to show a countdown before typing (default: False)
        verbose (bool): Whether to print debug information (default: False)

    Returns:
        bool: True when the paste sequence completed, False if any step raised.
    """
    try:
        keyboard = Controller()
        # Save current clipboard text content
        original_text = None
        try:
            if verbose:
                print("Saving original clipboard text...")
            original_text = copykitten.paste()
        except Exception as e:
            # Clipboard may be empty or hold non-text data; nothing to restore later.
            if verbose:
                print(f"No text in clipboard or error: {e}")
        # Give user time to position cursor if countdown is enabled
        if countdown:
            if verbose:
                print("\nPositioning cursor in 3 seconds...")
            for i in range(3, 0, -1):
                if verbose:
                    print(f"{i}...")
                time.sleep(1)
        if verbose:
            print("Now pasting text via clipboard...")
            print(f"About to paste: '{text}'")
        # Copy text to clipboard
        copykitten.copy(text)
        # Paste using keyboard shortcut (Cmd+V on macOS, Ctrl+V elsewhere)
        if is_macos:
            keyboard.press(Key.cmd)
            keyboard.press('v')
            keyboard.release('v')
            keyboard.release(Key.cmd)
        else:
            keyboard.press(Key.ctrl)
            keyboard.press('v')
            keyboard.release('v')
            keyboard.release(Key.ctrl)
        # Small delay to ensure paste completes before we overwrite the clipboard
        time.sleep(0.1)
        # Restore original clipboard content
        if verbose:
            print("Restoring original clipboard content...")
        if original_text:
            copykitten.copy(original_text)
        else:
            copykitten.clear()
        # Print debug info if verbose
        if verbose and key_events:
            print("\nDebug - Recorded key events:")
            for event in key_events[-10:]:  # Show last 10 events
                print(f" {event}")
        return True
    except Exception as e:
        if verbose:
            print(f"Error while pasting: {e}")
            traceback.print_exc()
        return False
if __name__ == "__main__":
    # When running the script directly, we want verbose output
    verbose_mode = True
    print(f"Running on: {platform.system()} {platform.release()}")

    # First check if we have permission; without it every key event is dropped.
    print("Testing accessibility permissions...")
    permission_ok = test_permission(verbose=verbose_mode)
    if not permission_ok:
        print("\nERROR: Missing accessibility permissions!")
        print("Please go to System Settings > Privacy & Security > Accessibility")
        print("Add and enable your terminal application (Terminal, iTerm2, or VS Code)")
        sys.exit(1)
    print("Accessibility permissions seem OK")

    # Start key listener for debugging (feeds the module-level key_events log)
    print("Starting key event listener for debugging...")
    listener = Listener(on_press=on_press, on_release=on_release)
    listener.start()

    # Text to type (can be modified as needed)
    text_to_type = "Hello, this is a test of programmatic typing!\nIt handles special characters: !@#$%^&*()\nAnd has\ttabs and\nnewlines too."

    # Execute typing
    print("\nPreparing to type text. Please click where you want to type...")
    success = type_text(text_to_type, countdown=True, verbose=verbose_mode)
    if success:
        print("\nText typing completed successfully.")
    else:
        print("\nFailed to type text.")
        # Additional troubleshooting info for macOS
        if is_macos:
            print("\nMacOS Troubleshooting:")
            print("1. Ensure terminal has Accessibility permission in System Settings")
            print("2. Try running the script with 'sudo' for full permissions")
            print("3. Make sure the target application allows keyboard input")

    # Stop the key listener
    listener.stop()
    time.sleep(0.5)  # Give time for listener to stop cleanly
typing_metrics.py | Python | #!/usr/bin/env python3
"""
Simple typing metrics tracker module.
Records character and word counts from transcriptions.
"""
import os
import csv
from datetime import datetime
def ensure_csv_exists(csv_path):
    """
    Create CSV file with headers if it doesn't exist.

    Args:
        csv_path (str): Path to CSV file
    """
    if os.path.exists(csv_path):
        return
    parent = os.path.dirname(csv_path)
    # dirname() is "" for a bare filename, and os.makedirs("") raises —
    # only create the directory when there actually is one.
    if parent:
        os.makedirs(parent, exist_ok=True)
    # newline='' per the csv module docs; explicit utf-8 keeps the file portable.
    with open(csv_path, 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(['timestamp', 'characters', 'words'])
def record_transcription(text):
    """
    Record metrics for a completed transcription.

    Appends one (timestamp, characters, words) row to typing_metrics.csv,
    creating the file with a header first if needed.

    Args:
        text (str): The transcribed text
    """
    # Skip empty transcriptions
    if not text:
        return
    # Skip the sentinel results defined in transcription_prompts.py
    if text in ("NO_AUDIO", "NO_AUDIBLE_SPEECH"):
        return

    # Calculate metrics
    char_count = len(text)
    word_count = len(text.split())
    timestamp = datetime.now().isoformat()

    # CSV lives next to this module so every entry lands in one place.
    csv_path = os.path.join(os.path.dirname(__file__), "typing_metrics.csv")
    ensure_csv_exists(csv_path)

    # newline='' per the csv module docs; utf-8 matches how the file is created.
    with open(csv_path, 'a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow([timestamp, char_count, word_count])
typing_metrics_web.py | Python | #!/usr/bin/env python3
"""
Web visualization for typing metrics.
Provides a simple web dashboard to display typing time saved statistics.
"""
import os
import csv
import json
import threading
from datetime import datetime, timedelta
from flask import Flask, render_template, jsonify
from collections import defaultdict
# Single Flask app for the metrics dashboard (served by start_web_server or __main__).
app = Flask(__name__)

# Ensure templates directory exists — Flask loads dashboard.html from here.
os.makedirs(os.path.join(os.path.dirname(__file__), "templates"), exist_ok=True)
# Load configuration
def load_config():
    """Load configuration from config.json or use defaults"""
    defaults = {
        "typing_metrics": {
            "wpm": 40,
            "chars_per_word": 5
        }
    }
    config_path = os.path.join(os.path.dirname(__file__), "config.json")
    if not os.path.exists(config_path):
        print("No config.json found. Using default settings.")
        return defaults
    try:
        with open(config_path, 'r') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        # A broken config file falls back to defaults rather than crashing.
        print(f"Error loading config.json: {e}. Using defaults.")
        return defaults
# Get config values once at import; changing config.json requires a restart.
config = load_config()
# Assumed typing speed (words/minute) used to estimate time saved.
WPM = config["typing_metrics"]["wpm"]
# Average characters per word used to convert character counts to words.
CHARS_PER_WORD = config["typing_metrics"]["chars_per_word"]
# Get words per page from reading_metrics if available, or use default
WORDS_PER_PAGE = config.get("reading_metrics", {}).get("words_per_page", 325)
# Create templates for the web dashboard
@app.route('/')
def index():
    """Render the main dashboard page (templates/dashboard.html)."""
    return render_template('dashboard.html')
@app.route('/data')
def get_data():
    """API endpoint to get metrics data.

    Reads typing_metrics.csv and returns JSON with overall totals plus
    daily (last 30 days), weekly (last 12 weeks) and monthly (last 6 months)
    aggregates, each row carrying characters, words, estimated minutes saved
    and page equivalents.
    """
    def _time_saved(chars):
        # Minutes saved = characters / (words-per-minute * chars-per-word).
        # (The original multiplied by 60/60 — a no-op — removed for clarity.)
        return round(chars / (WPM * CHARS_PER_WORD), 1)

    def _pages(words):
        # Page equivalents at the configured words-per-page.
        return round(words / WORDS_PER_PAGE, 1)

    def _entry(label_key, label, bucket):
        # One aggregate row for the daily/weekly/monthly series.
        return {
            label_key: label,
            "characters": bucket["characters"],
            "words": bucket["words"],
            "time_saved_minutes": _time_saved(bucket["characters"]),
            "pages": _pages(bucket["words"]),
        }

    csv_path = os.path.join(os.path.dirname(__file__), "typing_metrics.csv")
    # Return empty data if CSV doesn't exist yet
    if not os.path.exists(csv_path):
        return jsonify({
            "total_chars": 0,
            "total_words": 0,
            "total_pages": 0,
            "time_saved_minutes": 0,
            "words_per_page": WORDS_PER_PAGE,
            "daily_metrics": [],
            "weekly_metrics": [],
            "monthly_metrics": []
        })

    # Read data from CSV
    data = []
    with open(csv_path, 'r', newline='') as file:
        reader = csv.DictReader(file)
        for row in reader:
            # Convert numeric strings to integers
            row['characters'] = int(row['characters'])
            row['words'] = int(row['words'])
            # Parse timestamp
            row['timestamp'] = datetime.fromisoformat(row['timestamp'])
            data.append(row)

    # Calculate totals
    total_chars = sum(row['characters'] for row in data)
    total_words = sum(row['words'] for row in data)

    # Aggregate into per-day / per-week / per-month buckets in one pass.
    daily_data = defaultdict(lambda: {"characters": 0, "words": 0})
    weekly_data = defaultdict(lambda: {"characters": 0, "words": 0})
    monthly_data = defaultdict(lambda: {"characters": 0, "words": 0})
    for row in data:
        for bucket, fmt in (
            (daily_data, '%Y-%m-%d'),
            (weekly_data, '%Y-W%W'),
            (monthly_data, '%Y-%m'),
        ):
            key = row['timestamp'].strftime(fmt)
            bucket[key]['characters'] += row['characters']
            bucket[key]['words'] += row['words']

    today = datetime.now().date()

    # Last 30 days, oldest first.
    daily_metrics = []
    for i in range(29, -1, -1):
        day_key = (today - timedelta(days=i)).strftime('%Y-%m-%d')
        daily_metrics.append(_entry("date", day_key, daily_data[day_key]))

    # Last 12 weeks, oldest first (week-of-year keys, Monday-first %W).
    weekly_metrics = []
    for i in range(11, -1, -1):
        week_key = (today - timedelta(weeks=i)).strftime('%Y-W%W')
        weekly_metrics.append(_entry("week", week_key, weekly_data[week_key]))

    # Last 6 months, oldest first.
    monthly_metrics = []
    for i in range(5, -1, -1):
        # Step back i calendar months from the first of the current month.
        month_date = today.replace(day=1)
        for _ in range(i):
            if month_date.month == 1:
                month_date = month_date.replace(year=month_date.year - 1, month=12)
            else:
                month_date = month_date.replace(month=month_date.month - 1)
        month_key = month_date.strftime('%Y-%m')
        monthly_metrics.append(_entry("month", month_key, monthly_data[month_key]))

    return jsonify({
        "total_chars": total_chars,
        "total_words": total_words,
        "total_pages": _pages(total_words),
        "time_saved_minutes": _time_saved(total_chars),
        "wpm_setting": WPM,
        "words_per_page": WORDS_PER_PAGE,
        "daily_metrics": daily_metrics,
        "weekly_metrics": weekly_metrics,
        "monthly_metrics": monthly_metrics
    })
def create_templates():
    """Create the dashboard HTML template unless one is already present."""
    here = os.path.dirname(__file__)
    dashboard_path = os.path.join(here, "templates", "dashboard.html")
    # Never clobber an existing (possibly user-edited) template.
    if os.path.exists(dashboard_path):
        print("Dashboard template already exists, skipping creation")
        return
# Function to start the web server
def start_web_server(port=5050, debug=True):
    """Start the Flask web server in a background daemon thread.

    Args:
        port (int): TCP port to bind on 127.0.0.1.
        debug (bool): Enable Flask debug mode (turns on template auto-reload).
    """
    # Create templates first
    create_templates()
    # Start server. use_reloader must be disabled here: Werkzeug's reloader
    # installs signal handlers, which raises ValueError when app.run() is
    # called outside the main thread. Template auto-reload still works
    # because debug mode enables TEMPLATES_AUTO_RELOAD independently.
    threading.Thread(
        target=lambda: app.run(host='127.0.0.1', port=port, debug=debug, use_reloader=False),
        daemon=True,
    ).start()
    print(f"Typing metrics web server started at http://127.0.0.1:{port}/")
    if debug:
        print("Debug mode enabled - templates will automatically reload when modified")
if __name__ == "__main__":
    # CLI entry point: run the Flask app in the foreground (blocking),
    # unlike start_web_server() which uses a background thread.
    import argparse

    arg_parser = argparse.ArgumentParser(description="Start typing metrics web server")
    arg_parser.add_argument("-p", "--port", type=int, default=5050, help="Port to run web server on")
    arg_parser.add_argument("--no-debug", action="store_true", help="Disable debug mode (disables auto-reloading)")
    cli_args = arg_parser.parse_args()

    debug_mode = not cli_args.no_debug

    # Make sure templates exist before serving anything.
    create_templates()

    print(f"Starting typing metrics web server on http://127.0.0.1:{cli_args.port}/")
    if debug_mode:
        print("Debug mode enabled - templates will automatically reload when modified")
    print("Press Ctrl+C to stop the server")

    # Start the web server in main thread
    app.run(host='127.0.0.1', port=cli_args.port, debug=debug_mode)
video_transcription.py | Python | #!/usr/bin/env python3
"""
Video transcription module for Gemini 2.0 Flash Thinking
Processes video files and returns transcribed text with minimal debug output
"""
import os
import sys
import base64
from dotenv import load_dotenv
import google.generativeai as genai
from transcription_prompts import get_video_transcription_prompt
# Load environment variables from .env file
load_dotenv()
def transcribe_video(video_file_path=None, verbose=False):
    """
    Process a video with Gemini and transcribe its content.

    Args:
        video_file_path (str, optional): Path to the video file to process.
            Defaults to combined_recording.mp4 next to this script.
        verbose (bool): Whether to show detailed output logs.

    Returns:
        str: The stripped transcription text if successful, None on any
        failure (missing API key, missing file, or API error).
    """
    # Configure Gemini client - use GEMINI_API_KEY from .env file
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        if verbose:
            print("Error: GEMINI_API_KEY not found in environment variables")
            print("Please make sure you have a .env file with your GEMINI_API_KEY")
        return None
    # Use default file if none provided
    if not video_file_path:
        video_file_path = os.path.join(os.path.dirname(__file__), "combined_recording.mp4")
        if verbose:
            print(f"No video file specified, using default: {video_file_path}")
    # Check if the video file exists
    if not os.path.exists(video_file_path):
        if verbose:
            print(f"Error: Video file '{video_file_path}' not found.")
        return None
    try:
        # Configure Gemini API
        genai.configure(api_key=api_key)
        # Read the entire video into memory; it is sent inline as bytes,
        # not uploaded via the File API.
        if verbose:
            print(f"Reading video file: {video_file_path}")
        with open(video_file_path, "rb") as f:
            video_data = f.read()
        # Initialize the model
        # Previous model: standard flash model
        # model = genai.GenerativeModel("gemini-2.0-flash")
        # Tried model: pro experimental model (was too slow)
        # model = genai.GenerativeModel("gemini-2.5-pro-exp-03-25")
        # Current model: flash-thinking experimental model
        model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
        # Create parts for the generation (inline bytes + declared MIME type)
        video_part = {"mime_type": "video/mp4", "data": video_data}
        # Get transcription prompt from shared module
        transcription_prompt = get_video_transcription_prompt()
        if verbose:
            print("Sending request to Gemini Flash Thinking Experimental...")
            print("\n--- Gemini Response ---")
        # Generate the response (single blocking call, no streaming)
        response = model.generate_content([transcription_prompt, video_part])
        if verbose:
            print(response.text)
            print("\n--- End of Response ---")
        # Return the transcription text with whitespace stripped
        return response.text.strip()
    except Exception as e:
        # Deliberately broad: any read/API failure is reported (verbose only)
        # and surfaced to the caller as None rather than an exception.
        if verbose:
            print(f"Error during video processing: {str(e)}")
        return None
if __name__ == "__main__":
    # CLI entry point: transcribe one video and print the result.
    import argparse

    cli = argparse.ArgumentParser(description="Transcribe video using Gemini AI")
    cli.add_argument("-f", "--file", type=str, help="Path to video file to transcribe")
    cli.add_argument("-v", "--verbose", action="store_true", help="Show detailed output")
    opts = cli.parse_args()

    transcript = transcribe_video(video_file_path=opts.file, verbose=opts.verbose)

    # An empty/None transcript counts as failure, matching the truthiness check.
    if not transcript:
        print("Transcription failed or returned no results.")
    elif not opts.verbose:
        print(transcript)  # Only print result directly in non-verbose mode
const http = require('http');
const fs = require('fs');
const path = require('path');
const { execFileSync, execSync, spawn } = require('child_process');
// Connected SSE responses, one per open dashboard tab.
const sseClients = new Set();

// Long-running `docker events` watcher for safeclaw containers.
let dockerEvents;
const NOTIFY_ACTIONS = ['start', 'stop', 'die', 'destroy', 'create'];

function startDockerEvents() {
  dockerEvents = spawn('docker', ['events', '--filter', 'name=safeclaw', '--format', '{{.Action}}']);
  dockerEvents.stdout.on('data', (chunk) => {
    const action = chunk.toString().trim();
    if (!NOTIFY_ACTIONS.includes(action)) return;
    // Push the event to every connected dashboard tab.
    for (const client of sseClients) {
      client.write(`data: ${action}\n\n`);
    }
  });
  dockerEvents.on('error', () => {});
  // Respawn the watcher if it ever exits.
  dockerEvents.on('close', () => {
    setTimeout(startDockerEvents, 1000);
  });
}
startDockerEvents();

const PORT = 7680;
const TEMPLATE_PATH = path.join(__dirname, 'template.html');
// Enumerate all safeclaw containers (running and stopped) and describe
// them for the dashboard: name, mapped ttyd host port/URL, bind mounts
// and running state. Shells out to `docker`; returns [] when docker is
// unavailable or there are no matching containers.
function getSessions() {
  const sessions = [];
  // Get all safeclaw containers (running and stopped)
  try {
    const output = execSync(
      `docker ps -a --format '{{.Names}}\\t{{.Status}}' --filter 'name=safeclaw'`,
      { encoding: 'utf8' }
    );
    output.trim().split('\n').filter(Boolean).forEach(line => {
      const [name, status] = line.split('\t');
      // Docker reports running containers with a status starting "Up ...".
      const isRunning = status.startsWith('Up');
      let port = null;
      let volume = '-';
      if (isRunning) {
        // Get port for running containers
        try {
          const portOutput = execSync(
            `docker ps --format '{{.Ports}}' --filter 'name=^${name}$'`,
            { encoding: 'utf8' }
          ).trim();
          // ttyd listens on 7681 inside the container; capture the host port.
          const portMatch = portOutput.match(/:(\d+)->7681/);
          port = portMatch ? portMatch[1] : null;
        } catch (e) {}
      }
      // Get volume mount (exclude internal projects mount)
      try {
        const inspect = execSync(
          `docker inspect ${name} --format '{{range .Mounts}}{{if eq .Type "bind"}}{{.Source}}:{{.Destination}}\n{{end}}{{end}}'`,
          { encoding: 'utf8' }
        ).trim();
        // The .claude/projects mount only persists history; hide it from the UI.
        const mounts = inspect.split('\n').filter(m =>
          m && !m.endsWith(':/home/sclaw/.claude/projects')
        );
        volume = mounts.join(', ') || '-';
      } catch (e) {}
      sessions.push({
        name,
        port,
        url: port ? `http://localhost:${port}` : null,
        volume,
        active: isRunning
      });
    });
  } catch (e) {}
  // Sort: active first, then by name
  sessions.sort((a, b) => {
    if (a.active !== b.active) return b.active - a.active;
    return a.name.localeCompare(b.name);
  });
  return sessions;
}
function stopContainer(name) {
try {
execSync(`docker stop -t 1 ${name}`, { encoding: 'utf8' });
return true;
} catch (e) {
return false;
}
}
function deleteContainer(name) {
try {
execSync(`docker rm ${name}`, { encoding: 'utf8' });
return true;
} catch (e) {
return false;
}
}
// Launch scripts/new.sh to create a new session container.
// Args are passed as an argv array via execFileSync so that session
// names, volume specs and free-text queries cannot break shell quoting
// or inject shell commands (the previous string-built command only
// escaped double quotes, leaving backticks and $() live).
// Returns { success, url? } or { success: false, error }.
function createContainer(options) {
  const scriptPath = path.join(__dirname, '..', 'scripts', 'new.sh');
  const args = ['-n']; // always skip browser open (we handle it in frontend)
  if (options.name) {
    args.push('-s', options.name);
  }
  if (options.volume) {
    args.push('-v', options.volume);
  }
  if (options.query) {
    args.push('-q', options.query);
  }
  try {
    const output = execFileSync(scriptPath, args, { encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] });
    // Extract URL from output
    const urlMatch = output.match(/http:\/\/localhost:\d+/);
    const url = urlMatch ? urlMatch[0] : null;
    return { success: true, url };
  } catch (e) {
    // execFileSync error carries stderr as a Buffer when stdio is piped
    let error = 'Failed to create session';
    if (e.stderr && e.stderr.length > 0) {
      error = e.stderr.toString().trim();
    } else if (e.message) {
      error = e.message;
    }
    return { success: false, error };
  }
}
// Restart a stopped container and relaunch ttyd inside it, re-injecting
// any secrets from ~/.config/safeclaw/.secrets as environment variables.
// Returns { success: true, url } on success, { success: false } otherwise.
function startContainer(name) {
  try {
    execSync(`docker start ${name}`, { encoding: 'utf8' });
    // Start ttyd inside the container
    const secretsDir = process.env.HOME + '/.config/safeclaw/.secrets';
    let envFlags = '';
    try {
      // One file per secret: filename is the env var name, content the value.
      const files = fs.readdirSync(secretsDir);
      files.forEach(f => {
        const val = fs.readFileSync(`${secretsDir}/${f}`, 'utf8').trim();
        envFlags += ` -e ${f}=${val}`;
      });
    } catch (e) {}
    const sessionName = name.replace('safeclaw-', '');
    const title = `SafeClaw - ${sessionName}`;
    // -W allows browser input; 7681 is the container-side ttyd port that
    // `docker run` mapped to a host port when the container was created.
    execSync(`docker exec ${envFlags} -d ${name} ttyd -W -t titleFixed="${title}" -p 7681 /home/sclaw/ttyd-wrapper.sh`, { encoding: 'utf8' });
    // Get the port
    const portInfo = execSync(`docker ps --filter "name=^${name}$" --format "{{.Ports}}"`, { encoding: 'utf8' }).trim();
    const portMatch = portInfo.match(/:(\d+)->/);
    const port = portMatch ? portMatch[1] : '7681';
    return { success: true, url: `http://localhost:${port}` };
  } catch (e) {
    return { success: false };
  }
}
// Build the inner HTML for the dashboard page: an empty-state help panel
// when there are no sessions, otherwise a session table plus a grid of
// live terminal iframes for the active sessions. Returns an HTML string
// the server splices into template.html at {{CONTENT}}.
function renderContent(sessions) {
  if (sessions.length === 0) {
    // Empty state: usage help plus one randomly chosen tip.
    return `<div class="empty">
  <p>no sessions</p>
  <table class="help">
    <tr><td><code>./scripts/run.sh</code></td><td>default session</td></tr>
    <tr><td><code>./scripts/run.sh -s name</code></td><td>named session</td></tr>
    <tr><td><code>./scripts/run.sh -n</code></td><td>skip opening browser</td></tr>
    <tr><td><code>./scripts/run.sh -v ~/myproject:/home/sclaw/myproject</code></td><td>mount volume</td></tr>
    <tr><td><code>./scripts/run.sh -q "question"</code></td><td>start with query</td></tr>
  </table>
  <p class="tip">tip: ${['in a session, press q or scroll to the bottom to exit scroll mode and resume typing', 'on this dashboard, press tab and enter to quickly create a new session', 'run node scripts/manage-env.js to manage environment variables'][Math.floor(Math.random() * 3)]}</p>
</div>`;
  }
  // One table row per session; inactive rows swap the URL for a start
  // button and the stop button for a delete button.
  const sessionRows = sessions.map(s => {
    const displayName = s.name.replace('safeclaw-', '');
    const displayUrl = s.url ? s.url.replace('http://', '') : '';
    const urlCell = s.active
      ? `<a href="${s.url}" target="_blank">${displayUrl}</a>`
      : `<button class="start-btn" onclick="startSession('${s.name}')">start</button>`;
    const actionBtn = s.active
      ? `<button class="stop-btn" onclick="stopSession('${s.name}', this)">stop</button>`
      : `<button class="delete-btn" onclick="deleteSession('${s.name}', this)">delete</button>`;
    return `
      <tr class="${s.active ? '' : 'inactive-row'}" data-name="${s.name}" data-url="${s.url || ''}">
        <td><a href="#" class="session-name" onclick="showSessionInfo('${s.name}'); return false;">${displayName}</a></td>
        <td>${urlCell}</td>
        <td class="volume">${s.volume || '-'}</td>
        <td>${actionBtn}</td>
      </tr>
    `;
  }).join('');
  // Embedded terminal iframes, active sessions only.
  const activeSessions = sessions.filter(s => s.active);
  const iframes = activeSessions.map(s => `
    <div class="frame" id="frame-${s.name}">
      <div class="frame-bar">
        <span>${s.name.replace('safeclaw-', '')}</span>
        <div class="frame-actions">
          <a href="#" class="frame-stop" onclick="stopSessionLink('${s.name}', this); return false;">stop</a>
          <a href="#" onclick="document.querySelector('#frame-${s.name} iframe').src='${s.url}'; return false;">refresh</a>
          <a href="${s.url}" target="_blank">open</a>
        </div>
      </div>
      <iframe src="${s.url}"></iframe>
    </div>
  `).join('');
  // A lone active session gets the full-width "single" layout.
  return `
    <div class="table-wrapper">
      <table class="sessions">
        <thead><tr><th>Session</th><th>URL</th><th>Volume <span class="info-icon">i<span class="tooltip">Conversation history is persisted via a volume mount not shown here</span></span></th><th></th></tr></thead>
        <tbody>${sessionRows}</tbody>
      </table>
    </div>
    ${activeSessions.length > 0 ? `<div class="frames${activeSessions.length === 1 ? ' single' : ''}">${iframes}</div>` : ''}
  `;
}
// Collect a POST body, parse it as JSON and answer with the JSON result
// of handler(parsed). Malformed JSON gets a 400 instead of throwing in
// the 'end' listener (which would crash the whole process).
function handleJsonPost(req, res, handler) {
  let body = '';
  req.on('data', chunk => body += chunk);
  req.on('end', () => {
    let parsed;
    try {
      parsed = JSON.parse(body);
    } catch (e) {
      res.writeHead(400, { 'Content-Type': 'application/json' });
      res.end(JSON.stringify({ success: false, error: 'Invalid JSON body' }));
      return;
    }
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify(handler(parsed)));
  });
}

// Routes: JSON API for the session table plus an SSE stream; everything
// else renders the dashboard page from the template.
const server = http.createServer((req, res) => {
  const url = new URL(req.url, `http://localhost:${PORT}`);
  if (url.pathname === '/api/sessions') {
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify(getSessions()));
  } else if (url.pathname === '/api/stop' && req.method === 'POST') {
    handleJsonPost(req, res, ({ name }) => ({ success: stopContainer(name) }));
  } else if (url.pathname === '/api/delete' && req.method === 'POST') {
    handleJsonPost(req, res, ({ name }) => ({ success: deleteContainer(name) }));
  } else if (url.pathname === '/api/start' && req.method === 'POST') {
    handleJsonPost(req, res, ({ name }) => startContainer(name));
  } else if (url.pathname === '/api/create' && req.method === 'POST') {
    handleJsonPost(req, res, (options) => createContainer(options));
  } else if (url.pathname === '/api/events') {
    // Server-Sent Events for real-time updates; keep the response open
    // and registered until the client disconnects.
    res.writeHead(200, {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive'
    });
    res.write('data: connected\n\n');
    sseClients.add(res);
    req.on('close', () => sseClients.delete(res));
  } else {
    // Dashboard page: template re-read every request so edits show up live.
    const template = fs.readFileSync(TEMPLATE_PATH, 'utf8');
    const content = renderContent(getSessions());
    const html = template.replace('{{CONTENT}}', content);
    res.writeHead(200, { 'Content-Type': 'text/html' });
    res.end(html);
  }
});
// Bind to loopback only — this dashboard can start/stop/delete containers.
server.listen(PORT, '127.0.0.1', function onListening() {
  console.log(`Dashboard: http://localhost:${PORT}`);
});
dashboard/template.html | HTML | <!DOCTYPE html>
<html>
<head>
<title>SafeClaw</title>
<style>
* { box-sizing: border-box; margin: 0; padding: 0; }
body {
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, monospace;
background: #141210;
color: #c9c5c0;
padding: 16px clamp(8px, 2vw, 16px);
}
header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 18px;
}
h1 {
font-size: 20px;
font-weight: normal;
}
h1 span { color: #d4a55a; }
h1 em { color: #8b949e; font-style: normal; }
.new-btn {
background: transparent;
border: 1px solid #3a5040;
color: #70a080;
padding: 6px 12px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 14px;
}
.new-btn:hover {
background: #3a5040;
color: #c9c5c0;
}
.table-wrapper {
overflow-x: auto;
margin-bottom: 24px;
max-width: 100%;
}
table {
width: 100%;
border-collapse: collapse;
table-layout: fixed;
}
th, td {
padding: 8px 12px;
text-align: left;
border-bottom: 1px solid #2a2724;
}
th {
color: #8b949e;
font-weight: normal;
font-size: 12px;
}
td a {
color: #d4a55a;
text-decoration: none;
}
td a:hover { text-decoration: underline; }
.sessions th:nth-child(4), .sessions td:nth-child(4) { width: 85px; }
.sessions td:nth-child(2) {
max-width: 30vw;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
direction: rtl;
text-align: left;
}
.info-icon {
display: inline-flex;
align-items: center;
justify-content: center;
width: 14px;
height: 14px;
border-radius: 50%;
border: 1px solid #555;
font-size: 10px;
font-style: italic;
font-family: serif;
color: #888;
cursor: help;
vertical-align: middle;
margin-left: -2px;
position: relative;
top: -1px;
}
.info-icon:hover { color: #ccc; border-color: #999; }
.info-icon .tooltip {
display: none;
position: absolute;
top: calc(100% + 6px);
right: 0;
background: #2a2724;
border: 1px solid #444;
color: #ccc;
font-size: 12px;
font-style: normal;
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, monospace;
padding: 6px 10px;
border-radius: 4px;
white-space: nowrap;
z-index: 10;
}
.info-icon:hover .tooltip { display: block; }
.volume {
color: #b0b8c0;
font-size: 12px;
max-width: 250px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
direction: rtl;
text-align: left;
}
.volume:hover {
max-width: none;
overflow: visible;
white-space: normal;
word-break: break-all;
direction: ltr;
}
.frames {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 16px;
}
.frames.single {
grid-template-columns: 1fr;
}
@media (max-width: 900px) {
.frames {
grid-template-columns: 1fr;
}
}
.frame {
background: #1c1917;
border: 1px solid #2a2724;
border-radius: 6px;
overflow: hidden;
}
.frame-bar {
padding: 8px 12px;
border-bottom: 1px solid #2a2724;
display: flex;
justify-content: space-between;
font-size: 12px;
}
.frame-actions {
display: flex;
gap: 12px;
}
.frame-bar a {
color: #d4a55a;
text-decoration: none;
}
iframe {
width: 100%;
height: 400px;
border: none;
background: #000;
}
.empty {
color: #8b949e;
padding: 32px 16px;
text-align: center;
max-width: 700px;
margin: 0 auto;
}
.empty > p {
font-size: 20px;
margin-bottom: 24px;
}
.help {
margin: 0 auto;
border-collapse: collapse;
max-width: 100%;
}
.help td {
padding: 8px 12px;
font-size: 16px;
vertical-align: top;
}
.help td:first-child {
max-width: 350px;
word-break: break-all;
}
.help code {
color: #c9c5c0;
}
.help td:last-child {
color: #6b6560;
word-wrap: break-word;
}
.tip {
margin-top: 24px;
font-size: 13px;
color: #6b6560;
}
.stop-btn {
background: transparent;
border: 1px solid #5c4033;
color: #a08070;
padding: 2px 6px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 12px;
}
.stop-btn:hover {
background: #5c4033;
color: #c9c5c0;
}
.inactive {
color: #6b6560;
}
.inactive-row td {
opacity: 0.6;
}
.session-name {
color: #c9c5c0;
text-decoration: none;
}
.session-name:hover {
color: #d4a55a;
}
.delete-btn {
background: transparent;
border: 1px solid #4a3030;
color: #a07070;
padding: 2px 6px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 12px;
}
.delete-btn:hover {
background: #4a3030;
color: #c9c5c0;
}
.start-btn {
background: transparent;
border: 1px solid #3a5040;
color: #70a080;
padding: 2px 6px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 12px;
}
.start-btn:hover {
background: #3a5040;
color: #c9c5c0;
}
/* Modal */
.modal-overlay {
display: none;
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(0, 0, 0, 0.7);
z-index: 100;
justify-content: center;
align-items: center;
}
.modal-overlay.open { display: flex; }
.modal {
background: #1c1917;
border: 1px solid #2a2724;
border-radius: 8px;
padding: 24px;
min-width: 400px;
max-width: 90vw;
}
.modal h2 {
font-size: 16px;
font-weight: normal;
margin-bottom: 6px;
color: #c9c5c0;
}
.modal-subtitle {
font-size: 12px;
color: #6b6560;
margin-bottom: 20px;
}
.modal-field {
margin-bottom: 16px;
}
.modal-field label {
display: block;
font-size: 12px;
color: #8b949e;
margin-bottom: 6px;
}
.modal-field input[type="text"] {
width: 100%;
padding: 8px 10px;
background: #141210;
border: 1px solid #2a2724;
border-radius: 4px;
color: #c9c5c0;
font-family: inherit;
font-size: 14px;
}
.modal-field input[type="text"]:focus {
outline: none;
border-color: #3a5040;
}
.modal-field input[type="text"]::placeholder {
color: #6b6560;
}
.modal-hint {
font-size: 11px;
color: #6b6560;
margin-top: 4px;
}
.modal-hint.error {
color: #a07070;
}
.modal-field input[type="text"].error {
border-color: #4a3030;
}
.modal-field-checkbox {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 16px;
}
.modal-field-checkbox input[type="checkbox"] {
width: 16px;
height: 16px;
accent-color: #70a080;
color-scheme: dark;
}
.modal-field-checkbox label {
font-size: 14px;
color: #c9c5c0;
cursor: pointer;
}
.modal-actions {
display: flex;
justify-content: flex-end;
gap: 12px;
margin-top: 24px;
}
.modal-cancel {
background: transparent;
border: 1px solid #2a2724;
color: #8b949e;
padding: 8px 16px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 14px;
}
.modal-cancel:hover {
background: #2a2724;
color: #c9c5c0;
}
.modal-submit {
background: #3a5040;
border: 1px solid #3a5040;
color: #c9c5c0;
padding: 8px 16px;
border-radius: 4px;
cursor: pointer;
font-family: inherit;
font-size: 14px;
}
.modal-submit:hover {
background: #4a6050;
}
</style>
<script>
// Two-step confirm state: the button/link currently armed with a "...?"
// label. Only one confirmation can be pending at a time.
let activeConfirm = null;

// Stop a running session. First click arms a "stop?" confirmation on the
// button; a second click within 3s performs the stop and updates the row
// and iframe in place (no page reload).
function stopSession(name, btn) {
  // If already confirming, do the stop
  if (btn.dataset.confirming === 'true') {
    fetch('/api/stop', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ name })
    }).then(() => {
      // Update row to inactive
      const row = document.querySelector(`tr[data-name="${name}"]`);
      if (row) {
        row.classList.add('inactive-row');
        row.querySelector('td:nth-child(2)').innerHTML = `<button class="start-btn" onclick="startSession('${name}')">start</button>`;
        row.querySelector('td:nth-child(4)').innerHTML = `<button class="delete-btn" onclick="deleteSession('${name}', this)">delete</button>`;
      }
      // Remove iframe
      const frame = document.getElementById(`frame-${name}`);
      if (frame) frame.remove();
      updateFramesLayout();
    });
    return;
  }
  // Cancel any other active confirm
  if (activeConfirm && activeConfirm !== btn) {
    resetButton(activeConfirm);
  }
  // Show confirm state
  btn.dataset.confirming = 'true';
  btn.dataset.originalText = btn.textContent;
  btn.textContent = 'stop?';
  activeConfirm = btn;
  // Auto-cancel after 3 seconds
  btn.timeout = setTimeout(() => resetButton(btn), 3000);
}

// Return an armed button/link to its idle state and clear the pending
// confirm if it was this element.
function resetButton(btn) {
  if (btn.timeout) clearTimeout(btn.timeout);
  btn.dataset.confirming = 'false';
  btn.textContent = btn.dataset.originalText || 'stop';
  if (activeConfirm === btn) activeConfirm = null;
}

// Click elsewhere to cancel
document.addEventListener('click', (e) => {
  if (activeConfirm && !activeConfirm.contains(e.target)) {
    resetButton(activeConfirm);
  }
});
// Stop from iframe header (same logic, works with links)
// NOTE(review): intentionally duplicates stopSession's confirm/stop flow
// for <a> elements — consider merging the two once both call sites can
// be updated together.
function stopSessionLink(name, link) {
  if (link.dataset.confirming === 'true') {
    fetch('/api/stop', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ name })
    }).then(() => {
      // Update row to inactive
      const row = document.querySelector(`tr[data-name="${name}"]`);
      if (row) {
        row.classList.add('inactive-row');
        row.querySelector('td:nth-child(2)').innerHTML = `<button class="start-btn" onclick="startSession('${name}')">start</button>`;
        row.querySelector('td:nth-child(4)').innerHTML = `<button class="delete-btn" onclick="deleteSession('${name}', this)">delete</button>`;
      }
      // Remove iframe
      const frame = document.getElementById(`frame-${name}`);
      if (frame) frame.remove();
      updateFramesLayout();
    });
    return;
  }
  // Arm the two-step confirmation, cancelling any other pending one.
  if (activeConfirm && activeConfirm !== link) {
    resetButton(activeConfirm);
  }
  link.dataset.confirming = 'true';
  link.dataset.originalText = link.textContent;
  link.textContent = 'stop?';
  activeConfirm = link;
  link.timeout = setTimeout(() => resetButton(link), 3000);
}
// Delete a stopped session's container. Same two-step pattern as
// stopSession: first click arms "delete?", a second click within 3s
// deletes, removes the row, and reloads if the table becomes empty
// (so the empty-state help panel renders).
function deleteSession(name, btn) {
  // If already confirming, do the delete
  if (btn.dataset.confirming === 'true') {
    fetch('/api/delete', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ name })
    }).then(() => {
      // Remove row
      const row = document.querySelector(`tr[data-name="${name}"]`);
      if (row) row.remove();
      // Remove iframe if exists
      const frame = document.getElementById(`frame-${name}`);
      if (frame) frame.remove();
      updateFramesLayout();
      // Reload if no sessions left
      const remaining = document.querySelectorAll('.sessions tbody tr');
      if (remaining.length === 0) location.reload();
    });
    return;
  }
  // Cancel any other active confirm
  if (activeConfirm && activeConfirm !== btn) {
    resetButton(activeConfirm);
  }
  // Show confirm state
  btn.dataset.confirming = 'true';
  btn.dataset.originalText = btn.textContent;
  btn.textContent = 'delete?';
  activeConfirm = btn;
  // Auto-cancel after 3 seconds
  btn.timeout = setTimeout(() => resetButton(btn), 3000);
}
// Restart a stopped session via POST /api/start, then flip the table row
// back to its active state and attach a live terminal iframe — all
// without a page reload. Silently does nothing if the server reports
// failure.
async function startSession(name) {
  const res = await fetch('/api/start', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name })
  });
  const data = await res.json();
  if (data.success) {
    const row = document.querySelector(`tr[data-name="${name}"]`);
    if (row) {
      // Prefer the fresh URL from the server; fall back to the row's cached one.
      const url = data.url || row.dataset.url;
      const displayUrl = url.replace('http://', '');
      row.classList.remove('inactive-row');
      row.querySelector('td:nth-child(2)').innerHTML = `<a href="${url}" target="_blank">${displayUrl}</a>`;
      row.querySelector('td:nth-child(4)').innerHTML = `<button class="stop-btn" onclick="stopSession('${name}', this)">stop</button>`;
      // Add iframe (only if not already present)
      const displayName = name.replace('safeclaw-', '');
      let frames = document.querySelector('.frames');
      // Create frames container if it doesn't exist
      if (!frames) {
        const tableWrapper = document.querySelector('.table-wrapper');
        if (tableWrapper) {
          tableWrapper.insertAdjacentHTML('afterend', '<div class="frames single"></div>');
          frames = document.querySelector('.frames');
        }
      }
      if (frames && !document.getElementById(`frame-${name}`)) {
        frames.insertAdjacentHTML('beforeend', `
          <div class="frame" id="frame-${name}">
            <div class="frame-bar">
              <span>${displayName}</span>
              <div class="frame-actions">
                <a href="#" class="frame-stop" onclick="stopSessionLink('${name}', this); return false;">stop</a>
                <a href="#" onclick="document.querySelector('#frame-${name} iframe').src='${url}'; return false;">refresh</a>
                <a href="${url}" target="_blank">open</a>
              </div>
            </div>
            <iframe src="${url}"></iframe>
          </div>
        `);
      }
    }
  }
}
// Live updates: the server watches `docker events` and pushes container
// lifecycle actions over SSE; reload so external changes appear.
const events = new EventSource('/api/events');
const RELOAD_ACTIONS = ['create', 'start', 'stop', 'die', 'destroy'];
events.onmessage = (e) => {
  if (RELOAD_ACTIONS.includes(e.data)) {
    location.reload();
  }
};

// Keep the iframe grid in sync: a single remaining frame spans full width.
function updateFramesLayout() {
  const container = document.querySelector('.frames');
  if (!container) return;
  const frameCount = container.querySelectorAll('.frame').length;
  container.classList.toggle('single', frameCount === 1);
}
// Open the "new session" dialog, focus the name field and run an initial
// validation pass (so a duplicate "default" is flagged immediately).
function openNewSessionModal() {
  document.getElementById('new-session-modal').classList.add('open');
  document.getElementById('session-name').focus();
  validateSessionName();
}

// Close the dialog and reset every field to its default state.
function closeNewSessionModal() {
  document.getElementById('new-session-modal').classList.remove('open');
  // Reset form
  document.getElementById('session-name').value = '';
  document.getElementById('session-volume').value = '';
  document.getElementById('session-query').value = '';
  document.getElementById('session-open-tab').checked = false;
  clearSessionNameError();
}

// Session names currently shown in the table, container prefix stripped.
function getExistingSessionNames() {
  const rows = document.querySelectorAll('.sessions tbody tr[data-name]');
  return Array.from(rows).map(row => row.dataset.name.replace('safeclaw-', ''));
}

function sessionNameExists(name) {
  return getExistingSessionNames().includes(name);
}

// Mark the name input invalid and explain which name collides.
function showSessionNameError(name) {
  const input = document.getElementById('session-name');
  const hint = document.getElementById('session-name-hint');
  input.classList.add('error');
  hint.classList.add('error');
  hint.textContent = `session "${name}" already seems to exist`;
}

// Restore the neutral hint and clear the error styling.
function clearSessionNameError() {
  const input = document.getElementById('session-name');
  const hint = document.getElementById('session-name-hint');
  input.classList.remove('error');
  hint.classList.remove('error');
  hint.textContent = 'use a unique name to create a new session';
}

// Normalize the name as the user types (spaces -> dashes, lowercase)
// while keeping the caret in place, then check for duplicates. An empty
// input is treated as the "default" session name.
function validateSessionName() {
  const input = document.getElementById('session-name');
  const pos = input.selectionStart;
  const had = input.value;
  input.value = input.value.replace(/ /g, '-').toLowerCase();
  // Assigning .value moves the caret to the end; restore it only when
  // normalization actually rewrote the text.
  if (input.value !== had) input.setSelectionRange(pos, pos);
  const name = input.value.trim() || 'default';
  if (sessionNameExists(name)) {
    showSessionNameError(name);
  } else {
    clearSessionNameError();
  }
}
// Create a new session from the modal fields via POST /api/create, then
// reload so the new container appears (optionally opening its terminal
// in a new tab first). Normalizes the name the same way the live
// validation does, and re-checks for duplicates before submitting.
async function submitNewSession() {
  const name = document.getElementById('session-name').value.replace(/ /g, '-').toLowerCase().trim() || 'default';
  const volume = document.getElementById('session-volume').value.trim();
  const query = document.getElementById('session-query').value.trim();
  const openTab = document.getElementById('session-open-tab').checked;
  // Check if session already exists
  if (sessionNameExists(name)) {
    showSessionNameError(name);
    return;
  }
  const res = await fetch('/api/create', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name, volume, query, openTab })
  });
  const data = await res.json();
  if (data.success) {
    closeNewSessionModal();
    if (openTab && data.url) {
      window.open(data.url, '_blank');
    }
    location.reload();
  } else {
    alert(data.error || 'Failed to create session');
  }
}
// Show the per-session info dialog with a copyable `docker exec` command
// for attaching to the session's tmux from a host terminal.
function showSessionInfo(name) {
  const displayName = name.replace('safeclaw-', '');
  document.getElementById('session-info-title').textContent = displayName;
  document.getElementById('session-exec-cmd').value = `docker exec -it ${name} tmux attach -t main`;
  document.getElementById('session-info-modal').classList.add('open');
}

function closeSessionInfoModal() {
  document.getElementById('session-info-modal').classList.remove('open');
}

// Copy the exec command to the clipboard and flash "copied" on the button.
function copyExecCmd() {
  const cmd = document.getElementById('session-exec-cmd').value;
  navigator.clipboard.writeText(cmd);
  const btn = document.querySelector('#session-info-modal .modal-submit');
  btn.textContent = 'copied';
  setTimeout(() => btn.textContent = 'copy', 1500);
}

// Close modal on escape
document.addEventListener('keydown', (e) => {
  if (e.key === 'Escape') {
    closeNewSessionModal();
    closeSessionInfoModal();
  }
});

// Close modal on overlay click
document.addEventListener('click', (e) => {
  if (e.target.classList.contains('modal-overlay')) {
    closeNewSessionModal();
    closeSessionInfoModal();
  }
});
</script>
</head>
<body>
<header>
<h1><span>safeclaw</span> <em>sessions</em></h1>
<button class="new-btn" onclick="openNewSessionModal()">+ new</button>
</header>
{{CONTENT}}
<div id="new-session-modal" class="modal-overlay">
<div class="modal">
<h2>new session</h2>
<p class="modal-subtitle">each session runs in its own isolated container</p>
<div class="modal-field">
<label for="session-name">session name</label>
<input type="text" id="session-name" placeholder="default" oninput="validateSessionName()">
<p class="modal-hint" id="session-name-hint">use a unique name to create a new session</p>
</div>
<div class="modal-field">
<label for="session-volume">volume mount</label>
<input type="text" id="session-volume" placeholder="~/myproject:/home/sclaw/myproject">
</div>
<div class="modal-field">
<label for="session-query">initial query</label>
<input type="text" id="session-query" placeholder="Research topic X...">
</div>
<div class="modal-field-checkbox">
<input type="checkbox" id="session-open-tab">
<label for="session-open-tab">open in new tab</label>
</div>
<div class="modal-actions">
<button class="modal-cancel" onclick="closeNewSessionModal()">cancel</button>
<button class="modal-submit" onclick="submitNewSession()">create</button>
</div>
</div>
</div>
<div id="session-info-modal" class="modal-overlay">
<div class="modal">
<h2 id="session-info-title">session</h2>
<div class="modal-field">
<label>enter container</label>
<input type="text" id="session-exec-cmd" readonly onclick="this.select()">
</div>
<div class="modal-actions">
<button class="modal-cancel" onclick="closeSessionInfoModal()">close</button>
<button class="modal-submit" onclick="copyExecCmd()">copy</button>
</div>
</div>
</div>
</body>
</html>
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
#!/bin/bash
# Rebuild the safeclaw image; drop the stale default container afterwards
# so that run.sh recreates it from the freshly built image.

script_dir="$(cd "$(dirname "$0")" && pwd)"
project_dir="$(dirname "$script_dir")"
container="safeclaw"

echo "Building image..."
docker build -t safeclaw "$project_dir" || exit 1

# A container created from the old image would keep running stale code;
# remove it so the next run.sh starts from the new image.
if docker ps -a --format '{{.Names}}' | grep -q "^${container}$"; then
    echo "Removing old container..."
    docker rm -f "$container" > /dev/null
fi
scripts/manage-env.js | JavaScript | #!/usr/bin/env node
// Manage SafeClaw environment variables
const fs = require('fs');
const path = require('path');
const { select, input, password, confirm } = require('@inquirer/prompts');
const SECRETS_DIR = path.join(
process.env.XDG_CONFIG_HOME || path.join(process.env.HOME, '.config'),
'safeclaw',
'.secrets'
);
function getKeys() {
if (!fs.existsSync(SECRETS_DIR)) return [];
return fs.readdirSync(SECRETS_DIR).filter(f => {
const stat = fs.statSync(path.join(SECRETS_DIR, f));
return stat.isFile();
});
}
async function manageKeys() {
const keys = getKeys();
if (keys.length === 0) {
console.log('\nNo keys found.\n');
return;
}
const choices = [
...keys.map(key => ({ name: key, value: key })),
{ name: '← Back', value: 'back' }
];
console.log(`\nKeys from ${SECRETS_DIR}/\n`);
const keyToDelete = await select({
message: 'Select key to delete:',
choices
});
if (keyToDelete === 'back') {
return;
}
const confirmed = await confirm({
message: `Delete ${keyToDelete}?`,
default: false
});
if (confirmed) {
fs.unlinkSync(path.join(SECRETS_DIR, keyToDelete));
console.log(`\nDeleted ${keyToDelete}\n`);
} else {
console.log('\nCancelled.\n');
}
}
async function addKey() {
console.log('\nExample: OPENAI_API_KEY\n');
const name = await input({
message: 'Name:',
validate: val => {
if (!val) return 'Name is required';
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(val)) {
return 'Use letters, numbers, and underscores only';
}
return true;
}
});
const value = await password({
message: 'Value:',
mask: '*',
validate: val => val ? true : 'Value is required'
});
fs.mkdirSync(SECRETS_DIR, { recursive: true, mode: 0o700 });
const filePath = path.join(SECRETS_DIR, name);
fs.writeFileSync(filePath, value, { mode: 0o600 });
console.log(`\nSaved to ${filePath}`);
console.log('Restart SafeClaw to use: ./scripts/run.sh\n');
}
async function mainMenu() {
while (true) {
const action = await select({
message: 'What would you like to do?',
choices: [
{ name: 'Manage keys', value: 'manage' },
{ name: 'Add key', value: 'add' },
{ name: 'Exit', value: 'exit' }
]
});
switch (action) {
case 'manage':
await manageKeys();
break;
case 'add':
await addKey();
break;
case 'exit':
return;
}
}
}
mainMenu().catch(err => {
console.error(err);
process.exit(1);
});
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
scripts/new.sh | Shell | #!/bin/bash
# Create a new session (called from dashboard UI)
# Checks for required tokens before calling run.sh
SECRETS_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/safeclaw/.secrets"
# Check for required token
if [ ! -f "$SECRETS_DIR/CLAUDE_CODE_OAUTH_TOKEN" ]; then
echo "ERROR: No Claude Code token found." >&2
echo "Run ./scripts/run.sh first to set up authentication." >&2
exit 1
fi
# Pass all arguments to run.sh
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
exec "$SCRIPT_DIR/run.sh" "$@"
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
scripts/run.sh | Shell | #!/bin/bash
# Start/reuse container, inject auth tokens, start ttyd web terminal
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
SAFECLAW_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/safeclaw"
SECRETS_DIR="$SAFECLAW_DIR/.secrets"
SESSIONS_DIR="$SAFECLAW_DIR/sessions"
SESSION_NAME=""
VOLUME_MOUNT=""
NO_OPEN=false
QUERY=""
# Parse arguments
while getopts "s:v:nq:" opt; do
case $opt in
s)
SESSION_NAME="$OPTARG"
;;
v)
VOLUME_MOUNT="$OPTARG"
;;
n)
NO_OPEN=true
;;
q)
QUERY="$OPTARG"
;;
*)
echo "Usage: $0 [-s session_name] [-v /host/path:/container/path] [-n] [-q \"question\"]"
exit 1
;;
esac
done
# Set container name based on session (default to "default")
SESSION_NAME="${SESSION_NAME:-default}"
CONTAINER_NAME="safeclaw-${SESSION_NAME}"
# Find available port (starting from 7681)
find_available_port() {
local port=7681
while docker ps --format '{{.Ports}}' | grep -q ":${port}->"; do
port=$((port + 1))
done
echo $port
}
# Get port for this container (reuse existing or find new)
get_container_port() {
local existing_port=$(docker ps --format '{{.Names}} {{.Ports}}' | grep "^${CONTAINER_NAME} " | sed -n 's/.*:\([0-9]*\)->7681.*/\1/p')
if [ -n "$existing_port" ]; then
echo "$existing_port"
else
find_available_port
fi
}
PORT=$(get_container_port)
# Check if image exists
if ! docker images -q safeclaw | grep -q .; then
echo "Error: Image 'safeclaw' not found. Run ./scripts/build.sh first."
exit 1
fi
# If volume mount requested and container exists, remove it to recreate with new mount
if [ -n "$VOLUME_MOUNT" ] && docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
echo "Volume mount requested. Removing existing container..."
docker rm -f "$CONTAINER_NAME" > /dev/null
fi
# Check if container exists
if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
echo "Reusing running container: $CONTAINER_NAME"
# Restart ttyd/tmux so fresh env vars take effect
docker exec "$CONTAINER_NAME" pkill -f ttyd 2>/dev/null
docker exec "$CONTAINER_NAME" tmux kill-server 2>/dev/null
sleep 1
else
echo "Starting existing container: $CONTAINER_NAME"
docker start "$CONTAINER_NAME" > /dev/null
fi
else
echo "Creating container: $CONTAINER_NAME"
# Create session data directory for Claude history persistence
SESSION_DATA_DIR="$SESSIONS_DIR/$SESSION_NAME"
mkdir -p "$SESSION_DATA_DIR"
VOLUME_FLAGS="-v $SESSION_DATA_DIR:/home/sclaw/.claude/projects"
if [ -n "$VOLUME_MOUNT" ]; then
VOLUME_FLAGS="$VOLUME_FLAGS -v $VOLUME_MOUNT"
echo "Mounting volume: $VOLUME_MOUNT"
fi
docker run -d --ipc=host --name "$CONTAINER_NAME" -p 127.0.0.1:${PORT}:7681 $VOLUME_FLAGS safeclaw sleep infinity > /dev/null
fi
# === Claude Code token setup ===
mkdir -p "$SECRETS_DIR"
if [ ! -f "$SECRETS_DIR/CLAUDE_CODE_OAUTH_TOKEN" ]; then
echo ""
echo "=== Claude Code setup ==="
echo ""
echo "No Claude Code token found. Let's set one up."
echo ""
echo "Run this command in another terminal:"
echo ""
echo " claude setup-token"
echo ""
echo "It will generate a long-lived OAuth token (valid for 1 year)."
echo "Paste the token below."
echo ""
while true; do
read -p "Token: " claude_token
if [ -n "$claude_token" ]; then
echo "$claude_token" > "$SECRETS_DIR/CLAUDE_CODE_OAUTH_TOKEN"
echo "Saved."
break
fi
echo "Token is required. Please run 'claude setup-token' and paste the result."
done
fi
# === GitHub CLI token setup ===
if [ ! -f "$SECRETS_DIR/GH_TOKEN" ]; then
echo ""
echo "=== GitHub CLI setup ==="
echo ""
echo "No GitHub token found. Let's set one up."
echo ""
echo "We recommend creating a separate GitHub account for SafeClaw"
echo "so you can scope its permissions independently."
echo ""
echo "Once logged in, run this in another terminal:"
echo ""
echo " gh auth token"
echo ""
echo "Or create a Personal Access Token at:"
echo " https://github.com/settings/tokens"
echo ""
echo "Paste the token below."
echo ""
read -p "Token: " gh_token
if [ -n "$gh_token" ]; then
echo "$gh_token" > "$SECRETS_DIR/GH_TOKEN"
echo "Saved."
else
echo "No token provided, skipping. You can set it up later by re-running this script."
fi
fi
# Persist secrets inside container as /home/sclaw/.env
# This is the single source of truth for env vars - sourced by .bashrc via BASH_ENV
docker exec "$CONTAINER_NAME" sh -c 'rm -f /home/sclaw/.env && touch /home/sclaw/.env && chmod 600 /home/sclaw/.env'
for secret_file in "$SECRETS_DIR"/*; do
if [ -f "$secret_file" ]; then
docker exec "$CONTAINER_NAME" sh -c "echo 'export $(basename "$secret_file")=$(cat "$secret_file")' >> /home/sclaw/.env"
fi
done
# Set git config from GitHub account if logged in
if [ -f "$SECRETS_DIR/GH_TOKEN" ]; then
docker exec "$CONTAINER_NAME" bash -c '
if gh auth status >/dev/null 2>&1; then
USER_DATA=$(gh api user 2>/dev/null)
if [ -n "$USER_DATA" ]; then
NAME=$(echo "$USER_DATA" | jq -r ".name // .login")
LOGIN=$(echo "$USER_DATA" | jq -r ".login")
EMAIL=$(echo "$USER_DATA" | jq -r ".email // empty")
# Use noreply email if no public email
[ -z "$EMAIL" ] && EMAIL="${LOGIN}@users.noreply.github.com"
git config --global user.name "$NAME"
git config --global user.email "$EMAIL"
fi
fi
'
fi
# Set title based on session name
TITLE="SafeClaw - ${SESSION_NAME}"
# Start ttyd with web terminal
docker exec -d "$CONTAINER_NAME" \
ttyd -W -t titleFixed="$TITLE" -p 7681 /home/sclaw/ttyd-wrapper.sh
echo ""
echo "SafeClaw is running at: http://localhost:${PORT}"
# Query mode - send query to the interactive session
if [ -n "$QUERY" ]; then
echo "Starting session and sending query..."
# Start tmux session directly (same as ttyd-wrapper.sh does)
docker exec "$CONTAINER_NAME" bash -c '
if ! tmux has-session -t main 2>/dev/null; then
tmux -f /dev/null new -d -s main
tmux set -t main status off
tmux set -t main mouse on
tmux send-keys -t main "claude --dangerously-skip-permissions" Enter
fi
'
# Wait for Claude Code to initialize
sleep 3
# Send the query
docker exec "$CONTAINER_NAME" tmux send-keys -t main "$QUERY" Enter
sleep 0.5
docker exec "$CONTAINER_NAME" tmux send-keys -t main Enter
echo "Query sent: $QUERY"
fi
echo ""
echo "To stop: docker stop $CONTAINER_NAME"
# Open in browser (unless -n flag)
if [ "$NO_OPEN" = false ]; then
if command -v open >/dev/null 2>&1; then
open "http://localhost:${PORT}"
elif command -v xdg-open >/dev/null 2>&1; then
xdg-open "http://localhost:${PORT}"
fi
fi
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
scripts/setup-gemini.sh | Shell | #!/bin/bash
# Set up Gemini CLI API key for SafeClaw
SECRETS_DIR="$HOME/.config/safeclaw/.secrets"
TOKEN_FILE="$SECRETS_DIR/GEMINI_API_KEY"
mkdir -p "$SECRETS_DIR"
echo "Gemini CLI Setup"
echo "================"
echo ""
echo "Get your API key from: https://aistudio.google.com/apikey"
echo ""
read -p "Paste your Gemini API key: " api_key
if [ -z "$api_key" ]; then
echo "No key provided. Aborting."
exit 1
fi
echo -n "$api_key" > "$TOKEN_FILE"
chmod 600 "$TOKEN_FILE"
echo ""
echo "Saved to $TOKEN_FILE"
echo "Stop and start the container from the dashboard to apply."
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
scripts/setup-slack.sh | Shell | #!/bin/bash
# Set up Slack integration for SafeClaw
SECRETS_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/safeclaw/.secrets"
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
MANIFEST="$SCRIPT_DIR/../setup/slack-manifest.json"
echo ""
echo "=== Slack Setup ==="
echo ""
echo "Setup method:"
echo " [Q] Quick - create app from manifest (channels, users, search)"
echo " [M] Manual - create app from scratch (more control, optional DM access)"
echo ""
read -p "Choose [Q/m]: " setup_method
echo ""
if [[ "$setup_method" =~ ^[Mm]$ ]]; then
# Manual setup
echo "1. Go to https://api.slack.com/apps"
echo "2. Click 'Create New App' > 'From scratch'"
echo "3. Name it (e.g., 'SafeClaw') and select your workspace"
echo "4. Go to 'OAuth & Permissions'"
echo ""
echo "Which token type?"
echo " [B] Bot Token - can only read channels the bot is added to"
echo " [U] User Token - can read all channels you're in"
echo ""
read -p "Choose [B/u]: " token_choice
echo ""
if [[ "$token_choice" =~ ^[Uu]$ ]]; then
scope_section="User Token Scopes"
token_prefix="xoxp-"
else
scope_section="Bot Token Scopes"
token_prefix="xoxb-"
fi
echo "Add these scopes to '$scope_section':"
echo " - channels:read, channels:history (public channels)"
echo " - groups:read, groups:history (private channels)"
echo " - users:read (user profiles)"
echo " - search:read (search messages)"
echo " - (optional) im:read, im:history (DMs)"
echo " - (optional) mpim:read, mpim:history (group DMs)"
echo ""
echo "5. Left sidebar > 'Install App' > 'Install to Workspace'"
echo "6. Copy the token (starts with $token_prefix)"
else
# Quick setup via manifest
echo "1. Go to https://api.slack.com/apps?new_app=1"
echo "2. Choose 'From a manifest'"
echo "3. Select your workspace"
echo "4. Switch to JSON tab and paste this manifest:"
echo ""
cat "$MANIFEST"
echo ""
echo "5. Click 'Create'"
echo "6. Go to 'Install App' > 'Install to Workspace'"
echo "7. Copy the User OAuth Token (starts with xoxp-)"
fi
echo ""
read -p "Paste token: " slack_token
if [ -z "$slack_token" ]; then
echo "No token provided, skipping Slack setup."
else
mkdir -p "$SECRETS_DIR"
echo "$slack_token" > "$SECRETS_DIR/SLACK_TOKEN"
echo ""
echo "Saved to $SECRETS_DIR/SLACK_TOKEN"
echo ""
echo "Restart SafeClaw to use Slack:"
echo " ./scripts/run.sh"
fi
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
setup/tools/slack-read.js | JavaScript | #!/usr/bin/env node
// Slack Read-Only Tool
// Usage: node slack-read.js <command> [args]
const { WebClient } = require('@slack/web-api');
// Get token from env var
function getToken(envVarName = 'SLACK_TOKEN') {
const token = process.env[envVarName];
if (!token) {
console.error(`${envVarName} env var not set.`);
console.error('Run: ./scripts/setup-slack.sh');
process.exit(1);
}
return token;
}
// Parse --token flag
let tokenEnvVar = 'SLACK_TOKEN';
const rawArgs = process.argv.slice(2);
const tokenFlagIdx = rawArgs.indexOf('--token');
if (tokenFlagIdx !== -1) {
tokenEnvVar = rawArgs[tokenFlagIdx + 1];
if (!tokenEnvVar) {
console.error('--token requires an env var name');
process.exit(1);
}
rawArgs.splice(tokenFlagIdx, 2);
}
const client = new WebClient(getToken(tokenEnvVar));
// Commands
const commands = {
async channels() {
const result = await client.conversations.list({ types: 'public_channel,private_channel' });
for (const ch of result.channels) {
const type = ch.is_private ? 'private' : 'public';
console.log(`${ch.id}\t${ch.name}\t(${type})`);
}
},
async dms() {
const result = await client.conversations.list({ types: 'im,mpim' });
for (const ch of result.channels) {
const type = ch.is_mpim ? 'group-dm' : 'dm';
console.log(`${ch.id}\t${ch.name || ch.user || 'unnamed'}\t(${type})`);
}
},
async history(channelId, limit = 20) {
if (!channelId) {
console.error('Usage: slack-read.js history <channel_id> [limit]');
process.exit(1);
}
const result = await client.conversations.history({
channel: channelId,
limit: parseInt(limit)
});
for (const msg of result.messages.reverse()) {
const time = new Date(msg.ts * 1000).toISOString();
const user = msg.user || 'unknown';
console.log(`[${time}] ${user}: ${msg.text}`);
}
},
async search(query, limit = 20) {
if (!query) {
console.error('Usage: slack-read.js search <query> [limit]');
process.exit(1);
}
const result = await client.search.messages({
query,
count: parseInt(limit)
});
for (const match of result.messages.matches) {
const time = new Date(match.ts * 1000).toISOString();
const user = match.user || match.username || 'unknown';
const channel = match.channel?.name || 'unknown';
console.log(`[${time}] #${channel} ${user}: ${match.text}`);
console.log('---');
}
},
async users() {
const result = await client.users.list();
for (const user of result.members) {
if (!user.deleted && !user.is_bot) {
console.log(`${user.id}\t${user.name}\t${user.real_name || ''}`);
}
}
},
async info(channelId) {
if (!channelId) {
console.error('Usage: slack-read.js info <channel_id>');
process.exit(1);
}
const result = await client.conversations.info({ channel: channelId });
const ch = result.channel;
console.log(`Name: ${ch.name}`);
console.log(`ID: ${ch.id}`);
console.log(`Type: ${ch.is_private ? 'private' : 'public'}`);
console.log(`Members: ${ch.num_members || 'unknown'}`);
console.log(`Topic: ${ch.topic?.value || 'none'}`);
console.log(`Purpose: ${ch.purpose?.value || 'none'}`);
},
help() {
console.log(`Slack Read-Only Tool
Usage: slack-read.js [--token ENV_VAR] <command> [args]
Options:
--token ENV_VAR Use a different env var (default: SLACK_TOKEN)
Commands:
channels List all channels (public & private)
dms List DMs and group DMs
history <id> [n] Read last n messages from channel (default: 20)
search <query> [n] Search messages (default: 20 results)
users List workspace users
info <id> Get channel info
help Show this help
`);
}
};
// Main
async function main() {
const [command, ...args] = rawArgs;
if (!command || !commands[command]) {
commands.help();
process.exit(command ? 1 : 0);
}
try {
await commands[command](...args);
} catch (err) {
console.error('Error:', err.message);
process.exit(1);
}
}
main();
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
setup/ttyd-wrapper.sh | Shell | #!/bin/bash
# Start tmux session with claude
# Attach to existing session, or create new one with claude
if tmux has-session -t main 2>/dev/null; then
exec tmux attach -t main
else
# Create session
tmux -f /dev/null new -d -s main
tmux set -t main status off
tmux set -t main mouse on
# Start claude (env vars are loaded via BASH_ENV -> .bashrc -> .env)
tmux send-keys -t main 'claude --dangerously-skip-permissions' Enter
exec tmux attach -t main
fi
| ykdojo/safeclaw | 90 | The easiest way to run multiple Claude Code sessions, each in its own container, with a dashboard to manage them all. Quick setup with battle-tested sensible defaults and skills. | HTML | ykdojo | YK | Eventual |
index.html | HTML | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Simple Invaders</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
<div class="game-container">
<div id="gameInfo">
<div id="scoreBoard">Score: 0</div>
<div id="stageDisplay">Stage: 0</div>
</div>
<canvas id="gameCanvas"></canvas>
</div>
<script src="script.js"></script>
</body>
</html>
| ykdojo/space-invaders | 2 | JavaScript | ykdojo | YK | Eventual | |
script.js | JavaScript | const canvas = document.getElementById('gameCanvas');
const scoreBoard = document.getElementById('scoreBoard');
const stageDisplay = document.getElementById('stageDisplay');
const ctx = canvas.getContext('2d');
// --- Game Configuration ---
const canvasWidth = 800;
const canvasHeight = 600;
canvas.width = canvasWidth;
canvas.height = canvasHeight;
const playerColor = '#607D8B'; // Blue Grey
const invaderColor = '#FF5722'; // Deep Orange
const bulletColor = '#009688'; // Teal
let score = 0;
let stage = 0;
const backgroundColor = '#ffffff'; // White
// --- Player ---
const playerWidth = 50;
const playerHeight = 20;
const playerSpeed = 7;
let player = {
x: canvasWidth / 2 - playerWidth / 2,
y: canvasHeight - playerHeight - 20,
width: playerWidth,
height: playerHeight,
dx: 0 // Horizontal speed
};
function drawPlayer() {
ctx.fillStyle = playerColor;
ctx.fillRect(player.x, player.y, player.width, player.height);
}
function movePlayer() {
player.x += player.dx;
// Keep player within bounds
if (player.x < 0) {
player.x = 0;
}
if (player.x + player.width > canvasWidth) {
player.x = canvasWidth - player.width;
}
}
// --- Invaders ---
const invaderWidth = 40;
const invaderHeight = 20;
const invaderPadding = 15;
const invaderOffsetTop = 30;
const invaderOffsetLeft = 30;
const invaderRowCount = 3;
const invaderColumnCount = 6;
const initialInvaderSpeed = 0.8; // Base speed for stage 0
const initialInvaderSideSpeed = 0.5; // Base side speed for stage 0
const speedIncreasePerStage = 0.3; // How much speed increases per stage
let currentInvaderSpeed = initialInvaderSpeed;
let currentInvaderSideSpeed = initialInvaderSideSpeed;
let invaders = [];
let invaderDirection = 1; // 1 for right, -1 for left
function createInvaders() {
invaders = []; // Reset invaders
for (let c = 0; c < invaderColumnCount; c++) {
for (let r = 0; r < invaderRowCount; r++) {
invaders.push({
x: invaderOffsetLeft + c * (invaderWidth + invaderPadding),
y: invaderOffsetTop + r * (invaderHeight + invaderPadding),
width: invaderWidth,
height: invaderHeight,
status: 1 // 1 = alive, 0 = dead
});
}
}
}
function drawInvaders() {
invaders.forEach(invader => {
if (invader.status === 1) {
ctx.fillStyle = invaderColor;
ctx.fillRect(invader.x, invader.y, invader.width, invader.height);
}
});
}
function moveInvaders() {
// Check if any active invader has reached the canvas edge
let hitEdge = false;
let lowestInvader = 0;
invaders.forEach(invader => {
if (invader.status === 1) {
// Track the lowest invader for game over condition
lowestInvader = Math.max(lowestInvader, invader.y + invader.height);
// Check if any invader hits the side edge
if ((invader.x + invader.width + currentInvaderSideSpeed * invaderDirection > canvasWidth) ||
(invader.x + currentInvaderSideSpeed * invaderDirection < 0)) {
hitEdge = true;
}
}
});
// Move all invaders
invaders.forEach(invader => {
if (invader.status === 1) {
// If edge was hit, move down and change direction
if (hitEdge) {
invader.y += currentInvaderSpeed * 10; // Move down more when hitting edge
}
// Move side to side
invader.x += currentInvaderSideSpeed * invaderDirection;
// Check if invader reached bottom (game over condition)
if (invader.y + invader.height > player.y) {
console.log("GAME OVER - Invader reached player level");
document.location.reload(); // Reload to restart
}
}
});
// Change direction if edge was hit
if (hitEdge) {
invaderDirection *= -1;
}
}
// --- Bullets ---
const bulletWidth = 5;
const bulletHeight = 10;
const bulletSpeed = 10;
let bullets = [];
function drawBullets() {
bullets.forEach((bullet, index) => {
ctx.fillStyle = bulletColor;
ctx.fillRect(bullet.x, bullet.y, bullet.width, bullet.height);
// Move bullet
bullet.y -= bulletSpeed;
// Remove bullet if it goes off-screen
if (bullet.y + bullet.height < 0) {
bullets.splice(index, 1);
}
});
}
function shoot() {
if (bullets.length < 5) { // Limit bullets on screen
bullets.push({
x: player.x + player.width / 2 - bulletWidth / 2,
y: player.y,
width: bulletWidth,
height: bulletHeight
});
}
}
// --- Collision Detection ---
function collisionDetection() {
bullets.forEach((bullet, bulletIndex) => {
invaders.forEach((invader, invaderIndex) => {
if (invader.status === 1) {
if (
bullet.x < invader.x + invader.width &&
bullet.x + bullet.width > invader.x &&
bullet.y < invader.y + invader.height &&
bullet.y + bullet.height > invader.y
) {
// Collision!
invader.status = 0; // Mark invader as dead
score += 10; // Increase score
bullets.splice(bulletIndex, 1); // Remove bullet
// Check if all invaders are defeated
if (invaders.every(inv => inv.status === 0)) {
console.log("Stage completed!");
advanceToNextStage();
}
}
}
});
});
}
// --- Game Loop ---
function gameLoop() {
// Clear canvas
ctx.fillStyle = backgroundColor;
ctx.fillRect(0, 0, canvasWidth, canvasHeight);
// Draw & Update
drawPlayer();
movePlayer();
drawInvaders();
moveInvaders();
drawBullets();
collisionDetection();
// Update Score Board and Stage Display
scoreBoard.textContent = `Score: ${score}`;
stageDisplay.textContent = `Stage: ${stage}`;
requestAnimationFrame(gameLoop); // Keep the loop going
}
// --- Event Listeners ---
document.addEventListener('keydown', (e) => {
if (e.key === 'ArrowRight' || e.key === 'Right') {
player.dx = playerSpeed;
} else if (e.key === 'ArrowLeft' || e.key === 'Left') {
player.dx = -playerSpeed;
} else if (e.key === ' ' || e.key === 'Spacebar') {
shoot();
}
});
document.addEventListener('keyup', (e) => {
if (
e.key === 'ArrowRight' ||
e.key === 'Right' ||
e.key === 'ArrowLeft' ||
e.key === 'Left'
) {
player.dx = 0;
}
});
// --- Stage Progression ---
function advanceToNextStage() {
// Display stage clear message
showStageMessage(`STAGE ${stage} CLEAR!`);
// Increment stage
stage++;
// Increase speeds with more significant progression based on stage
currentInvaderSpeed = initialInvaderSpeed + (speedIncreasePerStage * stage);
currentInvaderSideSpeed = initialInvaderSideSpeed + (speedIncreasePerStage * stage / 2);
console.log(`Stage ${stage} - Speed increased! Down: ${currentInvaderSpeed.toFixed(2)}, Side: ${currentInvaderSideSpeed.toFixed(2)}`);
// Reset invader positions but keep the score
createInvaders();
}
// Display a temporary message in the center of the screen
function showStageMessage(message) {
ctx.save();
ctx.fillStyle = 'rgba(0, 0, 0, 0.7)';
ctx.fillRect(0, 0, canvasWidth, canvasHeight);
ctx.font = 'bold 36px Arial';
ctx.fillStyle = '#FF5722';
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
ctx.fillText(message, canvasWidth / 2, canvasHeight / 2);
// Display for 1.5 seconds
setTimeout(() => {
ctx.restore();
}, 1500);
}
// --- Start Game ---
createInvaders();
showStageMessage(`STAGE ${stage} START!`);
gameLoop();
| ykdojo/space-invaders | 2 | JavaScript | ykdojo | YK | Eventual | |
style.css | CSS | body {
margin: 0;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
background-color: #f0f0f0; /* Light grey background */
}
.game-container {
position: relative;
}
canvas {
border: 1px solid #ccc; /* Subtle border */
background-color: #ffffff; /* White canvas */
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.15);
}
#gameInfo {
position: absolute;
top: -40px;
right: 0;
display: flex;
gap: 15px;
}
#scoreBoard, #stageDisplay {
font-family: 'Arial', sans-serif;
font-size: 24px;
font-weight: bold;
color: #009688;
background-color: rgba(255, 255, 255, 0.8);
padding: 8px 15px;
border-radius: 5px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
}
#stageDisplay {
color: #FF5722; /* Deep Orange to match invaders */
}
| ykdojo/space-invaders | 2 | JavaScript | ykdojo | YK | Eventual | |
jest.config.js | JavaScript | export default {
preset: 'ts-jest/presets/js-with-ts-esm',
testEnvironment: 'node',
transform: {
'^.+\\.tsx?$': ['ts-jest', {
useESM: true,
tsconfig: 'tsconfig.test.json'
}]
},
extensionsToTreatAsEsm: ['.ts', '.tsx'],
moduleNameMapper: {
'^(\\.{1,2}/.*)\\.js$': '$1'
},
testTimeout: 30000
}; | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
src/chat-cli.tsx | TypeScript (TSX) | #!/usr/bin/env node
import React from 'react';
import { render, Box, Text } from 'ink';
import { ChatApp } from './chat-ui.js';
// Check if stdin is TTY (interactive terminal)
if (process.stdin.isTTY) {
// Use standard render with no options for interactive terminals
render(<ChatApp />);
} else {
// Fallback for non-TTY environments (like when piping input)
console.log('This application requires an interactive terminal to run properly.');
console.log('Please run in a terminal environment that supports interactive input.');
process.exit(1);
} | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
src/chat-controller.ts | TypeScript | import { useState } from 'react';
import { GeminiAPI } from './gemini-api.js';
import { formatMessagesForGeminiAPI, Message as FormatterMessage } from './utils/message-formatter.js';
import { executeCommand } from './services/terminal-service.js';
export type Message = FormatterMessage;
// System instruction for the Turing terminal assistant
const SYSTEM_INSTRUCTION = `You are a helpful terminal assistant in the Turing application, working in the directory: ${process.cwd()}. Be proactive and run commands immediately when they would help answer the user's question. Never ask for permission in your text responses. Your job is to be efficient and helpful with minimal back-and-forth. Focus on being direct and concise when responding to user queries.`;
// Get model from environment or use default (flash for speed, pro for capabilities)
const MODEL = process.env.GEMINI_MODEL || 'gemini-2.0-flash';
// Initialize Gemini API with configured model, function calling enabled, and system instruction
const geminiApi = new GeminiAPI(MODEL, undefined, true, SYSTEM_INSTRUCTION);
export function useChatController() {
// Start with a completely empty chat history
const initialMessages: Message[] = [];
const [messages, setMessages] = useState<Message[]>(initialMessages);
const [inputText, setInputText] = useState('');
const [chatHistory, setChatHistory] = useState<any[]>([]);
const [pendingExecution, setPendingExecution] = useState<boolean>(false);
const [messageToExecute, setMessageToExecute] = useState<number | null>(null);
// Handle action when user presses Enter
const handleEnterKey = () => {
// Check if we have any pending safe commands to execute
if (messageToExecute !== null) {
const msgIndex = messageToExecute;
// Reset the message to execute
setMessageToExecute(null);
// Find the first unsafe and not executed command
const msg = messages[msgIndex];
if (msg?.functionCalls) {
const callIndex = msg.functionCalls.findIndex((call) =>
!call.args.isSafe && !call.executed);
if (callIndex !== -1) {
const command = msg.functionCalls[callIndex].args.command;
// Pass the chat session if available for continuity
executeCommand(
command,
msgIndex,
callIndex,
msg.chatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
return true; // Command execution initiated
}
}
}
// If no pending execution, process normal text input
if (!pendingExecution && inputText.trim() !== '') {
// Add user message
setMessages(prev => [
...prev,
{ role: 'user', content: inputText }
]);
// Store message for API call
const userMessage = inputText;
setInputText('');
// Add loading message
setMessages(prev => [
...prev,
{ role: 'assistant', content: '', isLoading: true }
]);
// Format history for Gemini API
const formattedHistory = formatMessagesForGeminiAPI(messages);
// Call Gemini API
geminiApi.sendMessage(userMessage, formattedHistory)
.then(response => {
// Check if response has function calls
if (typeof response === 'object' && response.functionCalls) {
// Store the chat session for potential ongoing function calls
const chatSession = response.chatSession;
setMessages(prev => {
const newMsgs = [...prev];
// Replace loading message with response that includes function calls
newMsgs[newMsgs.length - 1] = {
role: 'assistant',
content: response.text || "I'll process that for you.",
functionCalls: response.functionCalls,
chatSession: chatSession // Store it with the message for future use
};
const msgIndex = newMsgs.length - 1;
// Set the message index for potential execution of unsafe commands
setMessageToExecute(msgIndex);
// Automatically execute safe commands
const safeCallIndex = response.functionCalls.findIndex(call =>
call.args.isSafe);
if (safeCallIndex !== -1) {
// Run the first safe command automatically
const command = response.functionCalls[safeCallIndex].args.command;
// Store the command and execution details for reference
const commandDetails = {
command,
msgIndex,
safeCallIndex,
chatSession: { ...chatSession }
};
// Use a small delay to ensure React state is updated first
setTimeout(() => {
// Execute outside the React state update to avoid React batch update issues
executeCommand(
commandDetails.command,
commandDetails.msgIndex,
commandDetails.safeCallIndex,
commandDetails.chatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
}, 100);
}
return newMsgs;
});
// Update chat history with text response
setChatHistory(prev => [
...prev,
{ role: 'user', parts: [{ text: userMessage }] },
{ role: 'model', parts: [{ text: response.text || "I'll process that for you." }] }
]);
} else {
setMessages(prev => {
const newMsgs = [...prev];
// Replace loading message with regular text response
newMsgs[newMsgs.length - 1] = {
role: 'assistant',
content: typeof response === 'string' ? response : response.text || ''
};
return newMsgs;
});
// Update chat history with text response
setChatHistory(prev => [
...prev,
{ role: 'user', parts: [{ text: userMessage }] },
{ role: 'model', parts: [{ text: typeof response === 'string' ? response : response.text || '' }] }
]);
}
})
.catch(error => {
setMessages(prev => {
const newMsgs = [...prev];
// Replace loading message with error
newMsgs[newMsgs.length - 1] = {
role: 'assistant',
content: `Error: ${error.message}`
};
return newMsgs;
});
});
return true; // Message sent
}
return false; // No action taken
};
// Text input handlers
const updateInputText = (text: string) => {
setInputText(text);
};
const appendToInputText = (text: string) => {
setInputText(prev => prev + text);
};
const backspaceInputText = () => {
setInputText(prev => prev.slice(0, -1));
};
return {
messages,
inputText,
messageToExecute,
pendingExecution,
handleEnterKey,
updateInputText,
appendToInputText,
backspaceInputText
};
} | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
src/chat-ui.tsx | TypeScript (TSX) | import React from 'react';
import { Box, Text, useInput, useApp } from 'ink';
import Spinner from 'ink-spinner';
import { useChatController } from './chat-controller.js';
/**
 * Root Ink component for the terminal chat UI.
 *
 * Renders the message transcript (including function-call panels with their
 * safety status and results) above a single-line input prompt, and wires
 * raw keyboard input to the controller returned by useChatController().
 */
export const ChatApp = () => {
  const {
    messages,
    inputText,
    handleEnterKey,
    appendToInputText,
    backspaceInputText
  } = useChatController();
  const { exit } = useApp();
  // Handle keyboard input
  useInput((input, key) => {
    // Ctrl+C quits the app.
    if (key.ctrl && input === 'c') {
      exit();
      return;
    }
    if (key.return) {
      // Enter either sends the typed message or confirms a pending unsafe
      // command — the controller decides which.
      handleEnterKey();
    } else if (key.backspace || key.delete) {
      backspaceInputText();
    } else if (!key.ctrl && !key.meta && !key.escape &&
               !key.rightArrow && !key.leftArrow &&
               !key.upArrow && !key.downArrow &&
               !key.tab) {
      // Any other non-modifier, non-navigation key is treated as printable
      // input and appended to the buffer.
      appendToInputText(input);
    }
  });
  return (
    <Box flexDirection="column" padding={1}>
      {/* Message history */}
      <Box flexDirection="column" flexGrow={1}>
        {messages.map((message, index) => (
          <Box key={index} marginY={1} flexDirection="column">
            <Text bold color={message.role === 'user' ? 'green' : 'blue'}>
              {message.role === 'user' ? '🧑 You:' : '🤖 AI:'}
            </Text>
            <Box marginLeft={2}>
              {message.isLoading ? (
                <Box>
                  <Text color="cyan"><Spinner type="dots" /></Text>
                  <Text> Thinking...</Text>
                </Box>
              ) : (
                <Box flexDirection="column">
                  <Text wrap="wrap">{message.content}</Text>
                  {/* Function-call panel: one entry per requested command */}
                  {message.functionCalls && message.functionCalls.length > 0 && (
                    <Box flexDirection="column" marginTop={1} borderStyle="round" borderColor="yellow" padding={1}>
                      <Text bold color="yellow">Function Call:</Text>
                      {message.functionCalls.map((call: any, idx: number) => (
                        <Box key={idx} flexDirection="column" marginLeft={1}>
                          <Text color="yellow">• {call.name}</Text>
                          <Box marginLeft={2}>
                            <Text color="cyan">Command: </Text>
                            <Text>{call.args.command}</Text>
                          </Box>
                          <Box marginLeft={2}>
                            <Text color="cyan">Safe: </Text>
                            <Text color={call.args.isSafe ? "green" : "red"}>
                              {call.args.isSafe ? "Yes" : "No"}
                            </Text>
                          </Box>
                          {/* Shown once the command has actually run */}
                          {call.executed && (
                            <Box marginLeft={2} marginTop={1}>
                              <Text color="cyan">Result: </Text>
                              <Text>{call.result}</Text>
                            </Box>
                          )}
                          {/* Unsafe commands wait for explicit confirmation */}
                          {!call.args.isSafe && !call.executed && (
                            <Box marginLeft={2} marginTop={1}>
                              <Text color="magenta">Press Enter to execute this command</Text>
                            </Box>
                          )}
                        </Box>
                      ))}
                    </Box>
                  )}
                </Box>
              )}
            </Box>
          </Box>
        ))}
      </Box>
      {/* Input prompt with cursor indicator */}
      <Box borderStyle="single" borderColor="gray" padding={1}>
        <Text>{`> ${inputText}`}<Text backgroundColor="white"> </Text></Text>
      </Box>
    </Box>
  );
};
src/gemini-api.ts | TypeScript | import { config } from 'dotenv';
import { GoogleGenAI } from '@google/genai';
import fs from 'node:fs';
import mime from 'mime-types';
import { getAvailableTools, terminalCommandTool } from './tools.js';
// Initialize environment variables
config();
// API key verification: read the Gemini key from the environment,
// failing fast with a descriptive error when it is missing or empty.
const getApiKey = (): string => {
  const { GEMINI_API_KEY: key } = process.env;
  if (key) {
    return key;
  }
  throw new Error('GEMINI_API_KEY not found in environment variables. Make sure to set it in .env file or directly in your environment');
};
// Default configuration
// Generation settings applied when a GeminiAPI instance is constructed
// without an explicit config argument.
const defaultGenerationConfig = {
  temperature: 1,                 // sampling temperature (1 = model default creativity)
  topP: 0.95,                     // nucleus sampling cutoff
  topK: 64,                       // consider the 64 most likely tokens per step
  maxOutputTokens: 65536,         // generous ceiling for long responses
  responseModalities: [],         // no extra modalities requested (text only)
  responseMimeType: "text/plain", // note: GeminiAPI re-forces text/plain anyway
};
// Class to handle Gemini API interactions
/**
 * Thin wrapper around the @google/genai SDK that provides:
 *  - a chat-session abstraction (startChat) with manual history tracking,
 *  - extraction helpers for inline data (images) and function calls,
 *  - a round-trip helper for returning tool results to the model.
 *
 * Bug fixed: processInlineData previously logged the literal text
 * "$(unknown)" instead of interpolating the output filename.
 */
export class GeminiAPI {
  private genAI: GoogleGenAI;
  private modelName: string;
  private generationConfig: any;
  private tools: any[];
  private toolConfig: any;
  private systemInstruction: string | undefined;

  /**
   * @param modelName             Gemini model identifier, e.g. "gemini-2.0-flash".
   * @param config                Generation settings; defaults to defaultGenerationConfig.
   * @param enableFunctionCalling When true, registers all available tools with AUTO mode.
   * @param systemInstruction     Optional system prompt attached to every request.
   */
  constructor(
    modelName: string,
    config = defaultGenerationConfig,
    enableFunctionCalling = false,
    systemInstruction?: string
  ) {
    this.genAI = new GoogleGenAI({
      apiKey: getApiKey()
    });
    this.modelName = modelName;
    this.tools = enableFunctionCalling ? getAvailableTools() : [];
    this.toolConfig = enableFunctionCalling ? {functionCallingConfig: {mode: "AUTO"}} : undefined;
    // Empty-string instructions are normalized to undefined.
    this.systemInstruction = systemInstruction ? systemInstruction : undefined;
    this.generationConfig = {
      ...config,
      responseMimeType: 'text/plain' // Always use text/plain for consistent response handling
    };
  }

  /**
   * Start a chat session backed by manual history management.
   * The returned object's sendMessage accepts either a plain string or an
   * array of message parts (e.g. functionResponse parts) and mutates the
   * shared `history` array with both the outgoing and incoming turns.
   */
  startChat(history: any[] = []) {
    const chat = {
      history,
      sendMessage: async (message: string | any[]) => {
        let contents;
        if (typeof message === 'string') {
          contents = [
            {
              role: 'user',
              parts: [{ text: message }]
            }
          ];
        } else {
          contents = [
            {
              role: 'user',
              parts: message
            }
          ];
        }
        // Include history if available
        if (history.length > 0) {
          contents = [...history, ...contents];
        }
        const options: any = {
          model: this.modelName,
          config: {
            ...this.generationConfig,
            responseMimeType: 'text/plain' // Ensure consistent MIME type
          },
          contents,
        };
        if (this.tools.length > 0) {
          options.tools = this.tools;
          options.toolConfig = this.toolConfig;
          // Ensure we're setting the right properties for function calling
          if (!options.config) options.config = {};
          options.config.tools = this.tools;
          options.config.toolConfig = this.toolConfig;
        }
        if (this.systemInstruction) {
          // NOTE(review): the Array.isArray branch is unreachable given the
          // declared `string | undefined` type — confirm whether array-shaped
          // instructions were ever intended here.
          options.config = {
            ...options.config,
            systemInstruction: {
              parts: [{ text: Array.isArray(this.systemInstruction) ?
                this.systemInstruction[0].text : this.systemInstruction }]
            }
          };
        }
        // For consistent function calling, always use direct generateContent
        // instead of wrapping in startChat
        const response = await this.genAI.models.generateContent(options);
        // Update history with the new messages
        history.push(contents[contents.length - 1]);
        if (response.candidates?.[0]?.content) {
          history.push(response.candidates[0].content);
        }
        // Create a wrapper with text() method for backward compatibility
        const wrappedResponse = {
          ...response,
          response: {
            text: () => {
              if (response.candidates?.[0]?.content?.parts?.[0]?.text) {
                return response.candidates[0].content.parts[0].text;
              }
              return '';
            }
          }
        };
        return wrappedResponse;
      }
    };
    return chat;
  }

  /**
   * Process inline data from a response (like images): every inlineData part
   * is decoded from base64 and written to an output_<candidate>_<part>.<ext>
   * file in the working directory.
   */
  processInlineData(result: any) {
    // Skip processing if response structure is unexpected or missing
    if (!result?.candidates && !result?.response?.candidates) {
      return;
    }
    const candidates = result.candidates || result.response.candidates;
    if (!Array.isArray(candidates)) {
      return;
    }
    for (let candidate_index = 0; candidate_index < candidates.length; candidate_index++) {
      if (!candidates[candidate_index]?.content?.parts) continue;
      for (let part_index = 0; part_index < candidates[candidate_index].content.parts.length; part_index++) {
        const part = candidates[candidate_index].content.parts[part_index];
        if (part.inlineData) {
          try {
            const filename = `output_${candidate_index}_${part_index}.${mime.extension(part.inlineData.mimeType)}`;
            fs.writeFileSync(filename, Buffer.from(part.inlineData.data, 'base64'));
            // Fix: interpolate the actual filename (was the literal "$(unknown)").
            console.log(`Output written to: ${filename}`);
          } catch (err) {
            console.error(err);
          }
        }
      }
    }
  }

  /**
   * Process function calls from a response.
   * @returns Flat list of {name, args} for every functionCall part found
   *          across all candidates; empty when the structure is missing.
   */
  processFunctionCalls(result: any) {
    const functionCalls: Array<{name: string; args: any}> = [];
    // Skip processing if response structure is unexpected or missing
    if (!result?.candidates && !result?.response?.candidates) {
      return functionCalls;
    }
    const candidates = result.candidates || result.response.candidates;
    if (!Array.isArray(candidates)) {
      return functionCalls;
    }
    for (const candidate of candidates) {
      if (!candidate?.content?.parts) continue;
      for (const part of candidate.content.parts) {
        if (part.functionCall) {
          functionCalls.push({
            name: part.functionCall.name,
            args: part.functionCall.args
          });
        }
      }
    }
    return functionCalls;
  }

  /**
   * Send function call results back to the model.
   * @returns Either a plain string (final answer) or, when the model chains
   *          another function call, an object {text, functionCalls, response}.
   *          On failure returns a fixed error string rather than throwing.
   */
  async sendFunctionResults(chatSession: any, functionName: string, result: string) {
    try {
      // Format the function response parts according to Gemini API requirements
      const parts = [
        {
          functionResponse: {
            name: functionName,
            response: {
              content: result
            }
          }
        }
      ];
      // Send the result to the model
      const response = await chatSession.sendMessage(parts);
      // Process the response for possible additional function calls
      const functionCalls = this.processFunctionCalls(response);
      if (functionCalls.length > 0) {
        return {
          text: response.response?.text() || '',
          functionCalls,
          response: response // Return the raw response for potential further interactions
        };
      }
      return response.response?.text() || '';
    } catch (error) {
      console.error("Error sending function results:", error);
      console.error(error instanceof Error ? error.stack : String(error));
      return "Error processing function results.";
    }
  }

  /**
   * Simple function to send a message and get a response.
   * @returns A plain string when the model answers directly, or
   *          {text, functionCalls, chatSession} when it requests a tool call
   *          (the session is returned so the caller can continue the loop).
   */
  async sendMessage(message: string, history: any[] = []) {
    const chatSession = this.startChat(history);
    const result = await chatSession.sendMessage(message);
    this.processInlineData(result);
    // Check for function calls
    const functionCalls = this.processFunctionCalls(result);
    if (functionCalls.length > 0) {
      return {
        text: result.response?.text() || '',
        functionCalls,
        chatSession // Return the chat session for continuous interaction
      };
    }
    return result.response?.text() || '';
  }
}
// No default instance exported - users must create instances with explicit model names | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
src/services/terminal-service.ts | TypeScript | import { exec } from 'child_process';
import { GeminiAPI } from '../gemini-api.js';
// Function to execute a terminal command and handle function call loop
/**
 * Runs `command` via child_process.exec, records the outcome on the message
 * at messageIndex/callIndex, then feeds the output back to the model through
 * geminiApi.sendFunctionResults. When the model replies with further function
 * calls, any call flagged isSafe is auto-executed recursively (via setTimeout)
 * while unsafe calls are left pending for explicit user confirmation.
 *
 * All React state setters are injected so this helper can run outside the
 * component and from inside exec's async callback.
 *
 * @param command             shell command to execute
 * @param messageIndex        index of the assistant message holding the call
 * @param callIndex           index of the function call within that message
 * @param chatSession         existing Gemini chat session (may be falsy)
 * @param geminiApi           API wrapper used for the follow-up round trip
 * @param setMessages         updater for the visible message list
 * @param setChatHistory      updater for the raw model-facing history
 * @param setPendingExecution flag updater: true while a command is in flight
 * @param setMessageToExecute records which message Enter would confirm
 */
export function executeCommand(
  command: string,
  messageIndex: number,
  callIndex: number,
  chatSession: any,
  geminiApi: GeminiAPI,
  setMessages: (callback: (prev: any[]) => any[]) => void,
  setChatHistory: (callback: (prev: any[]) => any[]) => void,
  setPendingExecution: (value: boolean) => void,
  setMessageToExecute: (value: number | null) => void
) {
  // Mark as pending execution
  setPendingExecution(true);
  exec(command, async (error, stdout, stderr) => {
    // Prepare result
    // Priority: exec error > stderr > stdout > generic success message.
    const result = error
      ? `Error: ${error.message}`
      : stderr
        ? `${stderr}`
        : stdout.trim() || 'Command executed successfully';
    // Update the message with the command result
    setMessages(prev => {
      const newMsgs = [...prev];
      if (newMsgs[messageIndex]?.functionCalls?.[callIndex]) {
        newMsgs[messageIndex].functionCalls![callIndex].executed = true;
        newMsgs[messageIndex].functionCalls![callIndex].result = result;
      }
      return newMsgs;
    });
    // Add a loading indicator for processing the function result
    setMessages(prev => [
      ...prev,
      {
        role: 'system',
        content: 'Processing command results...',
        isLoading: true
      }
    ]);
    // Update chat history with function execution info but without showing the result again
    setChatHistory(prev => [
      ...prev,
      {
        role: 'system',
        parts: [{ text: `Command executed: ${command}` }]
      }
    ]);
    try {
      // Use the existing chat session if provided, otherwise create a new one
      // NOTE(review): when chatSession is falsy it is also passed as the
      // history argument; startChat's default [] only applies for undefined,
      // so a null session would propagate null history — confirm null never
      // reaches here.
      const session = chatSession || geminiApi.startChat(chatSession);
      // Get a fresh reference to messages for safer access
      let functionName = "runTerminalCommand"; // Default fallback
      // First try to get the function name from the message state
      if (messageIndex >= 0 && callIndex >= 0) {
        // Access the current messages state directly
        // (read-only trick: the updater returns prev unchanged and is used
        // solely to observe the latest state from this async callback)
        setMessages(prev => {
          if (prev[messageIndex]?.functionCalls?.[callIndex]?.name) {
            functionName = prev[messageIndex].functionCalls![callIndex].name;
          } else {
            // If we can't find it through the standard path (which might happen during async state updates)
            // We'll use the fallback that's already set
            console.log("Using fallback function name");
          }
          return prev;
        });
      }
      // Send function results back to the model
      const response = await geminiApi.sendFunctionResults(session, functionName, result);
      // Check if the response contains more function calls
      if (typeof response === 'object' && response.functionCalls && response.functionCalls.length > 0) {
        // Add the model's response with function calls
        setMessages(prev => {
          const newMsgs = [...prev];
          // Replace loading system message
          const loadingIndex = newMsgs.findIndex(m => m.isLoading);
          if (loadingIndex !== -1) {
            // Just remove the loading indicator since the function call UI already shows the result
            newMsgs.splice(loadingIndex, 1);
          }
          // Add AI's reasoning/analysis
          newMsgs.push({
            role: 'assistant',
            content: response.text,
            functionCalls: response.functionCalls
          });
          const msgIndex = newMsgs.length - 1;
          // Set message index for potential execution of unsafe commands
          setMessageToExecute(msgIndex);
          // Automatically execute safe commands
          const safeCallIndex = response.functionCalls.findIndex((call: {name: string; args: {command: string; isSafe: boolean}}) =>
            call.args.isSafe);
          if (safeCallIndex !== -1) {
            // Run the first safe command automatically
            const command = response.functionCalls[safeCallIndex].args.command;
            // Store the command and execution details for reference
            const commandDetails = {
              command,
              msgIndex,
              safeCallIndex,
              chatSession: session // Use the current session for continuity
            };
            // Use a small delay to ensure React state is updated first
            setTimeout(() => {
              // Execute outside the React state update to avoid React batch update issues
              executeCommand(
                commandDetails.command,
                commandDetails.msgIndex,
                commandDetails.safeCallIndex,
                commandDetails.chatSession,
                geminiApi,
                setMessages,
                setChatHistory,
                setPendingExecution,
                setMessageToExecute
              );
            }, 100);
          }
          return newMsgs;
        });
        // Update chat history - ensure we always have non-empty text
        setChatHistory(prev => [
          ...prev,
          {
            role: 'model',
            parts: [{ text: response.text || "I'll process that for you." }]
          }
        ]);
        // Set pending execution to false (for unsafe commands, safe ones auto-execute)
        setPendingExecution(false);
      } else {
        // No more function calls - just a regular response
        setMessages(prev => {
          const newMsgs = [...prev];
          // Replace loading system message
          const loadingIndex = newMsgs.findIndex(m => m.isLoading);
          if (loadingIndex !== -1) {
            // Just remove the loading indicator since the function call UI already shows the result
            newMsgs.splice(loadingIndex, 1);
          }
          // Add AI's final response
          newMsgs.push({
            role: 'assistant',
            content: typeof response === 'string' ? response : response.text
          });
          return newMsgs;
        });
        // Update chat history - ensure we have non-empty text
        setChatHistory(prev => [
          ...prev,
          {
            role: 'model',
            parts: [{
              text: typeof response === 'string'
                ? (response || "I processed your request.")
                : (response.text || "I processed your request.")
            }]
          }
        ]);
        // Reset states
        setPendingExecution(false);
        setMessageToExecute(null);
      }
    } catch (error) {
      console.error("Error handling function result:", error);
      // Update error in UI
      setMessages(prev => {
        const newMsgs = [...prev];
        // Replace loading message if any
        const loadingIndex = newMsgs.findIndex(m => m.isLoading);
        if (loadingIndex !== -1) {
          // Just remove the loading indicator since the function call UI already shows the result
          newMsgs.splice(loadingIndex, 1);
        }
        // Try to show a more useful error message
        let errorMsg = "An error occurred while processing the command result.";
        if (error instanceof Error) {
          // For explicit errors, show the message
          errorMsg = `Error: ${error.message}`;
        } else if (typeof error === 'string') {
          errorMsg = `Error: ${error}`;
        }
        // Add error message
        newMsgs.push({
          role: 'assistant',
          content: errorMsg
        });
        return newMsgs;
      });
      // Reset states
      setPendingExecution(false);
      setMessageToExecute(null);
    }
  });
}
// A tool exposed to the Gemini API: a bundle of function declarations
// that the model may invoke via function calling.
export interface ToolDefinition {
  functionDeclarations: FunctionDeclaration[];
}

// JSON-schema-style declaration of a single callable function:
// name, human-readable description, and an object schema for its arguments.
export interface FunctionDeclaration {
  name: string;
  description: string;
  parameters: {
    type: string;                    // schema type, e.g. "object"
    properties: Record<string, any>; // per-argument schemas
    required: string[];              // argument names the model must supply
  };
}
// Terminal command tool definition
// Declares runTerminalCommand to the model. The isSafe flag is decided by
// the model itself; the UI auto-runs safe commands and asks for confirmation
// on unsafe ones (see executeCommand / chat-ui).
export const terminalCommandTool: ToolDefinition = {
  functionDeclarations: [
    {
      name: "runTerminalCommand",
      description: "Run a terminal command on the user's system. IMMEDIATELY RUN this tool for information gathering tasks like listing files, viewing content, or checking system information. For commands like ls, pwd, cat, find, grep, etc., run them directly and set isSafe=true. For potentially destructive commands like rm, mv, format, etc., set isSafe=false. The UI will automatically handle the confirmation flow based on the isSafe flag. Never ask for permission in your text response - just set the appropriate isSafe flag and let the UI handle it. Always run appropriate commands immediately without hesitation.",
      parameters: {
        type: "object",
        properties: {
          command: {
            type: "string",
            description: "The terminal command to execute"
          },
          isSafe: {
            type: "boolean",
            description: "Whether the command is considered safe to run"
          }
        },
        required: ["command", "isSafe"]
      }
    }
  ]
};
// Tool handler types
// Contract for objects that service a model function call and return the
// JSON-encoded result string sent back to the model.
export interface ToolHandler {
  handleFunctionCall: (args: any) => Promise<string>;
}

// Terminal command tool handler (placeholder implementation).
// Does not actually run anything yet — it only reports what would happen,
// encoding {output, exitCode} as a JSON string.
export class TerminalCommandHandler implements ToolHandler {
  async handleFunctionCall(args: { command: string, isSafe: boolean }): Promise<string> {
    const payload = args.isSafe
      ? { output: `Would execute command: ${args.command}`, exitCode: 0 }
      : {
          output: "Command was not run because it was marked as unsafe.",
          exitCode: -1
        };
    return JSON.stringify(payload);
  }
}
// Registry of tool handlers
// Maps function-declaration names (as sent back by the model) to the handler
// object that services them.
export const toolHandlers: Record<string, ToolHandler> = {
  runTerminalCommand: new TerminalCommandHandler()
};

// Get all available tools
// Every tool definition listed here is advertised to the model when
// function calling is enabled (see GeminiAPI constructor).
export function getAvailableTools(): ToolDefinition[] {
  return [terminalCommandTool];
}
src/utils/message-formatter.ts | TypeScript | // Utility for formatting messages for the Gemini API
type MessagePart = {
text: string;
};
export interface FormattedMessage {
role: 'user' | 'model';
parts: MessagePart[];
}
export interface Message {
role: 'user' | 'assistant' | 'system';
content: string;
isLoading?: boolean;
functionCalls?: Array<{
name: string;
args: {
command: string;
isSafe: boolean;
};
result?: string;
executed?: boolean;
}>;
chatSession?: any;
}
/**
* Formats chat messages for the Gemini API
* - Filters out loading messages
* - Converts roles to Gemini's expected format
* - Ensures non-empty text for function call messages
*/
export function formatMessagesForGeminiAPI(messages: Message[]): FormattedMessage[] {
return messages
.filter(msg => !msg.isLoading) // Filter out loading messages
.map(msg => {
if (msg.role === 'system') {
return { role: 'model', parts: [{ text: msg.content }] };
} else if (msg.role === 'assistant' && msg.functionCalls && msg.functionCalls.length > 0) {
// For assistant messages with function calls, ensure they have text content
// This prevents the "empty text parameter" error when sending a follow-up message
return {
role: 'model',
parts: [{ text: msg.content || "I'll process that for you." }]
};
} else {
return {
role: msg.role === 'assistant' ? 'model' : 'user',
parts: [{ text: msg.content }]
};
}
});
} | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/chat-controller.test.ts | TypeScript | /**
* @jest-environment node
*/
import { GeminiAPI } from '../src/gemini-api.js';
import { jest } from '@jest/globals';
// Define types for mocking purposes to resolve TypeScript errors
// Mirrors the wrapped response shape produced by GeminiAPI.startChat():
// a response.text() accessor plus optional candidates whose parts may carry
// either a functionCall or plain text.
type MockResponse = {
  response: {
    text: () => string;
    candidates?: Array<{
      content: {
        parts: Array<{
          functionCall?: {
            name: string;
            args: {
              command: string;
              isSafe: boolean;
            };
          };
          text?: string;
        }>;
      };
    }>;
  };
};
// Shape of GeminiAPI chat session response with sendMessage function
// (only sendMessage is mocked; history bookkeeping is not simulated).
type MockChatSession = {
  sendMessage: jest.MockedFunction<(input: any) => Promise<MockResponse>>;
};
// Test the chat controller core functionality by directly using the GeminiAPI
// Covers: construction flags, message formatting, function-call extraction,
// function-result round trips (mocked), and two live-API exploratory tests
// that are skipped in CI.
describe('Gemini Chat Functionality', () => {
  let gemini: GeminiAPI;
  beforeAll(() => {
    // Initialize API with function calling enabled
    gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
  });
  test('API should be initialized with function calling', () => {
    // @ts-ignore - accessing private property for testing
    expect(gemini.tools.length).toBeGreaterThan(0);
    // @ts-ignore - accessing private property for testing
    expect(gemini.toolConfig).toBeDefined();
  });
  // Re-implements the controller's role-mapping logic and checks the mapping.
  test('API should format messages correctly', () => {
    const history = [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi there' },
      { role: 'system', content: 'Processing command' }
    ];
    // Convert to Gemini format
    const formattedHistory = history
      .map(msg => {
        if (msg.role === 'system') {
          return { role: 'model', parts: [{ text: msg.content }] };
        } else {
          return {
            role: msg.role === 'assistant' ? 'model' : 'user',
            parts: [{ text: msg.content }]
          };
        }
      });
    // Check the conversion is correct
    expect(formattedHistory.length).toBe(3);
    expect(formattedHistory[0].role).toBe('user');
    expect(formattedHistory[1].role).toBe('model');
    expect(formattedHistory[2].role).toBe('model');
    expect(formattedHistory[0].parts[0].text).toBe('Hello');
  });
  test('API should have function calling configuration set correctly', () => {
    // Create a GeminiAPI instance with function calling enabled
    const geminiWithFunctions = new GeminiAPI('gemini-2.0-flash', undefined, true);
    // Check that the API has tools defined
    // @ts-ignore - accessing private property for testing
    expect(geminiWithFunctions.tools).toBeDefined();
    // @ts-ignore - accessing private property for testing
    expect(geminiWithFunctions.tools.length).toBeGreaterThan(0);
    // Check tool config mode is set to AUTO as specified in CLAUDE.md
    // @ts-ignore - accessing private property for testing
    expect(geminiWithFunctions.toolConfig).toBeDefined();
    // @ts-ignore - accessing private property for testing
    expect(geminiWithFunctions.toolConfig.functionCallingConfig).toBeDefined();
    // @ts-ignore - accessing private property for testing
    expect(geminiWithFunctions.toolConfig.functionCallingConfig.mode).toBe("AUTO");
  });
  test('API should correctly process function calls from response', () => {
    // Create a mock response with function calls
    const mockResult = {
      response: {
        candidates: [
          {
            content: {
              parts: [
                {
                  functionCall: {
                    name: "runTerminalCommand",
                    args: {
                      command: "ls -la",
                      isSafe: true
                    }
                  }
                }
              ]
            }
          }
        ]
      }
    };
    // Process the mock response
    // @ts-ignore - accessing private method for testing
    const functionCalls = gemini.processFunctionCalls(mockResult);
    // Verify the processed function calls
    expect(functionCalls.length).toBe(1);
    expect(functionCalls[0].name).toBe("runTerminalCommand");
    expect(functionCalls[0].args.command).toBe("ls -la");
    expect(functionCalls[0].args.isSafe).toBe(true);
  });
  test('API should properly format and send function results', async () => {
    // Create a mock chat session with a sendMessage method
    const mockChatSession: MockChatSession = {
      sendMessage: jest.fn().mockResolvedValue({
        response: {
          text: () => "I received your function result"
        }
      } as MockResponse)
    };
    // Call the sendFunctionResults method
    const result = await gemini.sendFunctionResults(
      mockChatSession,
      "runTerminalCommand",
      "Command executed successfully: directory listing complete"
    );
    // Verify the chat session's sendMessage was called with properly formatted parts
    expect(mockChatSession.sendMessage).toHaveBeenCalledTimes(1);
    // Get the parts that were passed to sendMessage
    const parts = mockChatSession.sendMessage.mock.calls[0][0] as Array<{
      functionResponse: {
        name: string;
        response: {
          content: string;
        };
      };
    }>;
    // Verify the structure of the parts
    expect(parts).toHaveLength(1);
    expect(parts[0].functionResponse).toBeDefined();
    expect(parts[0].functionResponse.name).toBe("runTerminalCommand");
    expect(parts[0].functionResponse.response.content).toBe("Command executed successfully: directory listing complete");
    // Verify the returned result
    expect(result).toBe("I received your function result");
  });
  // Live-API diagnostic test: logs behavior rather than strictly asserting it.
  test('API should correctly handle real function calling with terminal commands', async () => {
    // Skip if running CI (API key might not be available)
    if (process.env.CI) {
      console.log('Skipping API test in CI environment');
      return;
    }
    // Initialize with the same model used in chat-controller.ts
    const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true,
      `You are a helpful terminal assistant, working in the directory: ${process.cwd()}. You can run terminal commands for the user when appropriate.`);
    try {
      // Print current tool configuration for debugging
      console.log("Tool configuration:");
      console.log(JSON.stringify(gemini['toolConfig'], null, 2));
      console.log("Tools defined:");
      console.log(JSON.stringify(gemini['tools'], null, 2));
      // Send a prompt that should trigger function calling with more explicit instructions
      const prompt = "I need you to execute the 'ls .' terminal command. DO NOT just explain it or describe it - use the runTerminalCommand function to run it.";
      console.log("Sending prompt:", prompt);
      const response = await gemini.sendMessage(prompt);
      // Log the full response to debug
      console.log("Raw response type:", typeof response);
      if (typeof response === 'string') {
        console.log("Text response received instead of function call:");
        console.log(response);
        // Test passes if we get string or object, but we log the issue
        console.warn("NOTE: Model returned text instead of function call - this is the hallucination issue");
      } else {
        console.log("Function call response:");
        console.log(JSON.stringify(response, null, 2));
        // Verify we got a proper response structure for function calls
        expect(response.text).toBeDefined();
        expect(response.functionCalls).toBeDefined();
        expect(Array.isArray(response.functionCalls)).toBe(true);
        if (response.functionCalls && response.functionCalls.length > 0) {
          // Verify the function call details
          const functionCall = response.functionCalls[0];
          expect(functionCall.name).toBe('runTerminalCommand');
          expect(functionCall.args).toBeDefined();
          expect(functionCall.args.command).toBeDefined();
          expect(functionCall.args.isSafe).toBeDefined();
          // Check that isSafe is a boolean
          expect(typeof functionCall.args.isSafe).toBe('boolean');
        } else {
          console.warn("Function calls array exists but is empty - partial hallucination");
        }
      }
      // Test passes either way - we're just diagnosing
    } catch (error) {
      console.error("API test error:", error);
      throw error;
    }
  });
  // Exploratory live-API test: observes, but does not assert, model behavior.
  test('Compare function calling behavior between different models', async () => {
    // Skip if running CI (API key might not be available)
    if (process.env.CI) {
      console.log('Skipping API test in CI environment');
      return;
    }
    // Test with the flash model
    const modelName = 'gemini-2.0-flash';
    const systemInstruction = `You are a helpful terminal assistant, working in the directory: ${process.cwd()}. You can run terminal commands for the user when appropriate.`;
    const prompt = "Execute the 'ls .' terminal command.";
    console.log("=== Testing function calling with flash model ===");
    console.log(`\n--- TESTING MODEL: ${modelName} ---`);
    // Initialize API with function calling enabled
    const api = new GeminiAPI(modelName, undefined, true, systemInstruction);
    try {
      // Send prompt to flash model
      const response = await api.sendMessage(prompt);
      console.log(`Model ${modelName} response type:`, typeof response);
      if (typeof response === 'string') {
        console.log("Text response received:");
        console.log(response.substring(0, 200) + (response.length > 200 ? '...' : ''));
        console.warn("NOTE: Model returned text instead of function call");
      } else {
        console.log("Function call response detected:");
        if (response.functionCalls && response.functionCalls.length > 0) {
          console.log(`Function name: ${response.functionCalls[0].name}`);
          console.log(`Command: ${response.functionCalls[0].args.command}`);
          console.log(`isSafe: ${response.functionCalls[0].args.isSafe}`);
        } else {
          console.warn("Function calls array exists but is empty");
        }
      }
    } catch (error) {
      // NOTE(review): `error` is untyped (unknown under strict TS), so
      // `.message` access here relies on lax compiler settings — confirm.
      console.error(`Error with model ${modelName}:`, error.message);
    }
    // Not verifying results - this is exploratory
  });
  // Mocks a three-turn loop: call -> result -> chained call -> result -> text.
  test('API should handle complete function call flow with chained function calls', async () => {
    // Create mocks for nested function calls
    const mockFirstResponse = {
      response: {
        text: () => "I'll list those files for you",
        candidates: [{
          content: {
            parts: [{
              functionCall: {
                name: "runTerminalCommand",
                args: {
                  command: "ls -la",
                  isSafe: true
                }
              }
            }]
          }
        }]
      }
    };
    const mockSecondResponse = {
      response: {
        text: () => "Here are the results from the command",
        candidates: [{
          content: {
            parts: [{
              functionCall: {
                name: "runTerminalCommand",
                args: {
                  command: "cat file.txt",
                  isSafe: true
                }
              }
            }]
          }
        }]
      }
    };
    const mockFinalResponse = {
      response: {
        text: () => "Final response with no more function calls"
      }
    };
    // Setup mocked chat session
    const mockChatSession: MockChatSession = {
      sendMessage: jest.fn()
        .mockResolvedValueOnce(mockFirstResponse as MockResponse) // Initial response with function call
        .mockResolvedValueOnce(mockSecondResponse as MockResponse) // Response after first function result
        .mockResolvedValueOnce(mockFinalResponse as MockResponse) // Final response
    };
    // Mock the startChat method to return our mock session
    jest.spyOn(gemini, 'startChat').mockReturnValue(mockChatSession);
    // Call sendMessage to start the flow
    const response = await gemini.sendMessage("List my files", []);
    // Verify that the response contains function calls
    expect(typeof response).toBe('object');
    expect(response.text).toBe("I'll list those files for you");
    expect(response.functionCalls).toBeDefined();
    expect(response.functionCalls.length).toBe(1);
    expect(response.functionCalls[0].name).toBe("runTerminalCommand");
    expect(response.functionCalls[0].args.command).toBe("ls -la");
    expect(response.chatSession).toBe(mockChatSession);
    // Now simulate sending function results back
    const secondResponse = await gemini.sendFunctionResults(
      mockChatSession,
      "runTerminalCommand",
      "file1.txt\nfile2.txt\nfile3.txt"
    );
    // Verify second response structure
    expect(typeof secondResponse).toBe('object');
    expect(secondResponse.text).toBe("Here are the results from the command");
    expect(secondResponse.functionCalls).toBeDefined();
    expect(secondResponse.functionCalls.length).toBe(1);
    expect(secondResponse.functionCalls[0].name).toBe("runTerminalCommand");
    expect(secondResponse.functionCalls[0].args.command).toBe("cat file.txt");
    // Finally, send the last function result
    const finalResponse = await gemini.sendFunctionResults(
      mockChatSession,
      "runTerminalCommand",
      "Contents of file.txt"
    );
    // Verify final response is a simple text response
    expect(finalResponse).toBe("Final response with no more function calls");
  });
  test('API should handle user message after function call response', async () => {
    // Create a mock chat session for the function call flow
    const mockFunctionResponse = {
      response: {
        text: () => "Command executed successfully",
        candidates: [] // No more function calls
      }
    };
    // Mock response for user's follow-up message
    const mockUserFollowupResponse = {
      response: {
        text: () => "Here's my response to your follow-up message"
      }
    };
    // Setup mocked chat session
    const mockChatSession: MockChatSession = {
      sendMessage: jest.fn()
        .mockResolvedValueOnce(mockFunctionResponse as MockResponse) // Response to function result
        .mockResolvedValueOnce(mockUserFollowupResponse as MockResponse) // Response to follow-up user message
    };
    // Mock the startChat method to return our mock session
    jest.spyOn(gemini, 'startChat').mockReturnValue(mockChatSession);
    // First, simulate sending function results
    const functionResultResponse = await gemini.sendFunctionResults(
      mockChatSession,
      "runTerminalCommand",
      "ls -la output"
    );
    // Verify function result response
    expect(functionResultResponse).toBe("Command executed successfully");
    // Create a formatted history that includes the function call and result
    const history = [
      { role: 'user', parts: [{ text: "Run ls command" }] },
      { role: 'model', parts: [{ text: "I'll run that for you" }] },
      {
        role: 'function',
        parts: [{
          functionResponse: {
            name: "runTerminalCommand",
            response: { content: "ls -la output" }
          }
        }]
      },
      { role: 'model', parts: [{ text: "Command executed successfully" }] }
    ];
    // Now simulate sending a user follow-up message after the function call
    const userMessage = "Analyze these results";
    // Create a new mock chat session for the follow-up
    const mockFollowupChatSession: MockChatSession = {
      sendMessage: jest.fn().mockResolvedValue(mockUserFollowupResponse as MockResponse)
    };
    // Mock the startChat method again
    jest.spyOn(gemini, 'startChat').mockReturnValue(mockFollowupChatSession);
    // Send the follow-up user message
    const followupResponse = await gemini.sendMessage(userMessage, history);
    // Verify the response to the follow-up message
    expect(followupResponse).toBe("Here's my response to your follow-up message");
    // Verify that startChat was called with the correct history
    expect(gemini.startChat).toHaveBeenCalledWith(history);
    // Verify that sendMessage on the chat session was called with the user message
    expect(mockFollowupChatSession.sendMessage).toHaveBeenCalledWith(userMessage);
  });
});
tests/chat-ui.test.tsx | TypeScript (TSX) | /**
* @jest-environment node
*/
import React from 'react';
import { render } from 'ink-testing-library';
import { jest } from '@jest/globals';
// We need to test the implementation of line 19:
// if (key.ctrl && input === 'c') { exit(); return; }
// Create our mocks before importing
const mockExit = jest.fn();
let capturedCallback = null;
// Mock the ink module
jest.mock('ink', () => ({
Box: jest.fn(({ children }) => children),
Text: jest.fn(({ children }) => children),
useApp: () => ({ exit: mockExit }),
useInput: (callback) => {
// Store the callback so we can trigger it manually in tests
capturedCallback = callback;
}
}));
// Mock the spinner
jest.mock('ink-spinner', () => jest.fn(() => 'Loading...'));
// Mock the controller
jest.mock('../src/chat-controller', () => ({
useChatController: () => ({
messages: [],
inputText: 'test-input',
handleEnterKey: jest.fn(),
appendToInputText: jest.fn(),
backspaceInputText: jest.fn()
})
}));
// Import after mocking
import { ChatApp } from '../src/chat-ui';
// Test our component
describe('ChatApp component', () => {
beforeEach(() => {
mockExit.mockReset();
capturedCallback = null;
});
test('exits when Ctrl+C is pressed', () => {
  // Reset the mock so a previous test cannot leave a recorded call behind.
  mockExit.mockClear();
  // Render our component (this triggers the mocked useInput, which stores
  // its callback in capturedCallback).
  render(<ChatApp />);
  // The callback must actually have been captured. The previous
  // `toBeDefined()` check passed even for `null` (the beforeEach reset
  // value), so check for null explicitly.
  expect(capturedCallback).not.toBeNull();
  // Trigger the callback with Ctrl+C unconditionally. Wrapping this in
  // `if (capturedCallback)` previously let the test pass silently without
  // asserting anything when the callback was never captured.
  capturedCallback('c', { ctrl: true });
  // Verify that exit() from useApp was invoked.
  expect(mockExit).toHaveBeenCalled();
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini-api-function-call.test.ts | TypeScript | import 'dotenv/config';
import { GeminiAPI } from '../src/gemini-api';
// Don't use jest.setTimeout here; it's set in the config
describe('GeminiAPI Function Calling Tests', () => {
test('should handle basic function calling setup with custom tool', async () => {
// Initialize the Gemini API with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Verify the toolConfig is configured correctly with AUTO mode (per CLAUDE.md)
expect(gemini['toolConfig']).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig.mode).toBe('AUTO');
// Verify tools array exists
expect(gemini['tools']).toBeDefined();
expect(Array.isArray(gemini['tools'])).toBe(true);
expect(gemini['tools'].length).toBeGreaterThan(0);
// Verify the terminal command tool exists and is properly configured
const toolDeclarations = gemini['tools'][0].functionDeclarations;
expect(toolDeclarations).toBeDefined();
const terminalTool = toolDeclarations.find((fn) => fn.name === 'runTerminalCommand');
expect(terminalTool).toBeDefined();
expect(terminalTool.parameters.properties).toHaveProperty('command');
expect(terminalTool.parameters.properties).toHaveProperty('isSafe');
expect(terminalTool.parameters.required).toContain('command');
expect(terminalTool.parameters.required).toContain('isSafe');
});
test('should correctly format function responses for Gemini API', async () => {
// Verifies sendFunctionResults returns the model's text reply, and sanity-
// checks the expected functionResponse message shape.
// NOTE(review): the sampleFunctionResponse assertions below validate a
// locally built literal, not what sendFunctionResults actually sent —
// consider spying on chatSession.sendMessage's argument instead.
// Initialize the Gemini API with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Start a chat session
const chatSession = gemini.startChat();
// Create a mock function call result
const functionName = 'runTerminalCommand';
const functionResult = JSON.stringify({
output: "This is a test result from the function call",
exitCode: 0
});
// Mock the chat session's sendMessage method to avoid actual API calls
const originalSendMessage = chatSession.sendMessage;
// Create a mock function that simulates the response
chatSession.sendMessage = async () => ({
response: {
text: () => "I received the function result"
}
});
// Call the method under test
const result = await gemini.sendFunctionResults(chatSession, functionName, functionResult);
// Verify the result
expect(result).toBe("I received the function result");
// Now manually verify the format of a function response
// Create a sample function response message
const sampleFunctionResponse = [
{
functionResponse: {
name: functionName,
response: {
content: functionResult
}
}
}
];
// Check that it has the expected structure
expect(Array.isArray(sampleFunctionResponse)).toBe(true);
expect(sampleFunctionResponse[0]).toHaveProperty('functionResponse');
expect(sampleFunctionResponse[0].functionResponse.name).toBe(functionName);
expect(sampleFunctionResponse[0].functionResponse.response).toHaveProperty('content');
// Restore the original method
chatSession.sendMessage = originalSendMessage;
});
test('should handle complete function calling flow with weather example', async () => {
// End-to-end shape test of the call/response protocol: both sendMessage and
// sendFunctionResults are mocked, so this exercises the flow's wiring, not
// real model behavior.
// Skip if running CI (API key might not be available)
if (process.env.CI) {
console.log('Skipping API test in CI environment');
return;
}
// Initialize with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Spy on the sendFunctionResults method to avoid actual API calls
const originalSendFunctionResults = gemini.sendFunctionResults;
let functionCallData = null;
// Mock sendFunctionResults to verify correct calls and return predictable response
gemini.sendFunctionResults = async (chatSession, name, response) => {
functionCallData = { name, response };
return 'The weather in Vancouver is nice.';
};
// Mock sendMessage to simulate function call response
const originalSendMessage = gemini.sendMessage;
gemini.sendMessage = async (prompt, chatSession) => {
return {
response: {
text: () => '',
functionCalls: () => [{
name: 'getWeather',
args: { city: 'vancouver' }
}]
}
};
};
// Create a chat session and test the flow
const chatSession = gemini.startChat();
// Send initial prompt that should trigger function call
const result = await gemini.sendMessage("what's the weather in vancouver", chatSession);
// Verify function call was detected
expect(result.response.functionCalls()).toBeDefined();
expect(result.response.functionCalls().length).toBe(1);
// Extract function call details
const functionCall = result.response.functionCalls()[0];
expect(functionCall.name).toBe('getWeather');
expect(functionCall.args).toEqual({ city: 'vancouver' });
// Handle the function call and send results back
const functionOutput = { temperature: '22°C', condition: 'Sunny', humidity: '45%' };
const response = await gemini.sendFunctionResults(
chatSession,
functionCall.name,
JSON.stringify(functionOutput)
);
// Verify function results were sent correctly
expect(functionCallData).toEqual({
name: 'getWeather',
response: JSON.stringify(functionOutput)
});
// Verify final response
expect(response).toBe('The weather in Vancouver is nice.');
// Restore original methods (mutated directly above, so restore both)
gemini.sendMessage = originalSendMessage;
gemini.sendFunctionResults = originalSendFunctionResults;
});
test('should handle error cases in function calling', async () => {
  // Verifies that sendFunctionResults rejects for an unknown function name
  // and for a response payload that is not valid JSON.
  // Initialize the Gemini API with function calling enabled
  const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
  // Create a mock chat session
  const chatSession = gemini.startChat();

  // One mock covers both error scenarios; any other input resolves normally.
  gemini.sendFunctionResults = async (chatSession, name, response) => {
    if (name === 'nonExistentFunction') {
      throw new Error('Function nonExistentFunction is not defined in the model configuration.');
    }
    if (response === 'invalid json') {
      throw new Error('Invalid JSON in function response');
    }
    return 'Success';
  };

  // Test case 1: invalid function name must reject with a descriptive error.
  // Using rejects.toThrow replaces the previous try/catch anti-pattern, where
  // a failing `expect(...).toBe(false)` inside the try was itself swallowed
  // by the catch block and re-checked with a misleading message.
  await expect(
    gemini.sendFunctionResults(chatSession, 'nonExistentFunction', '{}')
  ).rejects.toThrow('nonExistentFunction');

  // Test case 2: invalid JSON in the response must also reject.
  await expect(
    gemini.sendFunctionResults(chatSession, 'getWeather', 'invalid json')
  ).rejects.toThrow('Invalid JSON');
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini-api-system-instructions.test.ts | TypeScript | /**
* @jest-environment node
*/
import { GeminiAPI } from '../src/gemini-api';
import dotenv from 'dotenv';
// Load environment variables from .env file
dotenv.config();
// Skip all tests if API key isn't set
const hasApiKey = !!process.env.GEMINI_API_KEY;
describe('GeminiAPI System Instructions Test', () => {
(hasApiKey ? test : test.skip)('should correctly extract the directory path from system instructions', async () => {
// Live integration test: asks the real model to echo back the directory path
// embedded in its system instruction, to confirm the instruction is applied.
// NOTE(review): console.log is saved and "restored" but never reassigned in
// this test — the finally block is a no-op; presumably copied from a variant
// that silenced logging. Confirm before removing.
const originalConsoleLog = console.log;
try {
// System instruction with a unique directory path
const systemInstruction = `You are a helpful terminal assistant in the Turing application, working in the directory: /VERY_UNIQUE_TEST_DIR_12345/gemini_test_path.
You can run terminal commands for the user when appropriate. Only suggest running terminal commands when they are safe and necessary.
Provide clear explanations about what commands will do before executing them. Focus on being helpful, concise, and security-conscious.`;
// Create GeminiAPI instance with system instruction
const gemini = new GeminiAPI(
'gemini-2.0-flash-lite',
{
temperature: 0.1, // Lower temperature for more predictable outputs
topP: 0.1,
responseMimeType: 'text/plain',
},
false, // No function calling needed for this test
systemInstruction
);
// Query to extract the directory path
const prompt = `According to your system instructions, what specific directory path are you configured to work in? Extract and provide ONLY the exact
directory path from your system instructions.`;
originalConsoleLog('Sending request to Gemini API...');
const response = await gemini.sendMessage(prompt);
// We expect the raw text response from our GeminiAPI class
const fullResponse = typeof response === 'string' ? response : '';
originalConsoleLog('Full response from Gemini:', fullResponse);
// Check for expected path
const expectedPath = '/VERY_UNIQUE_TEST_DIR_12345/gemini_test_path';
// Whitespace-insensitive comparison tolerates line wrapping in the reply.
const cleanResponse = fullResponse.trim().replace(/\s+/g, '');
const cleanExpected = expectedPath.trim().replace(/\s+/g, '');
// Test if the response contains the expected path
const containsPath = fullResponse.includes(expectedPath);
const similarPath = cleanResponse.includes(cleanExpected);
originalConsoleLog('Contains exact path:', containsPath);
originalConsoleLog('Contains similar path:', similarPath);
// Also check if we can compose the path from the chunks
expect(containsPath || similarPath).toBe(true);
} finally {
// Restore console functions
console.log = originalConsoleLog;
}
}, 30000); // 30 second timeout for API call
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini-function-call.test.ts | TypeScript | import { GoogleGenAI, Type } from '@google/genai';
import 'dotenv/config';
import { GeminiAPI } from '../src/gemini-api';
// Don't use jest.setTimeout here; it's set in the config
// Using a longer timeout is handled in jest.config.js
describe('Gemini Function Calling Tests', () => {
test('should make a direct function call request to Gemini Flash with minimal conversation', async () => {
// Live integration test against the streaming SDK API: declares a getWeather
// tool and asserts the model emits a function call for it with the right city.
// Skip if running CI (API key might not be available)
if (process.env.CI) {
console.log('Skipping API test in CI environment');
return;
}
const ai = new GoogleGenAI({
apiKey: process.env.GEMINI_API_KEY,
});
const tools = [
{
functionDeclarations: [
{
name: 'getWeather',
description: 'gets the weather for a requested city',
parameters: {
type: Type.OBJECT,
properties: {
city: {
type: Type.STRING,
},
},
required: ['city']
},
},
],
}
];
const config = {
tools,
responseMimeType: 'text/plain',
toolConfig: {
functionCallingConfig: {
mode: "AUTO" // Always use "AUTO" mode per CLAUDE.md instructions
}
}
};
const model = 'gemini-2.0-flash';
const contents = [
{
role: 'user',
parts: [
{
text: `What's the weather like in Vancouver?`,
},
],
}
];
try {
// Make a direct API call using the stream API
const response = await ai.models.generateContentStream({
model,
config,
contents,
});
// Scan every streamed chunk; the function call may arrive in any of them.
let functionCallDetected = false;
for await (const chunk of response) {
if (chunk.functionCalls && chunk.functionCalls.length > 0) {
functionCallDetected = true;
const functionCall = chunk.functionCalls[0];
// Verify the function call was for getWeather
expect(functionCall.name).toBe('getWeather');
// Verify the city parameter is present
expect(functionCall.args).toHaveProperty('city');
expect(functionCall.args.city.toLowerCase()).toBe('vancouver');
}
}
// Verify that we detected a function call
expect(functionCallDetected).toBe(true);
} catch (error) {
console.error("Direct API test failed:", error);
// Re-throw to fail the test
throw error;
}
});
test('should test terminal command function calling with the production model', async () => {
// Live integration test mirroring the chat controller's production setup:
// model, terminal-command tool, AUTO mode, and system instruction must
// together produce a runTerminalCommand call for an ls request.
// Skip if running CI (API key might not be available)
if (process.env.CI) {
console.log('Skipping API test in CI environment');
return;
}
// Get the model used in the actual chat controller
const productionModel = 'gemini-2.0-flash';
const ai = new GoogleGenAI({
apiKey: process.env.GEMINI_API_KEY,
});
// Use the terminal command tool as defined in our implementation
const terminalCommandTool = {
functionDeclarations: [
{
name: "runTerminalCommand",
description: "Run a terminal command on the user's system",
parameters: {
type: "object",
properties: {
command: {
type: "string",
description: "The terminal command to execute"
},
isSafe: {
type: "boolean",
description: "Whether the command is considered safe to run"
}
},
required: ["command", "isSafe"]
}
}
]
};
const config = {
tools: [terminalCommandTool],
responseMimeType: 'text/plain',
toolConfig: {
functionCallingConfig: {
mode: "AUTO" // Always use "AUTO" mode per CLAUDE.md instructions
}
},
systemInstruction: {
parts: [
{
text: `You are a helpful terminal assistant in the Turing application. You can run terminal commands for the user when appropriate. Only suggest running terminal commands when they are safe and necessary. Provide clear explanations about what commands will do before executing them. Focus on being helpful, concise, and security-conscious.`
}
]
}
};
const contents = [
{
role: 'user',
parts: [
{
text: `Try running an LS command in the current directory using period and summarize the result. It is safe to run.`,
},
],
}
];
try {
// Make a direct API call
const response = await ai.models.generateContent({
model: productionModel,
config,
contents,
});
console.log("Production model test response:");
console.log(JSON.stringify(response, null, 2));
// Check for function calls
let foundFunctionCall = false;
if (response.candidates && response.candidates.length > 0) {
const candidate = response.candidates[0];
if (candidate.content && candidate.content.parts) {
for (const part of candidate.content.parts) {
if (part.functionCall) {
foundFunctionCall = true;
// Verify function call details
expect(part.functionCall.name).toBe('runTerminalCommand');
expect(part.functionCall.args).toHaveProperty('command');
expect(part.functionCall.args).toHaveProperty('isSafe');
// The command should be ls . or similar
expect(part.functionCall.args.command.toLowerCase().includes('ls')).toBe(true);
// It should be marked as safe
expect(part.functionCall.args.isSafe).toBe(true);
}
}
}
}
// We should find at least one function call
expect(foundFunctionCall).toBe(true);
} catch (error) {
console.error("Production model API test failed:", error);
console.error("Error details:", error instanceof Error ? error.message : String(error));
console.error("Stack trace:", error instanceof Error ? error.stack : "No stack trace");
// Mark the test as inconclusive rather than failing if it's likely an API limit issue
// NOTE(review): this message-sniffing makes the test silently PASS on quota
// or auth errors — deliberate best-effort, but keep the substrings in sync
// with the SDK's actual error text.
if (error.message && (
error.message.includes('quota') ||
error.message.includes('rate limit') ||
error.message.includes('429') ||
error.message.includes('invalid_grant')
)) {
console.warn("Test skipped due to apparent API limits - consider this inconclusive rather than a failure");
return;
}
// Re-throw for other errors
throw error;
}
});
test('should handle basic function calling setup with custom tool', async () => {
// Initialize the Gemini API with function calling enabled and use model from GEMINI_MODELS.md
const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
// Verify the toolConfig is configured correctly with AUTO mode (per CLAUDE.md)
expect(gemini['toolConfig']).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig.mode).toBe('AUTO');
// Verify tools array exists
expect(gemini['tools']).toBeDefined();
expect(Array.isArray(gemini['tools'])).toBe(true);
expect(gemini['tools'].length).toBeGreaterThan(0);
// Verify the terminal command tool exists and is properly configured
const toolDeclarations = gemini['tools'][0].functionDeclarations;
expect(toolDeclarations).toBeDefined();
const terminalTool = toolDeclarations.find((fn) => fn.name === 'runTerminalCommand');
expect(terminalTool).toBeDefined();
expect(terminalTool.parameters.properties).toHaveProperty('command');
expect(terminalTool.parameters.properties).toHaveProperty('isSafe');
expect(terminalTool.parameters.required).toContain('command');
expect(terminalTool.parameters.required).toContain('isSafe');
});
test('should correctly format function responses for Gemini API', async () => {
// Duplicate of the -lite variant in gemini-api-function-call.test.ts, run
// against gemini-2.0-flash; keep the two in sync when updating either.
// Initialize the Gemini API with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
// Start a chat session
const chatSession = gemini.startChat();
// Create a mock function call result
const functionName = 'runTerminalCommand';
const functionResult = JSON.stringify({
output: "This is a test result from the function call",
exitCode: 0
});
// Mock the chat session's sendMessage method to avoid actual API calls
const originalMethod = chatSession.sendMessage;
// Create a mock function that simulates the response
chatSession.sendMessage = async () => ({
response: {
text: () => "I received the function result"
}
});
// Call the method under test
const result = await gemini.sendFunctionResults(chatSession, functionName, functionResult);
// Verify the result
expect(result).toBe("I received the function result");
// Now manually verify the format of a function response
// Create a sample function response message
// NOTE(review): these checks validate a locally built literal, not the
// payload sendFunctionResults actually sent.
const sampleFunctionResponse = [
{
functionResponse: {
name: functionName,
response: {
content: functionResult
}
}
}
];
// Check that it has the expected structure
expect(Array.isArray(sampleFunctionResponse)).toBe(true);
expect(sampleFunctionResponse[0]).toHaveProperty('functionResponse');
expect(sampleFunctionResponse[0].functionResponse.name).toBe(functionName);
expect(sampleFunctionResponse[0].functionResponse.response).toHaveProperty('content');
// Restore the original method
chatSession.sendMessage = originalMethod;
});
test('should handle complete function calling flow with weather example', async () => {
// Duplicate of the -lite variant, run against gemini-2.0-flash. Both API
// methods are mocked, so this checks the flow's wiring, not model output.
// Skip if running CI (API key might not be available)
if (process.env.CI) {
console.log('Skipping API test in CI environment');
return;
}
// Initialize with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
// Spy on the sendFunctionResults method to avoid actual API calls
const originalSendFunctionResults = gemini.sendFunctionResults;
let functionCallData = null;
// Mock sendFunctionResults to verify correct calls and return predictable response
gemini.sendFunctionResults = async (chatSession, name, response) => {
functionCallData = { name, response };
return 'The weather in Vancouver is nice.';
};
// Mock sendMessage to simulate function call response
const originalSendMessage = gemini.sendMessage;
gemini.sendMessage = async (prompt, chatSession) => {
return {
response: {
text: () => '',
functionCalls: () => [{
name: 'getWeather',
args: { city: 'vancouver' }
}]
}
};
};
// Create a chat session and test the flow
const chatSession = gemini.startChat();
// Send initial prompt that should trigger function call
const result = await gemini.sendMessage("what's the weather in vancouver", chatSession);
// Verify function call was detected
expect(result.response.functionCalls()).toBeDefined();
expect(result.response.functionCalls().length).toBe(1);
// Extract function call details
const functionCall = result.response.functionCalls()[0];
expect(functionCall.name).toBe('getWeather');
expect(functionCall.args).toEqual({ city: 'vancouver' });
// Handle the function call and send results back
const functionOutput = { temperature: '22°C', condition: 'Sunny', humidity: '45%' };
const response = await gemini.sendFunctionResults(
chatSession,
functionCall.name,
JSON.stringify(functionOutput)
);
// Verify function results were sent correctly
expect(functionCallData).toEqual({
name: 'getWeather',
response: JSON.stringify(functionOutput)
});
// Verify final response
expect(response).toBe('The weather in Vancouver is nice.');
// Restore original methods
gemini.sendMessage = originalSendMessage;
gemini.sendFunctionResults = originalSendFunctionResults;
});
test('should handle error cases in function calling', async () => {
  // Verifies that sendFunctionResults rejects for an unknown function name
  // and for a response payload that is not valid JSON.
  // Initialize the Gemini API with function calling enabled
  const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
  // Create a mock chat session
  const chatSession = gemini.startChat();

  // One mock covers both error scenarios; any other input resolves normally.
  gemini.sendFunctionResults = async (chatSession, name, response) => {
    if (name === 'nonExistentFunction') {
      throw new Error('Function nonExistentFunction is not defined in the model configuration.');
    }
    if (response === 'invalid json') {
      throw new Error('Invalid JSON in function response');
    }
    return 'Success';
  };

  // Test case 1: invalid function name must reject with a descriptive error.
  // Using rejects.toThrow replaces the previous try/catch anti-pattern, where
  // a failing `expect(...).toBe(false)` inside the try was itself swallowed
  // by the catch block and re-checked with a misleading message.
  await expect(
    gemini.sendFunctionResults(chatSession, 'nonExistentFunction', '{}')
  ).rejects.toThrow('nonExistentFunction');

  // Test case 2: invalid JSON in the response must also reject.
  await expect(
    gemini.sendFunctionResults(chatSession, 'getWeather', 'invalid json')
  ).rejects.toThrow('Invalid JSON');
});
test('should allow custom function declarations with GeminiAPI', async () => {
// Create a custom weather function tool for this test
const weatherTool = {
functionDeclarations: [
{
name: "getWeather",
description: "Gets the weather for a specified city",
parameters: {
type: "object",
properties: {
city: {
type: "string",
description: "The city to get weather for"
}
},
required: ["city"]
}
}
]
};
// Create a custom GeminiAPI instance with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
// Replace the default terminal command tool with our weather tool
gemini['tools'] = [weatherTool];
// Verify the weather tool is set correctly
expect(gemini['tools']).toEqual([weatherTool]);
// Verify the toolConfig is set correctly
expect(gemini['toolConfig']).toEqual({functionCallingConfig: {mode: "AUTO"}});
// Verify the function declarations
expect(gemini['tools'][0].functionDeclarations[0].name).toBe('getWeather');
expect(gemini['tools'][0].functionDeclarations[0].parameters.properties).toHaveProperty('city');
expect(gemini['tools'][0].functionDeclarations[0].parameters.required).toContain('city');
});
test('should detect and process function calls correctly', async () => {
// Create a GeminiAPI instance
const gemini = new GeminiAPI('gemini-2.0-flash', undefined, true);
// Create a mock response with a function call
const mockResponse = {
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'getWeather',
args: {
city: 'Vancouver'
}
}
}
]
}
}
]
};
// Process function calls
const functionCalls = gemini.processFunctionCalls(mockResponse);
// Verify function calls are extracted correctly
expect(functionCalls.length).toBe(1);
expect(functionCalls[0].name).toBe('getWeather');
expect(functionCalls[0].args).toEqual({city: 'Vancouver'});
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini-system-instructions.test.ts | TypeScript | /**
* @jest-environment node
*/
import { GoogleGenAI } from '@google/genai';
import dotenv from 'dotenv';
// Load environment variables from .env file
dotenv.config();
// Skip all tests if API key isn't set
const hasApiKey = !!process.env.GEMINI_API_KEY;
describe('Gemini API System Instructions Test', () => {
(hasApiKey ? test : test.skip)('should correctly extract the directory path from system instructions', async () => {
// Raw-SDK counterpart of the GeminiAPI system-instruction test: calls
// @google/genai directly and checks the model echoes back the configured path.
// NOTE(review): console.log is saved and "restored" but never reassigned —
// the finally block is a no-op; presumably copied from a variant that
// silenced logging. Confirm before removing.
const originalConsoleLog = console.log;
try {
// Test implementation
const ai = new GoogleGenAI({
apiKey: process.env.GEMINI_API_KEY,
});
const config = {
responseMimeType: 'text/plain',
systemInstruction: [
{
text: ` You are a helpful terminal assistant in the Turing application, working in the directory: /VERY_UNIQUE_TEST_DIR_12345/gemini_test_path.
You can run terminal commands for the user when appropriate. Only suggest running terminal commands when they are safe and necessary.
Provide clear explanations about what commands will do before executing them. Focus on being helpful, concise, and security-conscious.`,
}
],
};
const model = 'gemini-2.0-flash-lite';
const contents = [
{
role: 'user',
parts: [
{
text: `According to your system instructions, what specific directory path are you configured to work in? Extract and provide ONLY the exact
directory path from your system instructions.`,
},
],
},
];
originalConsoleLog('Sending request to Gemini API...');
const response = await ai.models.generateContentStream({
model,
config,
contents,
});
// Collect all chunks into a single response
let fullResponse = '';
for await (const chunk of response) {
fullResponse += chunk.text;
}
originalConsoleLog('Full response from Gemini:', fullResponse);
// Check for expected path
const expectedPath = '/VERY_UNIQUE_TEST_DIR_12345/gemini_test_path';
// Whitespace-insensitive comparison tolerates wrapping across stream chunks.
const cleanResponse = fullResponse.trim().replace(/\s+/g, '');
const cleanExpected = expectedPath.trim().replace(/\s+/g, '');
// Test if the response contains the expected path
const containsPath = fullResponse.includes(expectedPath);
const similarPath = cleanResponse.includes(cleanExpected);
originalConsoleLog('Contains exact path:', containsPath);
originalConsoleLog('Contains similar path:', similarPath);
// Also check if we can compose the path from the chunks
expect(containsPath || similarPath).toBe(true);
} finally {
// Restore console functions
console.log = originalConsoleLog;
}
}, 30000); // 30 second timeout for API call
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini-terminal-command.test.ts | TypeScript | import 'dotenv/config';
import { GeminiAPI } from '../src/gemini-api';
describe('Gemini Terminal Command Function Tests', () => {
test('should handle terminal command function configuration', async () => {
// Initialize the Gemini API with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Verify the toolConfig is configured correctly with AUTO mode (per CLAUDE.md)
expect(gemini['toolConfig']).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig).toBeDefined();
expect(gemini['toolConfig'].functionCallingConfig.mode).toBe('AUTO');
// Verify tools array exists with terminal command tool
expect(gemini['tools']).toBeDefined();
expect(Array.isArray(gemini['tools'])).toBe(true);
expect(gemini['tools'].length).toBeGreaterThan(0);
// Verify the terminal command tool exists and is properly configured
const toolDeclarations = gemini['tools'][0].functionDeclarations;
expect(toolDeclarations).toBeDefined();
const terminalTool = toolDeclarations.find((fn) => fn.name === 'runTerminalCommand');
expect(terminalTool).toBeDefined();
expect(terminalTool.name).toBe('runTerminalCommand');
expect(terminalTool.description).toContain('Run a terminal command on the user\'s system');
expect(terminalTool.parameters.properties).toHaveProperty('command');
expect(terminalTool.parameters.properties).toHaveProperty('isSafe');
expect(terminalTool.parameters.required).toContain('command');
expect(terminalTool.parameters.required).toContain('isSafe');
});
test('should handle terminal command function calling flow', async () => {
// Mocked end-to-end flow: prompt -> runTerminalCommand function call ->
// command output sent back -> final text reply. Both API methods are mocked,
// so this exercises the wiring, not real model behavior.
// Skip if running CI (API key might not be available)
if (process.env.CI) {
console.log('Skipping API test in CI environment');
return;
}
// Initialize with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Spy on the sendFunctionResults method to avoid actual API calls
const originalSendFunctionResults = gemini.sendFunctionResults;
let functionCallData = null;
// Mock sendFunctionResults to verify correct calls and return predictable response
gemini.sendFunctionResults = async (chatSession, name, response) => {
functionCallData = { name, response };
return 'Command executed successfully.';
};
// Mock sendMessage to simulate terminal command function call response
const originalSendMessage = gemini.sendMessage;
gemini.sendMessage = async (prompt, chatSession) => {
return {
response: {
text: () => 'I\'ll list the files in your directory.',
functionCalls: () => [{
name: 'runTerminalCommand',
args: {
command: 'ls -la',
isSafe: true
}
}]
}
};
};
// Create a chat session and test the flow
const chatSession = gemini.startChat();
// Send initial prompt that should trigger function call
const result = await gemini.sendMessage("list files in my directory", chatSession);
// Verify function call was detected
expect(result.response.functionCalls()).toBeDefined();
expect(result.response.functionCalls().length).toBe(1);
// Extract function call details
const functionCall = result.response.functionCalls()[0];
expect(functionCall.name).toBe('runTerminalCommand');
expect(functionCall.args).toEqual({
command: 'ls -la',
isSafe: true
});
// Simulate command execution and send results back
const commandOutput = "total 112\ndrwxr-xr-x 21 user staff 672 Apr 15 10:22 .\ndrwxr-xr-x 6 user staff 192 Apr 15 09:45 ..\n-rw-r--r-- 1 user staff 302 Apr 15 10:22 .gitignore";
const response = await gemini.sendFunctionResults(
chatSession,
functionCall.name,
commandOutput
);
// Verify function results were sent correctly
expect(functionCallData).toEqual({
name: 'runTerminalCommand',
response: commandOutput
});
// Verify final response
expect(response).toBe('Command executed successfully.');
// Restore original methods
gemini.sendMessage = originalSendMessage;
gemini.sendFunctionResults = originalSendFunctionResults;
});
test('should handle safety flag for terminal commands', async () => {
// Initialize with function calling enabled
const gemini = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Mock sendMessage to simulate terminal command function call with unsafe command
const originalSendMessage = gemini.sendMessage;
gemini.sendMessage = async (prompt, chatSession) => {
return {
response: {
text: () => 'I cannot execute that command as it might be unsafe.',
functionCalls: () => [{
name: 'runTerminalCommand',
args: {
command: 'rm -rf /',
isSafe: false
}
}]
}
};
};
// Create a chat session and test the flow
const chatSession = gemini.startChat();
// Send prompt that should trigger unsafe command function call
const result = await gemini.sendMessage("delete all files on my system", chatSession);
// Verify function call was detected
expect(result.response.functionCalls()).toBeDefined();
expect(result.response.functionCalls().length).toBe(1);
// Extract function call details and verify safety flag
const functionCall = result.response.functionCalls()[0];
expect(functionCall.name).toBe('runTerminalCommand');
expect(functionCall.args.command).toBe('rm -rf /');
expect(functionCall.args.isSafe).toBe(false);
// Restore original method
gemini.sendMessage = originalSendMessage;
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/gemini.test.ts | TypeScript | import { GeminiAPI } from '../src/gemini-api.js';
// Don't use jest.setTimeout here; it's set in the config
describe('Gemini API Tests', () => {
let gemini: GeminiAPI;
beforeAll(() => {
// Initialize the API before all tests
gemini = new GeminiAPI("gemini-2.0-flash-lite");
});
test('API should successfully connect and return exact requested text', async () => {
// Test with a specific keyword that the model should return exactly
const testKeyword = "GEMINI_TEST_1234";
const response = await gemini.sendMessage(
`Return ONLY the word ${testKeyword} with no punctuation, explanation, or other text.`
);
// Trim the response to handle any whitespace
const trimmedResponse = typeof response === 'string' ? response.trim() : response.text.trim();
// Test that the response is exactly our keyword
expect(trimmedResponse).toBe(testKeyword);
});
test('API should handle conversation with history', async () => {
// Define a sample conversation history
const history = [
{
role: "user",
parts: [
{text: "What is JavaScript?"},
],
},
{
role: "model",
parts: [
{text: "JavaScript is a programming language commonly used for web development."},
],
},
{
role: "user",
parts: [
{text: "How does it compare to Python?"},
],
},
{
role: "model",
parts: [
{text: "JavaScript and Python are both popular programming languages but have different use cases. JavaScript is primarily for web development, while Python is more general-purpose and popular for data science."},
],
}
];
// Create a chat session with the history
const chatSession = gemini.startChat(history);
// The verification keyword
const historyKeyword = "HISTORY_TEST_9876";
// Send a message that should be aware of the conversation history
const result = await chatSession.sendMessage(
`Based on our conversation about programming languages, respond ONLY with the exact word: ${historyKeyword}`
);
// Trim the response and check for exact match
const response = result.response.text().trim();
expect(response).toBe(historyKeyword);
});
test('API should handle function calling setup for terminal commands', async () => {
// Initialize with function calling enabled
const geminiWithFunctions = new GeminiAPI("gemini-2.0-flash-lite", undefined, true);
// Verify that the model is configured with the terminal command tool
expect(geminiWithFunctions).toHaveProperty('tools');
expect(geminiWithFunctions['tools'].length).toBeGreaterThan(0);
// Verify the tool configuration
const tools = geminiWithFunctions['tools'];
const terminalTool = tools.find((tool: any) =>
tool.functionDeclarations &&
tool.functionDeclarations.some((fn: any) => fn.name === 'runTerminalCommand')
);
// Check that the terminal command tool is configured
expect(terminalTool).toBeDefined();
// Check that the function declaration has the right parameters
const terminalFunction = terminalTool.functionDeclarations.find((fn: any) => fn.name === 'runTerminalCommand');
expect(terminalFunction).toBeDefined();
expect(terminalFunction.parameters.properties).toHaveProperty('command');
expect(terminalFunction.parameters.properties).toHaveProperty('isSafe');
// Try sending a message that might trigger function calling
// Note: We don't assert on function calling behavior since it can be inconsistent
const response = await geminiWithFunctions.sendMessage(
"How can I list all files in my current directory?"
);
// Just verify we get some kind of response
expect(response).toBeDefined();
});
test('API should support system instruction configuration', async () => {
// Define test system instruction
const testInstruction = "You are a helpful terminal assistant in the Turing application. You can run terminal commands for the user when appropriate. Only suggest running terminal commands when they are safe and necessary. Provide clear explanations about what commands will do before executing them. Focus on being helpful, concise, and security-conscious.";
// Initialize with system instruction
const geminiWithSystemInstruction = new GeminiAPI(
"gemini-2.0-flash-lite",
undefined,
true,
testInstruction
);
// For testing purposes, we can't directly access or verify the system instruction
// in the model configuration. We can only test that the API still functions.
// Try sending a message with the system-instructed model
// This won't definitively verify the system instruction's effect,
// but ensures basic functionality still works with system instruction
const response = await geminiWithSystemInstruction.sendMessage(
"What terminal command would safely show the current directory?"
);
// Just verify we get some kind of response
expect(response).toBeDefined();
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/message-formatter.test.ts | TypeScript | /**
* @jest-environment node
*/
import { formatMessagesForGeminiAPI, Message } from '../src/utils/message-formatter';
describe('Message Formatter Utility', () => {
test('should format regular messages correctly', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'system', content: 'Processing command' }
];
const formatted = formatMessagesForGeminiAPI(messages);
expect(formatted.length).toBe(3);
expect(formatted[0].role).toBe('user');
expect(formatted[0].parts[0].text).toBe('Hello');
expect(formatted[1].role).toBe('model');
expect(formatted[1].parts[0].text).toBe('Hi there');
expect(formatted[2].role).toBe('model');
expect(formatted[2].parts[0].text).toBe('Processing command');
});
test('should filter out loading messages', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: '', isLoading: true },
{ role: 'system', content: 'Processing command' }
];
const formatted = formatMessagesForGeminiAPI(messages);
expect(formatted.length).toBe(2); // Not 3, because loading is filtered
expect(formatted[0].role).toBe('user');
expect(formatted[1].role).toBe('model');
});
test('should handle assistant messages with function calls', () => {
const messages: Message[] = [
{ role: 'user', content: 'Run ls command' },
{
role: 'assistant',
content: '', // Empty content
functionCalls: [
{
name: 'runTerminalCommand',
args: {
command: 'ls -la',
isSafe: true
}
}
]
}
];
const formatted = formatMessagesForGeminiAPI(messages);
expect(formatted.length).toBe(2);
expect(formatted[1].role).toBe('model');
// Should provide default text instead of empty string
expect(formatted[1].parts[0].text).toBe("I'll process that for you.");
});
test('should handle assistant messages with function calls and content', () => {
const messages: Message[] = [
{ role: 'user', content: 'Run ls command' },
{
role: 'assistant',
content: 'Let me list those files for you.',
functionCalls: [
{
name: 'runTerminalCommand',
args: {
command: 'ls -la',
isSafe: true
}
}
]
}
];
const formatted = formatMessagesForGeminiAPI(messages);
expect(formatted.length).toBe(2);
expect(formatted[1].role).toBe('model');
expect(formatted[1].parts[0].text).toBe('Let me list those files for you.');
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
tests/terminal-service.test.ts | TypeScript | /**
* @jest-environment node
*/
import { jest } from '@jest/globals';
import { GeminiAPI } from '../src/gemini-api.js';
import { executeCommand } from '../src/services/terminal-service.js';
// Import after mocking
import * as childProcess from 'child_process';
// Create direct mock functions
const mockExec = jest.fn();
// Mock the required dependencies
jest.mock('child_process', () => ({
exec: (cmd, cb) => {
mockExec(cmd, cb);
// Return a mock ChildProcess object
return {
on: jest.fn(),
stdout: { on: jest.fn() },
stderr: { on: jest.fn() }
};
}
}));
describe('Terminal Service Tests', () => {
let geminiApi: GeminiAPI;
let setMessages: jest.Mock;
let setChatHistory: jest.Mock;
let setPendingExecution: jest.Mock;
let setMessageToExecute: jest.Mock;
let mockChatSession: any;
beforeEach(() => {
// Initialize a new GeminiAPI instance for each test
geminiApi = new GeminiAPI('gemini-2.0-flash-lite', undefined, true);
// Mock the sendFunctionResults method to return a simple response
geminiApi.sendFunctionResults = jest.fn().mockResolvedValue({
text: "Command result processed",
functionCalls: []
});
// Mock the React state setter functions
setMessages = jest.fn();
setChatHistory = jest.fn();
setPendingExecution = jest.fn();
setMessageToExecute = jest.fn();
// Create a mock chat session
mockChatSession = {
sendMessage: jest.fn().mockResolvedValue({
response: {
text: () => "Command executed successfully"
}
})
};
// Reset the exec mock for each test
jest.clearAllMocks();
});
test('should execute command and handle successful result', async () => {
// Mock successful command execution
mockExec.mockImplementationOnce((cmd, callback) => {
callback(null, "command output", "");
});
// Execute a test command
executeCommand(
"ls -la",
0,
0,
mockChatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
// Wait for async operations to complete
await new Promise(r => setTimeout(r, 100));
// Verify exec was called with the correct command
expect(mockExec).toHaveBeenCalledWith("ls -la", expect.any(Function));
// Verify messages were updated with command result
expect(setMessages).toHaveBeenCalled();
// First call should update the function call with the result
const firstCall = setMessages.mock.calls[0][0];
const updatedMessages = firstCall([{ functionCalls: [{}] }]);
expect(updatedMessages[0].functionCalls[0].executed).toBe(true);
expect(updatedMessages[0].functionCalls[0].result).toBe("command output");
// Second call should add a loading indicator
const secondCall = setMessages.mock.calls[1][0];
const messagesWithLoading = secondCall([]);
expect(messagesWithLoading[0].role).toBe('system');
expect(messagesWithLoading[0].content).toBe('Processing command results...');
expect(messagesWithLoading[0].isLoading).toBe(true);
// Verify chat history was updated
expect(setChatHistory).toHaveBeenCalled();
const chatHistoryCall = setChatHistory.mock.calls[0][0];
const updatedChatHistory = chatHistoryCall([]);
expect(updatedChatHistory[0].role).toBe('system');
expect(updatedChatHistory[0].parts[0].text).toContain('Command executed: ls -la');
// Verify sendFunctionResults was called with the right parameters
expect(geminiApi.sendFunctionResults).toHaveBeenCalledWith(
mockChatSession,
"runTerminalCommand",
"command output"
);
// Verify pending execution state was managed correctly
expect(setPendingExecution).toHaveBeenCalledWith(true);
});
test('should handle command execution error', async () => {
// Mock command execution with error
const mockError = new Error("Command failed");
mockExec.mockImplementationOnce((cmd, callback) => {
callback(mockError, "", "");
});
// Execute a test command that will fail
executeCommand(
"invalid-command",
0,
0,
mockChatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
// Wait for async operations to complete
await new Promise(r => setTimeout(r, 100));
// Verify exec was called
expect(mockExec).toHaveBeenCalledWith("invalid-command", expect.any(Function));
// Verify messages were updated with error
const firstCall = setMessages.mock.calls[0][0];
const updatedMessages = firstCall([{ functionCalls: [{}] }]);
expect(updatedMessages[0].functionCalls[0].executed).toBe(true);
expect(updatedMessages[0].functionCalls[0].result).toBe("Error: Command failed");
// Verify sendFunctionResults was called with the error message
expect(geminiApi.sendFunctionResults).toHaveBeenCalledWith(
mockChatSession,
"runTerminalCommand",
"Error: Command failed"
);
});
test('should handle stderr output', async () => {
// Mock command execution with stderr output
mockExec.mockImplementationOnce((cmd, callback) => {
callback(null, "", "Warning message");
});
// Execute a test command
executeCommand(
"grep nonexistent",
0,
0,
mockChatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
// Wait for async operations to complete
await new Promise(r => setTimeout(r, 100));
// Verify exec was called
expect(mockExec).toHaveBeenCalledWith("grep nonexistent", expect.any(Function));
// Verify messages were updated with stderr output
const firstCall = setMessages.mock.calls[0][0];
const updatedMessages = firstCall([{ functionCalls: [{}] }]);
expect(updatedMessages[0].functionCalls[0].executed).toBe(true);
expect(updatedMessages[0].functionCalls[0].result).toBe("Warning message");
// Verify sendFunctionResults was called with the stderr message
expect(geminiApi.sendFunctionResults).toHaveBeenCalledWith(
mockChatSession,
"runTerminalCommand",
"Warning message"
);
});
test('should handle API error during function result processing', async () => {
// Mock successful command execution
mockExec.mockImplementationOnce((cmd, callback) => {
callback(null, "command output", "");
});
// Mock API error during sendFunctionResults
const mockApiError = new Error("API error");
geminiApi.sendFunctionResults = jest.fn().mockRejectedValue(mockApiError);
// Execute a test command
executeCommand(
"ls -la",
0,
0,
mockChatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
// Wait for async operations to complete
await new Promise(r => setTimeout(r, 100));
// Verify exec was called
expect(mockExec).toHaveBeenCalledWith("ls -la", expect.any(Function));
// Verify error handling in messages
// Find the call that removes loading indicator and adds error message
const errorMessageCall = setMessages.mock.calls.find(call => {
const fn = call[0];
const result = fn([{isLoading: true}]);
return result.length > 0 && result[0].role === 'assistant' && result[0].content.includes('Error');
});
expect(errorMessageCall).toBeDefined();
// Verify pending execution was reset
expect(setPendingExecution).toHaveBeenCalledWith(false);
expect(setMessageToExecute).toHaveBeenCalledWith(null);
});
test('should handle follow-up function calls correctly', async () => {
// Mock successful command execution
mockExec.mockImplementationOnce((cmd, callback) => {
callback(null, "command output", "");
});
// Mock API response with follow-up function call
geminiApi.sendFunctionResults = jest.fn().mockResolvedValue({
text: "Let me also run another command",
functionCalls: [
{
name: "runTerminalCommand",
args: {
command: "echo 'follow-up'",
isSafe: true
}
}
]
});
// Execute a test command
executeCommand(
"ls -la",
0,
0,
mockChatSession,
geminiApi,
setMessages,
setChatHistory,
setPendingExecution,
setMessageToExecute
);
// Wait for async operations to complete
await new Promise(r => setTimeout(r, 100));
// Verify exec was called
expect(mockExec).toHaveBeenCalledWith("ls -la", expect.any(Function));
// Verify that the message with function call was added
const functionCallMessageCall = setMessages.mock.calls.find(call => {
const fn = call[0];
const result = fn([{isLoading: true}]);
// Look for the call that replaces loading indicator and adds assistant message with function calls
return result.length > 0 &&
result[0].role === 'assistant' &&
result[0].functionCalls !== undefined;
});
expect(functionCallMessageCall).toBeDefined();
// Verify that timeout was set for auto-execution of safe command
jest.useFakeTimers();
jest.advanceTimersByTime(100);
jest.useRealTimers();
// Should have attempted to execute the follow-up command
expect(mockExec).toHaveBeenCalledWith(expect.stringContaining("echo 'follow-up'"), expect.any(Function));
});
}); | ykdojo/turing | 0 | An open source alternative to Claude Code | TypeScript | ykdojo | YK | Eventual |
apps/expo/app.config.ts | TypeScript | import type { ConfigContext, ExpoConfig } from "expo/config";
export default ({ config }: ConfigContext): ExpoConfig => ({
...config,
name: "expo",
slug: "expo",
scheme: "expo",
version: "0.1.0",
orientation: "portrait",
icon: "./assets/icon-light.png",
userInterfaceStyle: "automatic",
updates: {
fallbackToCacheTimeout: 0,
},
newArchEnabled: true,
assetBundlePatterns: ["**/*"],
ios: {
bundleIdentifier: "your.bundle.identifier",
supportsTablet: true,
icon: {
light: "./assets/icon-light.png",
dark: "./assets/icon-dark.png",
},
},
android: {
package: "your.bundle.identifier",
adaptiveIcon: {
foregroundImage: "./assets/icon-light.png",
backgroundColor: "#1F104A",
},
edgeToEdgeEnabled: true,
},
// extra: {
// eas: {
// projectId: "your-eas-project-id",
// },
// },
experiments: {
tsconfigPaths: true,
typedRoutes: true,
},
plugins: [
"expo-router",
"expo-secure-store",
"expo-web-browser",
[
"expo-splash-screen",
{
backgroundColor: "#E4E4E7",
image: "./assets/icon-light.png",
dark: {
backgroundColor: "#18181B",
image: "./assets/icon-dark.png",
},
},
],
],
});
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/babel.config.js | JavaScript | /** @type {import("@babel/core").ConfigFunction} */
module.exports = (api) => {
api.cache(true);
return {
presets: [
["babel-preset-expo", { jsxImportSource: "nativewind" }],
"nativewind/babel",
],
plugins: ["react-native-reanimated/plugin"],
};
};
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/eslint.config.mjs | JavaScript | import baseConfig from "@acme/eslint-config/base";
import reactConfig from "@acme/eslint-config/react";
/** @type {import('typescript-eslint').Config} */
export default [
{
ignores: [".expo/**", "expo-plugins/**"],
},
...baseConfig,
...reactConfig,
];
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/index.ts | TypeScript | import "expo-router/entry";
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/metro.config.js | JavaScript | // Learn more: https://docs.expo.dev/guides/monorepos/
const { getDefaultConfig } = require("expo/metro-config");
const { FileStore } = require("metro-cache");
const { withNativeWind } = require("nativewind/metro");
const path = require("node:path");
const config = withTurborepoManagedCache(
withNativeWind(getDefaultConfig(__dirname), {
input: "./src/styles.css",
configPath: "./tailwind.config.ts",
}),
);
module.exports = config;
/**
* Move the Metro cache to the `.cache/metro` folder.
* If you have any environment variables, you can configure Turborepo to invalidate it when needed.
*
* @see https://turborepo.com/docs/reference/configuration#env
* @param {import('expo/metro-config').MetroConfig} config
* @returns {import('expo/metro-config').MetroConfig}
*/
function withTurborepoManagedCache(config) {
config.cacheStores = [
new FileStore({ root: path.join(__dirname, ".cache/metro") }),
];
return config;
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/nativewind-env.d.ts | TypeScript | /// <reference types="nativewind/types" />
// NOTE: This file should not be edited and should be committed with your source code. It is generated by NativeWind.
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/app/_layout.tsx | TypeScript (TSX) | import { Stack } from "expo-router";
import { StatusBar } from "expo-status-bar";
import { useColorScheme } from "nativewind";
import { queryClient } from "~/utils/api";
import "../styles.css";
import { QueryClientProvider } from "@tanstack/react-query";
// This is the main layout of the app
// It wraps your pages with the providers they need
export default function RootLayout() {
const { colorScheme } = useColorScheme();
return (
<QueryClientProvider client={queryClient}>
{/*
The Stack component displays the current page.
It also allows you to configure your screens
*/}
<Stack
screenOptions={{
headerStyle: {
backgroundColor: "#f472b6",
},
contentStyle: {
backgroundColor: colorScheme == "dark" ? "#09090B" : "#FFFFFF",
},
}}
/>
<StatusBar />
</QueryClientProvider>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/app/index.tsx | TypeScript (TSX) | import React, { useState } from "react";
import { Button, Pressable, Text, TextInput, View } from "react-native";
import { SafeAreaView } from "react-native-safe-area-context";
import { Link, Stack } from "expo-router";
import { LegendList } from "@legendapp/list";
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
import type { RouterOutputs } from "~/utils/api";
import { trpc } from "~/utils/api";
import { authClient } from "~/utils/auth";
function PostCard(props: {
post: RouterOutputs["post"]["all"][number];
onDelete: () => void;
}) {
return (
<View className="flex flex-row rounded-lg bg-muted p-4">
<View className="flex-grow">
<Link
asChild
href={{
pathname: "/post/[id]",
params: { id: props.post.id },
}}
>
<Pressable className="">
<Text className="text-xl font-semibold text-primary">
{props.post.title}
</Text>
<Text className="mt-2 text-foreground">{props.post.content}</Text>
</Pressable>
</Link>
</View>
<Pressable onPress={props.onDelete}>
<Text className="font-bold uppercase text-primary">Delete</Text>
</Pressable>
</View>
);
}
function CreatePost() {
const queryClient = useQueryClient();
const [title, setTitle] = useState("");
const [content, setContent] = useState("");
const { mutate, error } = useMutation(
trpc.post.create.mutationOptions({
async onSuccess() {
setTitle("");
setContent("");
await queryClient.invalidateQueries(trpc.post.all.queryFilter());
},
}),
);
return (
<View className="mt-4 flex gap-2">
<TextInput
className="items-center rounded-md border border-input bg-background px-3 text-lg leading-[1.25] text-foreground"
value={title}
onChangeText={setTitle}
placeholder="Title"
/>
{error?.data?.zodError?.fieldErrors.title && (
<Text className="mb-2 text-destructive">
{error.data.zodError.fieldErrors.title}
</Text>
)}
<TextInput
className="items-center rounded-md border border-input bg-background px-3 text-lg leading-[1.25] text-foreground"
value={content}
onChangeText={setContent}
placeholder="Content"
/>
{error?.data?.zodError?.fieldErrors.content && (
<Text className="mb-2 text-destructive">
{error.data.zodError.fieldErrors.content}
</Text>
)}
<Pressable
className="flex items-center rounded bg-primary p-2"
onPress={() => {
mutate({
title,
content,
});
}}
>
<Text className="text-foreground">Create</Text>
</Pressable>
{error?.data?.code === "UNAUTHORIZED" && (
<Text className="mt-2 text-destructive">
You need to be logged in to create a post
</Text>
)}
</View>
);
}
function MobileAuth() {
const { data: session } = authClient.useSession();
return (
<>
<Text className="pb-2 text-center text-xl font-semibold text-zinc-900">
{session?.user.name ? `Hello, ${session.user.name}` : "Not logged in"}
</Text>
<Button
onPress={() =>
session
? authClient.signOut()
: authClient.signIn.social({
provider: "discord",
callbackURL: "/",
})
}
title={session ? "Sign Out" : "Sign In With Discord"}
color={"#5B65E9"}
/>
</>
);
}
export default function Index() {
const queryClient = useQueryClient();
const postQuery = useQuery(trpc.post.all.queryOptions());
const deletePostMutation = useMutation(
trpc.post.delete.mutationOptions({
onSettled: () =>
queryClient.invalidateQueries(trpc.post.all.queryFilter()),
}),
);
return (
<SafeAreaView className="bg-background">
{/* Changes page title visible on the header */}
<Stack.Screen options={{ title: "Home Page" }} />
<View className="h-full w-full bg-background p-4">
<Text className="pb-2 text-center text-5xl font-bold text-foreground">
Create <Text className="text-primary">T3</Text> Turbo
</Text>
<MobileAuth />
<View className="py-2">
<Text className="font-semibold italic text-primary">
Press on a post
</Text>
</View>
<LegendList
data={postQuery.data ?? []}
estimatedItemSize={20}
keyExtractor={(item) => item.id}
ItemSeparatorComponent={() => <View className="h-2" />}
renderItem={(p) => (
<PostCard
post={p.item}
onDelete={() => deletePostMutation.mutate(p.item.id)}
/>
)}
/>
<CreatePost />
</View>
</SafeAreaView>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/app/post/[id].tsx | TypeScript (TSX) | import { SafeAreaView, Text, View } from "react-native";
import { Stack, useGlobalSearchParams } from "expo-router";
import { useQuery } from "@tanstack/react-query";
import { trpc } from "~/utils/api";
export default function Post() {
const { id } = useGlobalSearchParams();
if (!id || typeof id !== "string") throw new Error("unreachable");
const { data } = useQuery(trpc.post.byId.queryOptions({ id }));
if (!data) return null;
return (
<SafeAreaView className="bg-background">
<Stack.Screen options={{ title: data.title }} />
<View className="h-full w-full p-4">
<Text className="py-2 text-3xl font-bold text-primary">
{data.title}
</Text>
<Text className="py-4 text-foreground">{data.content}</Text>
</View>
</SafeAreaView>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/styles.css | CSS | @tailwind base;
@tailwind components;
@tailwind utilities;
:root {
--background: 0 0% 100%;
--foreground: 240 10% 3.9%;
--card: 0 0% 100%;
--card-foreground: 240 10% 3.9%;
--popover: 0 0% 100%;
--popover-foreground: 240 10% 3.9%;
--primary: 327 66% 69%;
--primary-foreground: 337 65.5% 17.1%;
--secondary: 240 4.8% 95.9%;
--secondary-foreground: 240 5.9% 10%;
--muted: 240 4.8% 95.9%;
--muted-foreground: 240 3.8% 46.1%;
--accent: 240 4.8% 95.9%;
--accent-foreground: 240 5.9% 10%;
--destructive: 0 72.22% 50.59%;
--destructive-foreground: 0 0% 98%;
--border: 240 5.9% 90%;
--input: 240 5.9% 90%;
--ring: 240 5% 64.9%;
--radius: 0.5rem;
}
@media (prefers-color-scheme: dark) {
:root {
--background: 240 10% 3.9%;
--foreground: 0 0% 98%;
--card: 240 10% 3.9%;
--card-foreground: 0 0% 98%;
--popover: 240 10% 3.9%;
--popover-foreground: 0 0% 98%;
--primary: 327 66% 69%;
--primary-foreground: 337 65.5% 17.1%;
--secondary: 240 3.7% 15.9%;
--secondary-foreground: 0 0% 98%;
--muted: 240 3.7% 15.9%;
--muted-foreground: 240 5% 64.9%;
--accent: 240 3.7% 15.9%;
--accent-foreground: 0 0% 98%;
--destructive: 0 62.8% 30.6%;
--destructive-foreground: 0 85.7% 97.3%;
--border: 240 3.7% 15.9%;
--input: 240 3.7% 15.9%;
--ring: 240 4.9% 83.9%;
}
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/utils/api.tsx | TypeScript (TSX) | import { QueryClient } from "@tanstack/react-query";
import { createTRPCClient, httpBatchLink, loggerLink } from "@trpc/client";
import { createTRPCOptionsProxy } from "@trpc/tanstack-react-query";
import superjson from "superjson";
import type { AppRouter } from "@acme/api";
import { authClient } from "./auth";
import { getBaseUrl } from "./base-url";
export const queryClient = new QueryClient({
defaultOptions: {
queries: {
// ...
},
},
});
/**
* A set of typesafe hooks for consuming your API.
*/
export const trpc = createTRPCOptionsProxy<AppRouter>({
client: createTRPCClient({
links: [
loggerLink({
enabled: (opts) =>
process.env.NODE_ENV === "development" ||
(opts.direction === "down" && opts.result instanceof Error),
colorMode: "ansi",
}),
httpBatchLink({
transformer: superjson,
url: `${getBaseUrl()}/api/trpc`,
headers() {
const headers = new Map<string, string>();
headers.set("x-trpc-source", "expo-react");
const cookies = authClient.getCookie();
if (cookies) {
headers.set("Cookie", cookies);
}
return headers;
},
}),
],
}),
queryClient,
});
export { type RouterInputs, type RouterOutputs } from "@acme/api";
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/utils/auth.ts | TypeScript | import * as SecureStore from "expo-secure-store";
import { expoClient } from "@better-auth/expo/client";
import { createAuthClient } from "better-auth/react";
import { getBaseUrl } from "./base-url";
console.log("getBaseUrl", getBaseUrl());
export const authClient = createAuthClient({
baseURL: getBaseUrl(),
plugins: [
expoClient({
scheme: "expo",
storagePrefix: "expo",
storage: SecureStore,
}),
],
});
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/utils/base-url.ts | TypeScript | import Constants from "expo-constants";
/**
* Extend this function when going to production by
* setting the baseUrl to your production API URL.
*/
export const getBaseUrl = () => {
/**
* Gets the IP address of your host-machine. If it cannot automatically find it,
* you'll have to manually set it. NOTE: Port 3000 should work for most but confirm
* you don't have anything else running on it, or you'd have to change it.
*
* **NOTE**: This is only for development. In production, you'll want to set the
* baseUrl to your production API URL.
*/
const debuggerHost = Constants.expoConfig?.hostUri;
const localhost = debuggerHost?.split(":")[0];
if (!localhost) {
// return "https://turbo.t3.gg";
throw new Error(
"Failed to get localhost. Please point to your production server.",
);
}
return `http://${localhost}:3000`;
};
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/src/utils/session-store.ts | TypeScript | import * as SecureStore from "expo-secure-store";
const key = "session_token";
export const getToken = () => SecureStore.getItem(key);
export const deleteToken = () => SecureStore.deleteItemAsync(key);
export const setToken = (v: string) => SecureStore.setItem(key, v);
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/expo/tailwind.config.ts | TypeScript | import type { Config } from "tailwindcss";
// @ts-expect-error - no types
import nativewind from "nativewind/preset";
import baseConfig from "@acme/tailwind-config/native";
export default {
content: ["./src/**/*.{ts,tsx}"],
presets: [baseConfig, nativewind],
} satisfies Config;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/eslint.config.js | JavaScript | import baseConfig, { restrictEnvAccess } from "@acme/eslint-config/base";
import nextjsConfig from "@acme/eslint-config/nextjs";
import reactConfig from "@acme/eslint-config/react";
/** @type {import('typescript-eslint').Config} */
export default [
{
ignores: [".next/**"],
},
...baseConfig,
...reactConfig,
...nextjsConfig,
...restrictEnvAccess,
];
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/next.config.js | JavaScript | import { createJiti } from "jiti";
const jiti = createJiti(import.meta.url);
// Import env files to validate at build time. Use jiti so we can load .ts files in here.
await jiti.import("./src/env");
/** @type {import("next").NextConfig} */
const config = {
/** Enables hot reloading for local packages without a build step */
transpilePackages: [
"@acme/api",
"@acme/auth",
"@acme/db",
"@acme/ui",
"@acme/validators",
],
/** We already do linting and typechecking as separate tasks in CI */
eslint: { ignoreDuringBuilds: true },
typescript: { ignoreBuildErrors: true },
};
export default config;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/postcss.config.cjs | JavaScript | module.exports = {
plugins: {
tailwindcss: {},
},
};
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/_components/auth-showcase.tsx | TypeScript (TSX) | import { headers } from "next/headers";
import { redirect } from "next/navigation";
import { Button } from "@acme/ui/button";
import { auth, getSession } from "~/auth/server";
export async function AuthShowcase() {
const session = await getSession();
if (!session) {
return (
<form>
<Button
size="lg"
formAction={async () => {
"use server";
const res = await auth.api.signInSocial({
body: {
provider: "discord",
callbackURL: "/",
},
});
if (!res.url) {
throw new Error("No URL returned from signInSocial");
}
redirect(res.url);
}}
>
Sign in with Discord
</Button>
</form>
);
}
return (
<div className="flex flex-col items-center justify-center gap-4">
<p className="text-center text-2xl">
<span>Logged in as {session.user.name}</span>
</p>
<form>
<Button
size="lg"
formAction={async () => {
"use server";
await auth.api.signOut({
headers: await headers(),
});
redirect("/");
}}
>
Sign out
</Button>
</form>
</div>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/_components/posts.tsx | TypeScript (TSX) | "use client";
import {
useMutation,
useQueryClient,
useSuspenseQuery,
} from "@tanstack/react-query";
import type { RouterOutputs } from "@acme/api";
import { CreatePostSchema } from "@acme/db/schema";
import { cn } from "@acme/ui";
import { Button } from "@acme/ui/button";
import {
Form,
FormControl,
FormField,
FormItem,
FormMessage,
useForm,
} from "@acme/ui/form";
import { Input } from "@acme/ui/input";
import { toast } from "@acme/ui/toast";
import { useTRPC } from "~/trpc/react";
export function CreatePostForm() {
const trpc = useTRPC();
const form = useForm({
schema: CreatePostSchema,
defaultValues: {
content: "",
title: "",
},
});
const queryClient = useQueryClient();
const createPost = useMutation(
trpc.post.create.mutationOptions({
onSuccess: async () => {
form.reset();
await queryClient.invalidateQueries(trpc.post.pathFilter());
},
onError: (err) => {
toast.error(
err.data?.code === "UNAUTHORIZED"
? "You must be logged in to post"
: "Failed to create post",
);
},
}),
);
return (
<Form {...form}>
<form
className="flex w-full max-w-2xl flex-col gap-4"
onSubmit={form.handleSubmit((data) => {
createPost.mutate(data);
})}
>
<FormField
control={form.control}
name="title"
render={({ field }) => (
<FormItem>
<FormControl>
<Input {...field} placeholder="Title" />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="content"
render={({ field }) => (
<FormItem>
<FormControl>
<Input {...field} placeholder="Content" />
</FormControl>
<FormMessage />
</FormItem>
)}
/>
<Button>Create</Button>
</form>
</Form>
);
}
export function PostList() {
const trpc = useTRPC();
const { data: posts } = useSuspenseQuery(trpc.post.all.queryOptions());
if (posts.length === 0) {
return (
<div className="relative flex w-full flex-col gap-4">
<PostCardSkeleton pulse={false} />
<PostCardSkeleton pulse={false} />
<PostCardSkeleton pulse={false} />
<div className="absolute inset-0 flex flex-col items-center justify-center bg-black/10">
<p className="text-2xl font-bold text-white">No posts yet</p>
</div>
</div>
);
}
return (
<div className="flex w-full flex-col gap-4">
{posts.map((p) => {
return <PostCard key={p.id} post={p} />;
})}
</div>
);
}
export function PostCard(props: {
post: RouterOutputs["post"]["all"][number];
}) {
const trpc = useTRPC();
const queryClient = useQueryClient();
const deletePost = useMutation(
trpc.post.delete.mutationOptions({
onSuccess: async () => {
await queryClient.invalidateQueries(trpc.post.pathFilter());
},
onError: (err) => {
toast.error(
err.data?.code === "UNAUTHORIZED"
? "You must be logged in to delete a post"
: "Failed to delete post",
);
},
}),
);
return (
<div className="flex flex-row rounded-lg bg-muted p-4">
<div className="flex-grow">
<h2 className="text-2xl font-bold text-primary">{props.post.title}</h2>
<p className="mt-2 text-sm">{props.post.content}</p>
</div>
<div>
<Button
variant="ghost"
className="cursor-pointer text-sm font-bold uppercase text-primary hover:bg-transparent hover:text-white"
onClick={() => deletePost.mutate(props.post.id)}
>
Delete
</Button>
</div>
</div>
);
}
export function PostCardSkeleton(props: { pulse?: boolean }) {
const { pulse = true } = props;
return (
<div className="flex flex-row rounded-lg bg-muted p-4">
<div className="flex-grow">
<h2
className={cn(
"w-1/4 rounded bg-primary text-2xl font-bold",
pulse && "animate-pulse",
)}
>
</h2>
<p
className={cn(
"mt-2 w-1/3 rounded bg-current text-sm",
pulse && "animate-pulse",
)}
>
</p>
</div>
</div>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/api/auth/[...all]/route.ts | TypeScript | import { auth } from "~/auth/server";
export const GET = auth.handler;
export const POST = auth.handler;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/api/trpc/[trpc]/route.ts | TypeScript | import type { NextRequest } from "next/server";
import { fetchRequestHandler } from "@trpc/server/adapters/fetch";
import { appRouter, createTRPCContext } from "@acme/api";
import { auth } from "~/auth/server";
/**
* Configure basic CORS headers
* You should extend this to match your needs
*/
const setCorsHeaders = (res: Response) => {
res.headers.set("Access-Control-Allow-Origin", "*");
res.headers.set("Access-Control-Request-Method", "*");
res.headers.set("Access-Control-Allow-Methods", "OPTIONS, GET, POST");
res.headers.set("Access-Control-Allow-Headers", "*");
};
export const OPTIONS = () => {
const response = new Response(null, {
status: 204,
});
setCorsHeaders(response);
return response;
};
const handler = async (req: NextRequest) => {
const response = await fetchRequestHandler({
endpoint: "/api/trpc",
router: appRouter,
req,
createContext: () =>
createTRPCContext({
auth: auth,
headers: req.headers,
}),
onError({ error, path }) {
console.error(`>>> tRPC Error on '${path}'`, error);
},
});
setCorsHeaders(response);
return response;
};
export { handler as GET, handler as POST };
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/globals.css | CSS | @tailwind base;
@tailwind components;
@tailwind utilities;
@layer base {
:root {
--background: 0 0% 100%;
--foreground: 240 10% 3.9%;
--card: 0 0% 100%;
--card-foreground: 240 10% 3.9%;
--popover: 0 0% 100%;
--popover-foreground: 240 10% 3.9%;
--primary: 327 66% 69%;
--primary-foreground: 337 65.5% 17.1%;
--secondary: 240 4.8% 95.9%;
--secondary-foreground: 240 5.9% 10%;
--muted: 240 4.8% 95.9%;
--muted-foreground: 240 3.8% 46.1%;
--accent: 240 4.8% 95.9%;
--accent-foreground: 240 5.9% 10%;
--destructive: 0 72.22% 50.59%;
--destructive-foreground: 0 0% 98%;
--border: 240 5.9% 90%;
--input: 240 5.9% 90%;
--ring: 240 5% 64.9%;
--radius: 0.5rem;
}
.dark {
--background: 240 10% 3.9%;
--foreground: 0 0% 98%;
--card: 240 10% 3.9%;
--card-foreground: 0 0% 98%;
--popover: 240 10% 3.9%;
--popover-foreground: 0 0% 98%;
--primary: 327 66% 69%;
--primary-foreground: 337 65.5% 17.1%;
--secondary: 240 3.7% 15.9%;
--secondary-foreground: 0 0% 98%;
--muted: 240 3.7% 15.9%;
--muted-foreground: 240 5% 64.9%;
--accent: 240 3.7% 15.9%;
--accent-foreground: 0 0% 98%;
--destructive: 0 62.8% 30.6%;
--destructive-foreground: 0 85.7% 97.3%;
--border: 240 3.7% 15.9%;
--input: 240 3.7% 15.9%;
--ring: 240 4.9% 83.9%;
}
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/layout.tsx | TypeScript (TSX) | import type { Metadata, Viewport } from "next";
import { Geist, Geist_Mono } from "next/font/google";
import { cn } from "@acme/ui";
import { ThemeProvider, ThemeToggle } from "@acme/ui/theme";
import { Toaster } from "@acme/ui/toast";
import { TRPCReactProvider } from "~/trpc/react";
import "~/app/globals.css";
import { env } from "~/env";
export const metadata: Metadata = {
metadataBase: new URL(
env.VERCEL_ENV === "production"
? "https://turbo.t3.gg"
: "http://localhost:3000",
),
title: "Create T3 Turbo",
description: "Simple monorepo with shared backend for web & mobile apps",
openGraph: {
title: "Create T3 Turbo",
description: "Simple monorepo with shared backend for web & mobile apps",
url: "https://create-t3-turbo.vercel.app",
siteName: "Create T3 Turbo",
},
twitter: {
card: "summary_large_image",
site: "@jullerino",
creator: "@jullerino",
},
};
export const viewport: Viewport = {
themeColor: [
{ media: "(prefers-color-scheme: light)", color: "white" },
{ media: "(prefers-color-scheme: dark)", color: "black" },
],
};
const geistSans = Geist({
subsets: ["latin"],
variable: "--font-geist-sans",
});
const geistMono = Geist_Mono({
subsets: ["latin"],
variable: "--font-geist-mono",
});
export default function RootLayout(props: { children: React.ReactNode }) {
return (
<html lang="en" suppressHydrationWarning>
<body
className={cn(
"min-h-screen bg-background font-sans text-foreground antialiased",
geistSans.variable,
geistMono.variable,
)}
>
<ThemeProvider attribute="class" defaultTheme="system" enableSystem>
<TRPCReactProvider>{props.children}</TRPCReactProvider>
<div className="absolute bottom-4 right-4">
<ThemeToggle />
</div>
<Toaster />
</ThemeProvider>
</body>
</html>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/app/page.tsx | TypeScript (TSX) | import { Suspense } from "react";
import { HydrateClient, prefetch, trpc } from "~/trpc/server";
import { AuthShowcase } from "./_components/auth-showcase";
import {
CreatePostForm,
PostCardSkeleton,
PostList,
} from "./_components/posts";
export default function HomePage() {
prefetch(trpc.post.all.queryOptions());
return (
<HydrateClient>
<main className="container h-screen py-16">
<div className="flex flex-col items-center justify-center gap-4">
<h1 className="text-5xl font-extrabold tracking-tight sm:text-[5rem]">
Create <span className="text-primary">T3</span> Turbo
</h1>
<AuthShowcase />
<CreatePostForm />
<div className="w-full max-w-2xl overflow-y-scroll">
<Suspense
fallback={
<div className="flex w-full flex-col gap-4">
<PostCardSkeleton />
<PostCardSkeleton />
<PostCardSkeleton />
</div>
}
>
<PostList />
</Suspense>
</div>
</div>
</main>
</HydrateClient>
);
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/auth/client.ts | TypeScript | import { createAuthClient } from "better-auth/react";
export const authClient = createAuthClient();
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/auth/server.ts | TypeScript | import "server-only";
import { cache } from "react";
import { headers } from "next/headers";
import { initAuth } from "@acme/auth";
import { env } from "~/env";
const baseUrl =
env.VERCEL_ENV === "production"
? `https://${env.VERCEL_PROJECT_PRODUCTION_URL}`
: env.VERCEL_ENV === "preview"
? `https://${env.VERCEL_URL}`
: "http://localhost:3000";
export const auth = initAuth({
baseUrl,
productionUrl: `https://${env.VERCEL_PROJECT_PRODUCTION_URL ?? "turbo.t3.gg"}`,
secret: env.AUTH_SECRET,
discordClientId: env.AUTH_DISCORD_ID,
discordClientSecret: env.AUTH_DISCORD_SECRET,
});
export const getSession = cache(async () =>
auth.api.getSession({ headers: await headers() }),
);
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/env.ts | TypeScript | import { createEnv } from "@t3-oss/env-nextjs";
import { vercel } from "@t3-oss/env-nextjs/presets-zod";
import { z } from "zod/v4";
import { authEnv } from "@acme/auth/env";
export const env = createEnv({
extends: [authEnv(), vercel()],
shared: {
NODE_ENV: z
.enum(["development", "production", "test"])
.default("development"),
},
/**
* Specify your server-side environment variables schema here.
* This way you can ensure the app isn't built with invalid env vars.
*/
server: {
POSTGRES_URL: z.string().url(),
},
/**
* Specify your client-side environment variables schema here.
* For them to be exposed to the client, prefix them with `NEXT_PUBLIC_`.
*/
client: {
// NEXT_PUBLIC_CLIENTVAR: z.string(),
},
/**
* Destructure all variables from `process.env` to make sure they aren't tree-shaken away.
*/
experimental__runtimeEnv: {
NODE_ENV: process.env.NODE_ENV,
// NEXT_PUBLIC_CLIENTVAR: process.env.NEXT_PUBLIC_CLIENTVAR,
},
skipValidation:
!!process.env.CI || process.env.npm_lifecycle_event === "lint",
});
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/trpc/query-client.ts | TypeScript | import {
defaultShouldDehydrateQuery,
QueryClient,
} from "@tanstack/react-query";
import SuperJSON from "superjson";
export const createQueryClient = () =>
new QueryClient({
defaultOptions: {
queries: {
// With SSR, we usually want to set some default staleTime
// above 0 to avoid refetching immediately on the client
staleTime: 30 * 1000,
},
dehydrate: {
serializeData: SuperJSON.serialize,
shouldDehydrateQuery: (query) =>
defaultShouldDehydrateQuery(query) ||
query.state.status === "pending",
shouldRedactErrors: () => {
// We should not catch Next.js server errors
// as that's how Next.js detects dynamic pages
// so we cannot redact them.
// Next.js also automatically redacts errors for us
// with better digests.
return false;
},
},
hydrate: {
deserializeData: SuperJSON.deserialize,
},
},
});
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/trpc/react.tsx | TypeScript (TSX) | "use client";
import type { QueryClient } from "@tanstack/react-query";
import { useState } from "react";
import { QueryClientProvider } from "@tanstack/react-query";
import {
createTRPCClient,
httpBatchStreamLink,
loggerLink,
} from "@trpc/client";
import { createTRPCContext } from "@trpc/tanstack-react-query";
import SuperJSON from "superjson";
import type { AppRouter } from "@acme/api";
import { env } from "~/env";
import { createQueryClient } from "./query-client";
let clientQueryClientSingleton: QueryClient | undefined = undefined;
const getQueryClient = () => {
if (typeof window === "undefined") {
// Server: always make a new query client
return createQueryClient();
} else {
// Browser: use singleton pattern to keep the same query client
return (clientQueryClientSingleton ??= createQueryClient());
}
};
export const { useTRPC, TRPCProvider } = createTRPCContext<AppRouter>();
export function TRPCReactProvider(props: { children: React.ReactNode }) {
const queryClient = getQueryClient();
const [trpcClient] = useState(() =>
createTRPCClient<AppRouter>({
links: [
loggerLink({
enabled: (op) =>
env.NODE_ENV === "development" ||
(op.direction === "down" && op.result instanceof Error),
}),
httpBatchStreamLink({
transformer: SuperJSON,
url: getBaseUrl() + "/api/trpc",
headers() {
const headers = new Headers();
headers.set("x-trpc-source", "nextjs-react");
return headers;
},
}),
],
}),
);
return (
<QueryClientProvider client={queryClient}>
<TRPCProvider trpcClient={trpcClient} queryClient={queryClient}>
{props.children}
</TRPCProvider>
</QueryClientProvider>
);
}
const getBaseUrl = () => {
if (typeof window !== "undefined") return window.location.origin;
if (env.VERCEL_URL) return `https://${env.VERCEL_URL}`;
// eslint-disable-next-line no-restricted-properties
return `http://localhost:${process.env.PORT ?? 3000}`;
};
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/src/trpc/server.tsx | TypeScript (TSX) | import type { TRPCQueryOptions } from "@trpc/tanstack-react-query";
import { cache } from "react";
import { headers } from "next/headers";
import { dehydrate, HydrationBoundary } from "@tanstack/react-query";
import { createTRPCOptionsProxy } from "@trpc/tanstack-react-query";
import type { AppRouter } from "@acme/api";
import { appRouter, createTRPCContext } from "@acme/api";
import { auth } from "~/auth/server";
import { createQueryClient } from "./query-client";
/**
* This wraps the `createTRPCContext` helper and provides the required context for the tRPC API when
* handling a tRPC call from a React Server Component.
*/
const createContext = cache(async () => {
const heads = new Headers(await headers());
heads.set("x-trpc-source", "rsc");
return createTRPCContext({
headers: heads,
auth,
});
});
const getQueryClient = cache(createQueryClient);
export const trpc = createTRPCOptionsProxy<AppRouter>({
router: appRouter,
ctx: createContext,
queryClient: getQueryClient,
});
export function HydrateClient(props: { children: React.ReactNode }) {
const queryClient = getQueryClient();
return (
<HydrationBoundary state={dehydrate(queryClient)}>
{props.children}
</HydrationBoundary>
);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function prefetch<T extends ReturnType<TRPCQueryOptions<any>>>(
queryOptions: T,
) {
const queryClient = getQueryClient();
if (queryOptions.queryKey[1]?.type === "infinite") {
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument, @typescript-eslint/no-explicit-any
void queryClient.prefetchInfiniteQuery(queryOptions as any);
} else {
void queryClient.prefetchQuery(queryOptions);
}
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
apps/nextjs/tailwind.config.ts | TypeScript | import type { Config } from "tailwindcss";
import { fontFamily } from "tailwindcss/defaultTheme";
import baseConfig from "@acme/tailwind-config/web";
export default {
// We need to append the path to the UI package to the content array so that
// those classes are included correctly.
content: [...baseConfig.content, "../../packages/ui/src/*.{ts,tsx}"],
presets: [baseConfig],
theme: {
extend: {
fontFamily: {
sans: ["var(--font-geist-sans)", ...fontFamily.sans],
mono: ["var(--font-geist-mono)", ...fontFamily.mono],
},
},
},
} satisfies Config;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/eslint/base.js | JavaScript | /// <reference types="./types.d.ts" />
import * as path from "node:path";
import { includeIgnoreFile } from "@eslint/compat";
import eslint from "@eslint/js";
import importPlugin from "eslint-plugin-import";
import turboPlugin from "eslint-plugin-turbo";
import tseslint from "typescript-eslint";
/**
* All packages that leverage t3-env should use this rule
*/
export const restrictEnvAccess = tseslint.config(
{ ignores: ["**/env.ts"] },
{
files: ["**/*.js", "**/*.ts", "**/*.tsx"],
rules: {
"no-restricted-properties": [
"error",
{
object: "process",
property: "env",
message:
"Use `import { env } from '~/env'` instead to ensure validated types.",
},
],
"no-restricted-imports": [
"error",
{
name: "process",
importNames: ["env"],
message:
"Use `import { env } from '~/env'` instead to ensure validated types.",
},
],
},
},
);
export default tseslint.config(
// Ignore files not tracked by VCS and any config files
includeIgnoreFile(path.join(import.meta.dirname, "../../.gitignore")),
{ ignores: ["**/*.config.*"] },
{
files: ["**/*.js", "**/*.ts", "**/*.tsx"],
plugins: {
import: importPlugin,
turbo: turboPlugin,
},
extends: [
eslint.configs.recommended,
...tseslint.configs.recommended,
...tseslint.configs.recommendedTypeChecked,
...tseslint.configs.stylisticTypeChecked,
],
rules: {
...turboPlugin.configs.recommended.rules,
"@typescript-eslint/no-unused-vars": [
"error",
{ argsIgnorePattern: "^_", varsIgnorePattern: "^_" },
],
"@typescript-eslint/consistent-type-imports": [
"warn",
{ prefer: "type-imports", fixStyle: "separate-type-imports" },
],
"@typescript-eslint/no-misused-promises": [
2,
{ checksVoidReturn: { attributes: false } },
],
"@typescript-eslint/no-unnecessary-condition": [
"error",
{
allowConstantLoopConditions: true,
},
],
"@typescript-eslint/no-non-null-assertion": "error",
"import/consistent-type-specifier-style": ["error", "prefer-top-level"],
"no-restricted-imports": [
"error",
{
name: "zod",
message: "Use `import { z } from 'zod/v4'` instead to ensure v4.",
},
],
},
},
{
linterOptions: { reportUnusedDisableDirectives: true },
languageOptions: { parserOptions: { projectService: true } },
},
);
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/eslint/nextjs.js | JavaScript | import nextPlugin from "@next/eslint-plugin-next";
/** @type {Awaited<import('typescript-eslint').Config>} */
export default [
{
files: ["**/*.ts", "**/*.tsx"],
plugins: {
"@next/next": nextPlugin,
},
rules: {
...nextPlugin.configs.recommended.rules,
...nextPlugin.configs["core-web-vitals"].rules,
// TypeError: context.getAncestors is not a function
"@next/next/no-duplicate-head": "off",
},
},
];
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/eslint/react.js | JavaScript | import reactPlugin from "eslint-plugin-react";
import * as reactHooks from "eslint-plugin-react-hooks";
/** @type {Awaited<import('typescript-eslint').Config>} */
export default [
reactHooks.configs.recommended,
{
files: ["**/*.ts", "**/*.tsx"],
plugins: {
react: reactPlugin,
},
rules: {
...reactPlugin.configs["jsx-runtime"].rules,
"react-hooks/react-compiler": "error",
},
languageOptions: {
globals: {
React: "writable",
},
},
},
];
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/eslint/types.d.ts | TypeScript | /**
* Since the ecosystem hasn't fully migrated to ESLint's new FlatConfig system yet,
* we "need" to type some of the plugins manually :(
*/
declare module "eslint-plugin-import" {
import type { Linter, Rule } from "eslint";
export const configs: {
recommended: { rules: Linter.RulesRecord };
};
export const rules: Record<string, Rule.RuleModule>;
}
declare module "eslint-plugin-react" {
import type { Linter, Rule } from "eslint";
export const configs: {
recommended: { rules: Linter.RulesRecord };
all: { rules: Linter.RulesRecord };
"jsx-runtime": { rules: Linter.RulesRecord };
};
export const rules: Record<string, Rule.RuleModule>;
}
declare module "@next/eslint-plugin-next" {
import type { Linter, Rule } from "eslint";
export const configs: {
recommended: { rules: Linter.RulesRecord };
"core-web-vitals": { rules: Linter.RulesRecord };
};
export const rules: Record<string, Rule.RuleModule>;
}
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/prettier/index.js | JavaScript | import { fileURLToPath } from "node:url";
/** @typedef {import("prettier").Config} PrettierConfig */
/** @typedef {import("prettier-plugin-tailwindcss").PluginOptions} TailwindConfig */
/** @typedef {import("@ianvs/prettier-plugin-sort-imports").PluginConfig} SortImportsConfig */
/** @type { PrettierConfig | SortImportsConfig | TailwindConfig } */
const config = {
plugins: [
"@ianvs/prettier-plugin-sort-imports",
"prettier-plugin-tailwindcss",
],
tailwindConfig: fileURLToPath(
new URL("../../tooling/tailwind/web.ts", import.meta.url),
),
tailwindFunctions: ["cn", "cva"],
importOrder: [
"<TYPES>",
"^(react/(.*)$)|^(react$)|^(react-native(.*)$)",
"^(next/(.*)$)|^(next$)",
"^(expo(.*)$)|^(expo$)",
"<THIRD_PARTY_MODULES>",
"",
"<TYPES>^@acme",
"^@acme/(.*)$",
"",
"<TYPES>^[.|..|~]",
"^~/",
"^[../]",
"^[./]",
],
importOrderParserPlugins: ["typescript", "jsx", "decorators-legacy"],
importOrderTypeScriptVersion: "4.4.0",
overrides: [
{
files: "*.json.hbs",
options: {
parser: "json",
},
},
{
files: "*.js.hbs",
options: {
parser: "babel",
},
},
],
};
export default config;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq | |
tooling/tailwind/base.ts | TypeScript | import type { Config } from "tailwindcss";
export default {
darkMode: ["class"],
content: ["src/**/*.{ts,tsx}"],
theme: {
extend: {
colors: {
border: "hsl(var(--border))",
input: "hsl(var(--input))",
ring: "hsl(var(--ring))",
background: "hsl(var(--background))",
foreground: "hsl(var(--foreground))",
primary: {
DEFAULT: "hsl(var(--primary))",
foreground: "hsl(var(--primary-foreground))",
},
secondary: {
DEFAULT: "hsl(var(--secondary))",
foreground: "hsl(var(--secondary-foreground))",
},
destructive: {
DEFAULT: "hsl(var(--destructive))",
foreground: "hsl(var(--destructive-foreground))",
},
muted: {
DEFAULT: "hsl(var(--muted))",
foreground: "hsl(var(--muted-foreground))",
},
accent: {
DEFAULT: "hsl(var(--accent))",
foreground: "hsl(var(--accent-foreground))",
},
popover: {
DEFAULT: "hsl(var(--popover))",
foreground: "hsl(var(--popover-foreground))",
},
card: {
DEFAULT: "hsl(var(--card))",
foreground: "hsl(var(--card-foreground))",
},
},
borderColor: {
DEFAULT: "hsl(var(--border))",
},
},
},
} satisfies Config;
| ymc9/my-t3-turbo | 0 | TypeScript | ymc9 | Yiming Cao | zenstackhq |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.