---
# DreamMachine Configuration File
# All models, prompts, and settings are configurable here

# Model Configuration
models:
  # Dreamer models - High creativity, high temperature
  dreamers:
    - model_id: "mistralai/Mixtral-8x7B-Instruct-v0.1"
      temperature: 0.9
      max_tokens: 1000
    - model_id: "meta-llama/Meta-Llama-3-8B-Instruct"
      temperature: 0.85
      max_tokens: 1000
    - model_id: "mistralai/Mistral-7B-Instruct-v0.3"
      temperature: 0.9
      max_tokens: 1000

  # Deep Thinker - Analytical reasoning
  deep_thinker:
    model_id: "meta-llama/Meta-Llama-3-70B-Instruct"
    temperature: 0.3
    max_tokens: 1500

  # Curator - Evaluation and scoring
  curator:
    model_id: "meta-llama/Meta-Llama-3-70B-Instruct"
    temperature: 0.2
    max_tokens: 800

  # Writer - Story/pitch creation
  writer:
    model_id: "mistralai/Mistral-7B-Instruct-v0.2"
    temperature: 0.6
    max_tokens: 1200

  # Logger - Technical extraction
  logger:
    model_id: "mistralai/Mistral-7B-Instruct-v0.2"
    temperature: 0.4
    max_tokens: 800

  # Narrator - Final presentation
  narrator:
    model_id: "mistralai/Mistral-7B-Instruct-v0.2"
    temperature: 0.5
    max_tokens: 1000

# Hugging Face Configuration
huggingface:
  use_inference_api: true
  dataset_name: "dreammachine-logs"
  dataset_private: true
  use_zero_gpu: false  # Disabled - using Inference API (no local GPU needed)

# Orchestration Settings
orchestration:
  max_iterations: 1000  # Maximum dream rounds
  run_interval: 3600  # Seconds between rounds (1 hour)
  batch_mode: true
  max_runtime: 21600  # Maximum runtime in seconds (6 hours)
  auto_advance_threshold:
    feasibility_min: 7
    originality_min: 5

# Scoring Configuration
scoring:
  metrics:
    - originality  # 1-10
    - feasibility  # 1-10
    - global_impact  # 1-10
    - narrative_coherence  # 1-10
  reforge_criteria:
    feasibility_threshold: 7
    originality_threshold: 5

# Constraint System (for A.1 Setup)
constraints:
  physics: "Must use current or near-future physics (within 50 years)"
  ethics: "Must solve a global humanitarian problem"
  feasibility: "Must be achievable with existing materials or near-term developments"
  scope: "Must have measurable positive impact on at least 1 million people"

# Logging Settings
logging:
  output_format: "json"
  chunk_size: 100  # Entries per file
  log_directory: "./logs"
  save_to_hf_dataset: true
  verbose: true

# Prompt Detail Level (configurable)
prompt_detail_level: "full"  # Options: "simple", "moderate", "full"