import os import re import random import math import yaml from typing import List, Dict, Tuple, Set, Any from collections import defaultdict, Counter import pandas as pd from tqdm import tqdm from feather import FeatherManager, similarity_score, calculate_confidence_score class GrammarRules: @staticmethod def apply_all_rules(text: str) -> str: if not text: return text return text.strip() class PatternExtractor: def __init__(self): pass def extract_keywords(self, text: str) -> List[str]: if not text: return [] full_text_normalized = re.sub(r'\s+', ' ', text.strip().lower()) words = re.findall(r'\b[a-zA-Z]+\b', full_text_normalized) result = [full_text_normalized] result.extend(words) return list(set(result)) def create_pattern(self, user_input: str) -> str: if not user_input: return "" pattern = re.sub(r'\s+', ' ', user_input.strip().lower()) pattern = f" {pattern} " return pattern def calculate_pattern_similarity(self, pattern1: str, pattern2: str) -> float: return similarity_score(pattern1.strip(), pattern2.strip()) class MiniModelTrainer: def __init__(self, feather_manager: FeatherManager): self.feather_manager = feather_manager self.pattern_extractor = PatternExtractor() self.grammar_rules = GrammarRules() def train_mini_model(self, training_pairs: List[Tuple[str, str]], confidence_threshold: float = 0.1) -> Dict[str, Any]: if not training_pairs or len(training_pairs) < 2: return None keyword_patterns = [] responses = [] pattern_confidences = [] all_keywords = [] for user_input, ai_response in training_pairs: processed_response = ai_response.strip() # Get both the pattern and keywords pattern = self.pattern_extractor.create_pattern(user_input) keywords = self.pattern_extractor.extract_keywords(user_input) all_keywords.extend(keywords) # Add the main pattern keyword_patterns.append(pattern) responses.append(processed_response) individual_confidence = min(0.9, len(training_pairs) / 20.0) pattern_confidences.append(individual_confidence) if not keyword_patterns: return None base_confidence = min(0.9, len(training_pairs) / 20.0) keyword_counter = Counter(all_keywords) top_keywords = [word for word, count in keyword_counter.most_common(10)] mini_model = { 'patterns': keyword_patterns, 'responses': responses, 'pattern_confidences': pattern_confidences, 'confidence': base_confidence, 'grammar_rules': [], 'keywords': top_keywords, 'training_samples': len(training_pairs) } return mini_model class AgGPTTrainer: def __init__(self, models_dir: str = "models"): self.feather_manager = FeatherManager(models_dir) self.mini_trainer = MiniModelTrainer(self.feather_manager) self.target_size_mb = 5 self.estimated_size_per_pair = 1000 self.chunk_size = (self.target_size_mb * 1024 * 1024) // self.estimated_size_per_pair self.readable_weights_dir = "readable_weights" os.makedirs(self.readable_weights_dir, exist_ok=True) def save_model_as_yaml(self, model_data: Dict[str, Any], model_id: int): try: filename = f"AgGPT_Model_{model_id:04d}.yaml" filepath = os.path.join(self.readable_weights_dir, filename) print(f"Creating YAML data for model {model_id}...") yaml_data = { 'model_info': { 'model_id': model_id, 'confidence': model_data.get('confidence', 0.5), 'training_samples': model_data.get('training_samples', 0), 'keywords': model_data.get('keywords', []) }, 'patterns_and_responses': [] } patterns = model_data.get('patterns', []) responses = model_data.get('responses', []) weights = model_data.get('weights', []) print(f"Processing {len(patterns)} patterns...") for i in range(len(patterns)): entry = { 'pattern': patterns[i] if i < len(patterns) else '', 'response': responses[i] if i < len(responses) else '', 'weight': weights[i] if i < len(weights) else 1.0 } yaml_data['patterns_and_responses'].append(entry) print(f"Writing YAML to {filepath}...") with open(filepath, 'w', encoding='utf-8') as f: yaml.dump(yaml_data, f, default_flow_style=False, allow_unicode=True, indent=2) print(f"Saved readable model: {filename}") except Exception as e: print(f"Error in save_model_as_yaml: {e}") import traceback traceback.print_exc() def load_training_data(self, file_path: str) -> List[Tuple[str, str]]: training_pairs = [] with open(file_path, 'r', encoding='utf-8') as f: content = f.read() conversations = content.split('') print(f"Processing {len(conversations)} conversation chunks...") for conversation in tqdm(conversations, desc="Parsing conversations"): conversation = conversation.strip() if not conversation: continue user_match = re.search(r'user:\s*(.*?)(?=\n|\nai:|$)', conversation, re.DOTALL) ai_match = re.search(r'ai:\s*(.*?)$', conversation, re.DOTALL) if user_match and ai_match: user_input = user_match.group(1).strip() ai_response = ai_match.group(1).strip() user_input = re.sub(r'', '', user_input).strip() ai_response = re.sub(r'', '', ai_response).strip() if user_input and ai_response and len(user_input) > 0 and len(ai_response) > 0: training_pairs.append((user_input, ai_response)) print(f"Extracted {len(training_pairs)} training pairs") return training_pairs def create_training_chunks(self, training_pairs: List[Tuple[str, str]]) -> List[List[Tuple[str, str]]]: shuffled_pairs = training_pairs.copy() random.shuffle(shuffled_pairs) chunks = [] total_pairs = len(shuffled_pairs) for i in range(0, total_pairs, self.chunk_size): chunk = shuffled_pairs[i:i + self.chunk_size] if len(chunk) >= 5: chunks.append(chunk) print(f"Created {len(chunks)} training chunks (target: {self.target_size_mb}MB each)") return chunks def train_multiple_corpora(self, training_files: List[str] = None): """Train on multiple corpora files sequentially""" if training_files is None: # Automatically find all .txt files in training_corpora directory training_corpora_dir = "training_corpora" if os.path.exists(training_corpora_dir): training_files = [] for filename in sorted(os.listdir(training_corpora_dir)): if filename.endswith('.txt'): file_path = os.path.join(training_corpora_dir, filename) training_files.append(file_path) print(f"Auto-discovered {len(training_files)} training files in {training_corpora_dir}/") else: # Fallback to old default for backward compatibility training_files = ["training_data/corpora.txt", "training_data/corpora2.txt"] print("Using fallback training files (training_corpora/ not found)") print("Starting AgGPT-20 Multi-Corpora Training with Scalable Feather Architecture") print("=" * 70) cleared_count = self.feather_manager.clear_all_models() if cleared_count > 0: print(f"Cleared {cleared_count} existing models") all_trained_models = [] total_model_id = 1 for file_idx, training_file in enumerate(training_files, 1): print(f"\n--- Training on file {file_idx}/{len(training_files)}: {training_file} ---") if not os.path.exists(training_file): print(f"Warning: Training file {training_file} does not exist. Skipping...") continue if os.path.getsize(training_file) == 0: print(f"Warning: Training file {training_file} is empty. Skipping...") continue print(f"Loading training data from {training_file}...") training_pairs = self.load_training_data(training_file) if not training_pairs: print(f"No training data found in {training_file}. Skipping...") continue print(f"Creating training chunks for {training_file}...") training_chunks = self.create_training_chunks(training_pairs) print(f"Training mini-models from {training_file}...") file_trained_models = [] progress_bar = tqdm(training_chunks, desc=f"Training from {os.path.basename(training_file)}") for chunk_idx, chunk in enumerate(progress_bar): print(f"\nProcessing chunk {chunk_idx + 1}/{len(training_chunks)}") mini_model = self.mini_trainer.train_mini_model(chunk) if mini_model: file_trained_models.append(mini_model) all_trained_models.append(mini_model) print(f"Saving model {total_model_id}...") self.feather_manager.save_mini_model(mini_model, total_model_id) if total_model_id == 1: print("Saving first model as YAML...") try: self.save_model_as_yaml(mini_model, total_model_id) print("YAML saved successfully") except Exception as e: print(f"Error saving YAML: {e}") total_model_id += 1 print(f"Model {total_model_id - 1} completed") try: progress_bar.set_postfix({ 'File Models': len(file_trained_models), 'Total Models': len(all_trained_models), 'Confidence': f"{mini_model['confidence']:.3f}" }) except Exception as e: print(f"Error updating progress bar: {e}") print(f"Completed training on {training_file}: {len(file_trained_models)} mini-models created") print(f"Total models so far: {len(all_trained_models)}") print(f"\n--- Multi-Corpora Training Complete ---") final_count = self.feather_manager.get_model_count() print(f"Final model count: {final_count}") print(f"Trained on {len([f for f in training_files if os.path.exists(f) and os.path.getsize(f) > 0])} corpora files") print("=" * 70) def train(self, training_file: str = "training_data/corpora.txt"): print("Starting AgGPT-20 Training with Scalable Feather Architecture") print("=" * 60) cleared_count = self.feather_manager.clear_all_models() if cleared_count > 0: print(f"Cleared {cleared_count} existing models") print("Loading training data...") training_pairs = self.load_training_data(training_file) if not training_pairs: print("No training data found!") return print("Creating training chunks...") training_chunks = self.create_training_chunks(training_pairs) print("Training mini-models...") trained_models = [] model_id = 1 progress_bar = tqdm(training_chunks, desc="Training mini-models") for chunk in progress_bar: mini_model = self.mini_trainer.train_mini_model(chunk) if mini_model: trained_models.append(mini_model) self.feather_manager.save_mini_model(mini_model, model_id) if model_id == 1: self.save_model_as_yaml(mini_model, model_id) model_id += 1 progress_bar.set_postfix({ 'Models': len(trained_models), 'Confidence': f"{mini_model['confidence']:.3f}" }) print(f"Trained {len(trained_models)} mini-models") final_count = self.feather_manager.get_model_count() print(f"Training complete! Final model count: {final_count}") print("=" * 60) def main(): print("AgGPT-20 Scalable Feather Architecture Trainer") print("=" * 50) # Automatically find all .txt files in training_corpora directory training_corpora_dir = "training_corpora" training_files = [] if os.path.exists(training_corpora_dir): print(f"Scanning {training_corpora_dir} directory for training files...") for filename in sorted(os.listdir(training_corpora_dir)): if filename.endswith('.txt'): file_path = os.path.join(training_corpora_dir, filename) training_files.append(file_path) print(f"Found {len(training_files)} training files:") for file_path in training_files: print(f" - {file_path}") else: print(f"Warning: {training_corpora_dir} directory not found!") training_files = None trainer = AgGPTTrainer() try: trainer.train_multiple_corpora(training_files=training_files) except KeyboardInterrupt: print("\nTraining interrupted by user") except Exception as e: print(f"Training failed: {e}") import traceback traceback.print_exc() if __name__ == "__main__": main()