File size: 4,062 Bytes
b620472
0b94fac
4c5b5e2
8a6fb45
 
 
 
175adcc
503d4ac
9d54dfd
54639e8
fce9bf7
5f6fd2a
4c5b5e2
 
 
 
 
 
 
 
 
503d4ac
4c5b5e2
 
 
 
 
 
 
 
 
 
 
 
 
 
3bba70c
4c5b5e2
 
 
 
503d4ac
4c5b5e2
503d4ac
4c5b5e2
503d4ac
4c5b5e2
 
 
503d4ac
4c5b5e2
503d4ac
 
54639e8
 
 
 
175adcc
 
 
 
 
 
 
 
 
 
 
 
503d4ac
175adcc
4c5b5e2
 
54639e8
fce9bf7
54639e8
4c5b5e2
 
54639e8
 
 
 
 
 
 
455e3bd
1c821b0
455e3bd
0b00536
4c5b5e2
54639e8
 
 
 
0b00536
4c5b5e2
54639e8
 
 
0b00536
 
 
503d4ac
 
 
0b00536
 
 
503d4ac
0b00536
 
503d4ac
 
 
54639e8
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import asyncio
import argparse
import sys
import os

# Add project root to sys.path to allow 'from src.config import ...'
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src.logger_config  # Configure logging early
from src.logger_config import logger
from src.config import get_config_value, configure_job_environment
from pipeline_processor import download_all_library_videos
from google_src.gcs_utils import list_gcs_files
from src.workflows import run_content_strategy_workflow, run_plain_video_workflow

def pre_check():
    """Validate required environment variables defined in video_generate.env.

    Reads the env file (expected in the project root, one level above this
    file), extracts each KEY from KEY=VALUE lines, and checks that the key
    resolves via get_config_value. Missing keys produce a warning only — the
    check is best-effort and never aborts the run for missing values. The
    process exits with status 1 only if the file exists but cannot be parsed.
    """
    # Path to video_generate.env (in project root, one level up from src/)
    env_path = os.path.join(os.path.dirname(__file__), '..', 'video_generate.env')

    if not os.path.exists(env_path):
        logger.warning(f"video_generate.env not found at {env_path}. Skipping dynamic validation.")
        return

    missing = []

    try:
        with open(env_path, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines and comments
                if not line or line.startswith('#'):
                    continue

                # Extract key (text before the first '=')
                if '=' in line:
                    key = line.split('=', 1)[0].strip()
                    # get_config_value resolves from env/config; falsy means unset
                    if key and not get_config_value(key):
                        missing.append(key)

    # Broad catch on purpose: any parse/lookup failure here is fatal for the run.
    except Exception as e:
        logger.error(f"Failed to read video_generate.env: {e}")
        sys.exit(1)

    if missing:
        # Warn and return without the success message — do not claim the
        # pre-check passed when variables are absent.
        logger.warning(f"Missing required environment variables: {', '.join(missing)}")
        return

    logger.info("✅ Environment pre-check passed")

async def main():
    """Entry point: parse CLI flags, validate the environment, and run the pipeline."""

    # Command-line interface; unknown args are tolerated for downstream consumers.
    parser = argparse.ArgumentParser(description="AI Content Automation Pipeline")
    parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose logging (DEBUG level)")
    args, _extra = parser.parse_known_args()

    if args.verbose:
        os.environ["VERBOSE"] = "true"
        # Re-apply the logging configuration so the DEBUG level takes effect
        import importlib
        import src.logger_config
        importlib.reload(src.logger_config)
        logger.info("Verbose mode enabled (DEBUG level)")

    # Validate environment before doing any work
    pre_check()

    # "test_automation" set => dry run (no commit)
    commit = not get_config_value("test_automation")
    job_index = get_config_value("JOB_INDEX")
    total_jobs = get_config_value("TOTAL_JOBS")
    configure_job_environment(job_index)

    # Pre-download assets
    await download_all_library_videos()

    # Choose the workflow based on the configured execution mode
    ai_generation = get_config_value("ai_generation")
    list_gcs_files()
    workflow = run_content_strategy_workflow if ai_generation else run_plain_video_workflow
    generated_results = await workflow(
        commit=commit,
        job_index=job_index,
        total_jobs=total_jobs
    )

    # Print summary table of produced artifacts (skipped when nothing was generated)
    if generated_results:
        rule = "=" * 120
        logger.info("\n" + rule)
        logger.info(f"{'LOCAL PATH':<50} | {'GCS FILENAME':<30} | {'DRIVE/PUBLIC URL'}")
        logger.info("-" * 120)
        for entry in generated_results:
            local_path = str(entry['local_path'])
            # Truncate long paths from the left so the tail (filename) stays visible
            if len(local_path) > 47:
                local_path = "..." + local_path[-44:]

            gcs_name = str(entry['gcs_path'])
            if len(gcs_name) > 27:
                gcs_name = "..." + gcs_name[-24:]

            logger.info(f"{local_path:<50} | {gcs_name:<30} | {entry['drive_path']}")
        logger.info(rule + "\n")

if __name__ == "__main__":
    # Run the async entry point only when executed as a script (not on import).
    asyncio.run(main())