#!/usr/bin/env python3
"""
Test runner for AI reporting functionality.

This script runs all AI reporting tests with proper setup and clean output.
Tests have been moved to tests/ai_report/ for better organization.
"""

import os
import sys
import django
import subprocess
from django.conf import settings
from django.test.utils import get_runner

def setup_django():
    """Configure the Django environment so tests can import project code.

    Inserts the project root (two directories above this file's directory)
    at the front of sys.path, points Django at the project settings without
    clobbering values already present in the environment, then initializes
    the Django app registry.
    """
    # Project root is two levels above the directory containing this script.
    project_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
    sys.path.insert(0, project_root)

    # setdefault keeps any settings the caller already exported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'iching.settings')
    os.environ.setdefault('DJANGO_ENV', 'development')

    django.setup()

def _parse_django_test_output(stdout):
    """Extract (test_count, failures, errors) from Django test-runner stdout.

    Recognizes the "Found X test(s)", "Ran X tests in Y.Ys" and
    "FAILED (failures=X, errors=Y)" summary lines; missing counts stay 0.
    """
    test_count = 0
    failures = 0
    errors = 0
    for line in stdout.split('\n'):
        if 'Found ' in line and ' test' in line:
            # e.g. "Found 12 test(s)." — first numeric token after "Found"
            parts = line.split()
            for i, part in enumerate(parts):
                if part.isdigit() and i > 0:
                    test_count = int(part)
                    break
        elif 'Ran ' in line and ' test' in line:
            # e.g. "Ran 12 tests in 0.345s"
            parts = line.split()
            if len(parts) >= 2 and parts[1].isdigit():
                test_count = int(parts[1])
        elif line.startswith('FAILED ('):
            # e.g. "FAILED (failures=2, errors=1)" or "FAILED (failures=2)"
            if 'failures=' in line:
                fail_part = line.split('failures=')[1].split(',')[0].split(')')[0]
                failures = int(fail_part)
            if 'errors=' in line:
                err_part = line.split('errors=')[1].split(',')[0].split(')')[0]
                errors = int(err_part)
    return test_count, failures, errors


def run_django_test(test_module, quiet=True):
    """Run a single test module via `manage.py test` in a subprocess.

    Args:
        test_module: Dotted module path (e.g. 'tests.ai_report.test_x').
        quiet: When True, stream output live at verbosity 1 and only check
            the exit code. When False, capture output at verbosity 2 and
            parse the test/failure/error counts from it.

    Returns:
        dict with keys: 'success' (bool), 'test_count' (int),
        'failures' (int), 'errors' (int), 'output' (str), 'stderr' (str).
    """
    # Build argv as a list with shell=False: avoids shell-quoting problems
    # and injection via test_module. sys.executable keeps the same
    # interpreter that is running this script.
    cmd = [
        sys.executable, 'manage.py', 'test', test_module,
        f'--verbosity={1 if quiet else 2}',
    ]
    # DJANGO_ENV is set for the child process only, like the previous
    # inline `DJANGO_ENV="development" python manage.py test ...` form.
    env = {**os.environ, 'DJANGO_ENV': 'development'}

    # manage.py lives at the project root, two levels above this file's dir.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(os.path.dirname(current_dir))

    try:
        if quiet:
            # Stream output in real time so the progress dots are visible.
            print(f"🔄 Running {test_module}...")
            result = subprocess.run(cmd, text=True, cwd=project_root, env=env)

            # Output was not captured, so only the exit code is available.
            success = result.returncode == 0
            return {
                'success': success,
                'test_count': 0,  # unknown in quiet mode (output not captured)
                'failures': 0 if success else 1,
                'errors': 0,
                'output': "",
                'stderr': ""
            }

        # Verbose mode: capture output so the counts can be parsed.
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=project_root,
            env=env,
        )
        test_count, failures, errors = _parse_django_test_output(result.stdout)
        return {
            'success': result.returncode == 0,
            'test_count': test_count,
            'failures': failures,
            'errors': errors,
            'output': result.stdout,
            'stderr': result.stderr
        }

    except Exception as e:
        # Treat any launch failure as a single failed test module.
        return {
            'success': False,
            'test_count': 0,
            'failures': 1,
            'errors': 0,
            'output': f"Error running test: {str(e)}",
            'stderr': str(e)
        }

def run_tests(test_type='all', verbose=False):
    """Run AI reporting tests with clean output and summary.

    Args:
        test_type: One of 'debug', 'emails', 'admin', 'api', 'integration',
            'models', or 'all' (the concatenation of every suite).
        verbose: When True, run each module with detailed output.

    Returns:
        True if every selected test module passed, False otherwise.
    """
    # Individual suites, in the order they should run under 'all'.
    test_modules = {
        'debug': [
            ('tests.ai_report.test_debug_admin_setup', 'Debug Admin Setup Tests')
        ],
        'emails': [
            ('tests.ai_report.test_reporting_emails', 'Email Notification Tests')
        ],
        'admin': [
            ('tests.ai_report.test_reporting_admin', 'Admin Interface Tests')
        ],
        'api': [
            ('tests.ai_report.test_reporting_api', 'API Endpoint Tests')
        ],
        'integration': [
            ('tests.ai_report.test_reporting_integration', 'Integration Tests')
        ],
        'models': [
            ('tests.ai_report.test_reporting_models', 'Model Tests')
        ],
    }
    # Derive 'all' from the individual suites so new suites can't drift
    # out of sync with a hand-maintained duplicate list.
    test_modules['all'] = [
        entry for suite in test_modules.values() for entry in suite
    ]

    if test_type not in test_modules:
        print(f"Invalid test type: {test_type}")
        print("Valid options: debug, emails, admin, api, integration, models, all")
        return False

    modules_to_test = test_modules[test_type]

    print(f"\n{'='*60}")
    print(f"Running {test_type.title()} AI Reporting Tests")
    print(f"{'='*60}")

    results = []
    total_failures = 0
    total_errors = 0

    for module_name, display_name in modules_to_test:
        result = run_django_test(module_name, quiet=not verbose)
        results.append((display_name, result))

        total_failures += result['failures']
        total_errors += result['errors']

        # Show immediate per-module result.
        if result['success']:
            print(f"✅ {display_name}: passed")
        else:
            print(f"❌ {display_name}: {result['failures']} failures, {result['errors']} errors")

        # Show details when requested, or always on failure.
        if verbose or not result['success']:
            if result['stderr']:
                print("STDERR:", result['stderr'])

            # On failure in quiet mode, echo only the FAIL/ERROR sections
            # and the final "Ran ..."/"FAILED ..." summary lines.
            if not result['success'] and not verbose:
                output_lines = result['output'].split('\n')
                in_failure_section = False
                for line in output_lines:
                    if 'FAIL:' in line or 'ERROR:' in line:
                        in_failure_section = True
                    elif line.startswith('======') and in_failure_section:
                        in_failure_section = False
                    elif line.startswith('Ran ') or line.startswith('FAILED'):
                        in_failure_section = False
                        print(line)

                    if in_failure_section:
                        print(line)

    # Print the final pass/fail summary table.
    print(f"\n{'='*60}")
    print("TEST SUMMARY")
    print(f"{'='*60}")

    for display_name, result in results:
        status = "✅ PASS" if result['success'] else "❌ FAIL"
        print(f"{status} {display_name}")

    print(f"\n📊 TOTAL: {len(modules_to_test)} test modules")

    if total_failures == 0 and total_errors == 0:
        print("🎉 ALL TESTS PASSED!")
        return True
    else:
        print(f"💥 FAILURES: {total_failures}, ERRORS: {total_errors}")
        return False

def print_help():
    """Display the command-line usage text for this test runner."""
    usage_text = """
AI Reporting Test Runner

Usage:
    python tests/ai_report/run_ai_report_tests.py [test_type] [options]

Test Types:
    all         - Run all AI reporting tests (default)
    debug       - Run only debug admin setup tests
    emails      - Run only email notification tests  
    admin       - Run only admin interface tests
    api         - Run only API endpoint tests
    integration - Run only integration tests
    models      - Run only model tests

Options:
    --verbose   - Show detailed test output
    --help      - Show this help message

Examples:
    python tests/ai_report/run_ai_report_tests.py
    python tests/ai_report/run_ai_report_tests.py all
    python tests/ai_report/run_ai_report_tests.py emails --verbose
    python tests/ai_report/run_ai_report_tests.py debug

Alternative: Use Django's test command directly:
    python manage.py test tests.ai_report.test_reporting_emails
    python manage.py test tests.ai_report.test_reporting_api  
    python manage.py test tests.ai_report.test_debug_admin_setup
    python manage.py test tests.ai_report
"""
    print(usage_text)

def main():
    """Parse CLI arguments, run the selected tests, and exit.

    Exits with status 0 on success, 1 on failure or unknown option.
    """
    # Parse arguments first: --help and the unknown-option error path
    # should not require a working Django installation.
    args = sys.argv[1:]
    test_type = 'all'
    verbose = False

    for arg in args:
        if arg == '--help':
            print_help()
            sys.exit(0)
        elif arg == '--verbose':
            verbose = True
        elif arg.startswith('--'):
            print(f"Unknown option: {arg}")
            print_help()
            sys.exit(1)
        else:
            # Any bare word selects the test type; validated in run_tests().
            test_type = arg

    # Django is only needed once tests are actually about to run.
    setup_django()

    success = run_tests(test_type, verbose)
    sys.exit(0 if success else 1)

if __name__ == "__main__":
    print("AI Reporting Test Runner")
    print("Usage:")
    print("  python tests/ai_report/run_ai_report_tests.py           # Run all tests")
    print("  python tests/ai_report/run_ai_report_tests.py debug     # Run debug tests")
    print("  python tests/ai_report/run_ai_report_tests.py emails    # Run email tests")
    print("  python tests/ai_report/run_ai_report_tests.py admin     # Run admin tests")
    print("  python tests/ai_report/run_ai_report_tests.py api       # Run API tests")
    print("  python tests/ai_report/run_ai_report_tests.py models    # Run model tests")
    print("  python tests/ai_report/run_ai_report_tests.py integration # Run integration tests")
    print()
    
    main() 