"""
Comprehensive test suite for AdminAIAnalysis functionality.

This test suite covers:

1. **Model Tests** (AdminAIAnalysisModelTestCase):
   - Model creation and field validation
   - Default values and string representation
   - Provider choices and status updates
   - Model field constraints

2. **Permission Tests** (AdminAIAnalysisPermissionTestCase):
   - Permission-based access control for different user types
   - Superuser access (with/without explicit permission)
   - Staff user access (with/without permission)
   - Regular user access (with/without permission)
   - URL-level access control testing

3. **Admin Interface Tests** (AdminAIAnalysisAdminTestCase):
   - List display configuration
   - Search and filter functionality
   - Readonly fields configuration
   - Fieldsets organization
   - AJAX model selection endpoint
   - Context data for dynamic model selection
   - User assignment on model save

4. **Form Tests** (AdminAIAnalysisFormTestCase):
   - Form initialization for new and existing instances
   - Dynamic model choice preservation
   - CSS class assignment for JavaScript targeting
   - Form validation (valid and invalid data)
   - Required field validation

5. **LLM Inference Tests** (AdminLLMInferenceTestCase):
   - Successful inference with Groq provider
   - Successful inference with OpenAI provider
   - Error handling for API failures
   - Unsupported provider handling
   - Status updates during inference

6. **Integration Tests** (AIProviderConfigIntegrationTestCase):
   - Model availability retrieval for different providers
   - Provider validation
   - Model list structure validation

The test suite uses mocking to avoid actual LLM API calls and focuses on
testing the business logic, permission system, and admin interface functionality
without external dependencies.

Total: 32 tests covering the model, permission system, admin interface, form,
LLM inference, and provider-config integration.
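
To run the suite, use the standard Django test runner. The module path below is
illustrative and depends on where this file lives in the project:

    python manage.py test ai.tests.test_admin_ai_analysis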
"""

import json
from unittest.mock import patch, Mock
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.contrib.admin.sites import AdminSite
from django.http import JsonResponse

from ai.models import AdminAIAnalysis, AIProviderConfig
from ai.admin import AdminAIAnalysisAdmin, AdminAIAnalysisAdminForm
from ai.utils.admin_llm import run_admin_llm_inference

User = get_user_model()
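
# NOTE: these tests assume a custom user model whose USERNAME_FIELD is 'phone',
# which is why create_user() and Client.login() are called with phone= rather
# than username= throughout the suite.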


class AdminAIAnalysisModelTestCase(TestCase):
    """Test cases for AdminAIAnalysis model functionality."""

    def setUp(self):
        self.user = User.objects.create_user(
            phone='1234567890',
            email='test@example.com',
            password='testpass123',
            is_staff=True
        )

    def test_model_creation(self):
        """Test basic model creation."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        self.assertEqual(analysis.title, 'Test Analysis')
        self.assertEqual(analysis.prompt, 'Test prompt')
        self.assertEqual(analysis.provider, 'groq')
        self.assertEqual(analysis.model, 'llama-3.3-70b-versatile')
        self.assertEqual(analysis.status, 'pending')
        self.assertEqual(analysis.created_by, self.user)
        self.assertIsNotNone(analysis.created_at)
        self.assertIsNotNone(analysis.updated_at)

    def test_model_string_representation(self):
        """Test model __str__ method."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        expected = f"Test Analysis"
        self.assertEqual(str(analysis), expected)

    def test_model_defaults(self):
        """Test model field defaults."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        self.assertEqual(analysis.status, 'pending')
        self.assertEqual(analysis.meta, {})
        self.assertIsNone(analysis.response)

    def test_provider_choices(self):
        """Test that only valid providers are accepted."""
        # Valid providers should work
        for provider in ['groq', 'openai']:
            analysis = AdminAIAnalysis(
                title='Test',
                prompt='Test',
                provider=provider,
                model='test-model',
                created_by=self.user
            )
            analysis.full_clean()  # Should not raise ValidationError

    def test_status_updates(self):
        """Test status field updates."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        # Test status updates
        analysis.status = 'completed'
        analysis.response = 'Test response'
        analysis.save()
        
        analysis.refresh_from_db()
        self.assertEqual(analysis.status, 'completed')
        self.assertEqual(analysis.response, 'Test response')


class AdminAIAnalysisPermissionTestCase(TestCase):
    """Test cases for permission-based access control."""

    def setUp(self):
        # Create users
        self.admin_user = User.objects.create_user(
            phone='admin123',
            email='admin@example.com',
            password='testpass123',
            is_staff=True,
            is_superuser=True
        )
        
        self.staff_user = User.objects.create_user(
            phone='staff123',
            email='staff@example.com',
            password='testpass123',
            is_staff=True
        )
        
        self.regular_user = User.objects.create_user(
            phone='user123',
            email='user@example.com',
            password='testpass123',
            is_staff=False
        )
        
        # Get or create the custom permission that gates the admin module
        content_type = ContentType.objects.get_for_model(AdminAIAnalysis)
        self.permission, _ = Permission.objects.get_or_create(
            codename='can_send_admin_llm_analysis',
            content_type=content_type,
            defaults={'name': 'Can send admin LLM analysis'},
        )
        
        # Create admin site and admin instance
        self.site = AdminSite()
        self.admin_instance = AdminAIAnalysisAdmin(AdminAIAnalysis, self.site)
        
        self.client = Client()

    def test_superuser_has_access(self):
        """Test that superuser has access even without explicit permission."""
        # Superuser should have access
        request = Mock()
        request.user = self.admin_user
        
        self.assertTrue(self.admin_instance.has_module_permission(request))
        self.assertTrue(self.admin_instance.has_view_permission(request))
        self.assertTrue(self.admin_instance.has_add_permission(request))
        self.assertTrue(self.admin_instance.has_change_permission(request))

    def test_staff_without_permission_no_access(self):
        """Test that staff user without permission has no access."""
        request = Mock()
        request.user = self.staff_user
        
        self.assertFalse(self.admin_instance.has_module_permission(request))
        self.assertFalse(self.admin_instance.has_view_permission(request))
        self.assertFalse(self.admin_instance.has_add_permission(request))
        self.assertFalse(self.admin_instance.has_change_permission(request))

    def test_staff_with_permission_has_access(self):
        """Test that staff user with permission has access."""
        # Assign permission to staff user
        self.staff_user.user_permissions.add(self.permission)
        
        request = Mock()
        request.user = self.staff_user
        
        self.assertTrue(self.admin_instance.has_module_permission(request))
        self.assertTrue(self.admin_instance.has_view_permission(request))
        self.assertTrue(self.admin_instance.has_add_permission(request))
        self.assertTrue(self.admin_instance.has_change_permission(request))

    def test_regular_user_no_access(self):
        """Test that regular user has no access without permission."""
        # Don't assign permission to regular user
        request = Mock()
        request.user = self.regular_user
        
        # Without the permission, should not have access
        self.assertFalse(self.admin_instance.has_module_permission(request))

    def test_regular_user_with_permission_has_access(self):
        """Test that regular user with permission has access to the module."""
        # Assign permission to regular user
        self.regular_user.user_permissions.add(self.permission)
        
        request = Mock()
        request.user = self.regular_user
        
        # With the permission, should have access to the module
        # (Note: The admin interface itself will still require staff status for full functionality)
        self.assertTrue(self.admin_instance.has_module_permission(request))

    def test_admin_url_access_with_permission(self):
        """Test admin URL access with proper permissions."""
        # Login with staff user who has permission
        self.staff_user.user_permissions.add(self.permission)
        self.client.login(phone='staff123', password='testpass123')
        
        # Should be able to access admin list view
        response = self.client.get(reverse('admin:ai_adminaianalysis_changelist'))
        self.assertEqual(response.status_code, 200)

    def test_admin_url_access_without_permission(self):
        """Test admin URL access without permission is denied."""
        # Login with staff user without permission
        self.client.login(phone='staff123', password='testpass123')
        
        # Should get permission denied (403)
        response = self.client.get(reverse('admin:ai_adminaianalysis_changelist'))
        self.assertEqual(response.status_code, 403)


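# The assertions below pin the admin configuration. AdminAIAnalysisAdmin in
# ai/admin.py is assumed to declare roughly the following (an illustrative
# sketch mirroring the expected tuples, not the actual source):
#
#     @admin.register(AdminAIAnalysis)
#     class AdminAIAnalysisAdmin(admin.ModelAdmin):
#         form = AdminAIAnalysisAdminForm
#         list_display = ('title', 'provider', 'model', 'status', 'created_by', 'created_at')
#         list_filter = ('provider', 'model', 'status', 'created_at')
#         search_fields = ('title', 'prompt', 'response')
#         readonly_fields = ('response', 'status', 'created_by', 'created_at', 'updated_at')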
class AdminAIAnalysisAdminTestCase(TestCase):
    """Test cases for the Django admin interface."""

    def setUp(self):
        self.user = User.objects.create_user(
            phone='admin123',
            email='admin@example.com',
            password='testpass123',
            is_staff=True,
            is_superuser=True
        )
        
        # Create admin site and admin instance
        self.site = AdminSite()
        self.admin_instance = AdminAIAnalysisAdmin(AdminAIAnalysis, self.site)
        
        self.client = Client()
        self.client.login(phone='admin123', password='testpass123')

    def test_list_display_fields(self):
        """Test that list display shows correct fields."""
        expected_fields = ('title', 'provider', 'model', 'status', 'created_by', 'created_at')
        self.assertEqual(self.admin_instance.list_display, expected_fields)

    def test_search_fields(self):
        """Test search functionality."""
        expected_fields = ('title', 'prompt', 'response')
        self.assertEqual(self.admin_instance.search_fields, expected_fields)

    def test_list_filter_fields(self):
        """Test list filter functionality."""
        expected_fields = ('provider', 'model', 'status', 'created_at')
        self.assertEqual(self.admin_instance.list_filter, expected_fields)

    def test_readonly_fields(self):
        """Test readonly fields configuration."""
        expected_fields = ('response', 'status', 'created_by', 'created_at', 'updated_at')
        self.assertEqual(self.admin_instance.readonly_fields, expected_fields)

    def test_fieldsets_configuration(self):
        """Test fieldsets configuration."""
        fieldsets = self.admin_instance.fieldsets
        self.assertEqual(len(fieldsets), 3)
        
        # Check main section
        main_fields = fieldsets[0][1]['fields']
        self.assertEqual(main_fields, ('title', 'prompt', 'provider', 'model'))
        
        # Check results section
        results_fields = fieldsets[1][1]['fields']
        self.assertEqual(results_fields, ('response', 'status', 'meta'))

    def test_get_models_view(self):
        """Test the AJAX get models view."""
        request = Mock()
        request.user = self.user
        
        # Test with valid provider
        response = self.admin_instance.get_models_view(request, 'groq')
        self.assertIsInstance(response, JsonResponse)
        
        # Parse the response content
        response_data = json.loads(response.content)
        self.assertTrue(response_data['success'])
        self.assertIn('models', response_data)
        self.assertIsInstance(response_data['models'], list)
        
        # Check model structure
        if response_data['models']:
            model = response_data['models'][0]
            self.assertIn('id', model)
            self.assertIn('name', model)

    def test_get_models_view_invalid_provider(self):
        """Test get models view with invalid provider."""
        request = Mock()
        request.user = self.user
        
        response = self.admin_instance.get_models_view(request, 'invalid_provider')
        self.assertIsInstance(response, JsonResponse)
        # Invalid provider returns empty list, which is still success=True
        response_data = json.loads(response.content)
        self.assertTrue(response_data['success'])
        self.assertEqual(response_data['models'], [])

    def test_save_model_sets_created_by(self):
        """Test that save_model sets created_by to current user."""
        request = Mock()
        request.user = self.user
        
        analysis = AdminAIAnalysis(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile'
        )
        
        form = Mock()
        
        # Mock run_admin_llm_inference to avoid actual LLM calls
        with patch('ai.admin.run_admin_llm_inference') as mock_inference:
            self.admin_instance.save_model(request, analysis, form, False)
            
            self.assertEqual(analysis.created_by, self.user)
            # Should call LLM inference since status is pending
            mock_inference.assert_called_once_with(analysis)

    def test_context_data_in_views(self):
        """Test that context data is properly added to admin views."""
        # Test the context data preparation logic directly
        extra_context = {}
        
        # Test add_view context preparation
        provider_models = {}
        for provider_code, provider_name in AdminAIAnalysis.PROVIDER_CHOICES:
            provider_models[provider_code] = AIProviderConfig.get_available_models(provider_code)
        
        extra_context['provider_models_json'] = json.dumps(provider_models)
        
        # Verify the context contains the expected data
        self.assertIn('provider_models_json', extra_context)
        provider_data = json.loads(extra_context['provider_models_json'])
        self.assertIn('groq', provider_data)
        self.assertIn('openai', provider_data)
        self.assertIsInstance(provider_data['groq'], list)
        self.assertIsInstance(provider_data['openai'], list)


class AdminAIAnalysisFormTestCase(TestCase):
    """Test cases for the admin form."""

    def setUp(self):
        self.user = User.objects.create_user(
            phone='admin123',
            email='admin@example.com',
            password='testpass123',
            is_staff=True
        )

    def test_form_initialization_new_instance(self):
        """Test form initialization for new instance."""
        form = AdminAIAnalysisAdminForm()
        
        # Check that CSS classes are added
        self.assertIn('provider-select', form.fields['provider'].widget.attrs['class'])
        self.assertIn('model-select', form.fields['model'].widget.attrs['class'])
        
        # Check model field choices for new instance
        model_choices = form.fields['model'].widget.choices
        self.assertEqual(len(model_choices), 1)  # Only empty choice
        self.assertEqual(model_choices[0], ('', '---------'))

    def test_form_initialization_existing_instance(self):
        """Test form initialization for existing instance."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        form = AdminAIAnalysisAdminForm(instance=analysis)
        
        # Check that model field preserves existing value
        model_choices = dict(form.fields['model'].widget.choices)
        self.assertIn('llama-3.3-70b-versatile', model_choices)

    def test_form_validation(self):
        """Test form validation."""
        form_data = {
            'title': 'Test Analysis',
            'prompt': 'Test prompt',
            'provider': 'groq',
            'model': 'llama-3.3-70b-versatile',
            'meta': '{}',
            'status': 'pending'
        }
        
        form = AdminAIAnalysisAdminForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_form_validation_missing_required_fields(self):
        """Test form validation with missing required fields."""
        form_data = {
            'title': '',  # Required field empty
            'prompt': 'Test prompt',
            'provider': 'groq',
            'model': 'llama-3.3-70b-versatile'
        }
        
        form = AdminAIAnalysisAdminForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertIn('title', form.errors)


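# The tests below assume run_admin_llm_inference dispatches on the provider,
# calls the matching service, and records the outcome on the analysis instance.
# An illustrative sketch (not the real implementation in ai/utils/admin_llm.py):
#
#     def run_admin_llm_inference(analysis):
#         try:
#             if analysis.provider == 'groq':
#                 service = GroqService(model=analysis.model)
#             elif analysis.provider == 'openai':
#                 service = OpenAIService(model=analysis.model)
#             else:
#                 raise ValueError(f'Unsupported provider: {analysis.provider}')
#             analysis.response = service.get_completion(analysis.prompt)
#             analysis.status = 'completed'
#         except Exception as exc:
#             analysis.response = str(exc)
#             analysis.status = 'error'
#         analysis.save()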
class AdminLLMInferenceTestCase(TestCase):
    """Test cases for LLM inference functionality."""

    def setUp(self):
        self.user = User.objects.create_user(
            phone='admin123',
            email='admin@example.com',
            password='testpass123',
            is_staff=True
        )

    @patch('ai.utils.admin_llm.GroqService')
    def test_llm_inference_groq_success(self, mock_groq_service):
        """Test successful LLM inference with Groq provider."""
        # Mock the service
        mock_service_instance = Mock()
        mock_service_instance.get_completion.return_value = "Test response from Groq"
        mock_groq_service.return_value = mock_service_instance
        
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        run_admin_llm_inference(analysis)
        
        analysis.refresh_from_db()
        self.assertEqual(analysis.status, 'completed')
        self.assertEqual(analysis.response, 'Test response from Groq')
        
        # Verify service was called correctly
        mock_groq_service.assert_called_once_with(model='llama-3.3-70b-versatile')
        mock_service_instance.get_completion.assert_called_once_with('Test prompt')

    @patch('ai.utils.admin_llm.OpenAIService')
    def test_llm_inference_openai_success(self, mock_openai_service):
        """Test successful LLM inference with OpenAI provider."""
        # Mock the service
        mock_service_instance = Mock()
        mock_service_instance.get_completion.return_value = "Test response from OpenAI"
        mock_openai_service.return_value = mock_service_instance
        
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='openai',
            model='gpt-4o',
            created_by=self.user
        )
        
        run_admin_llm_inference(analysis)
        
        analysis.refresh_from_db()
        self.assertEqual(analysis.status, 'completed')
        self.assertEqual(analysis.response, 'Test response from OpenAI')
        
        # Verify service was called correctly
        mock_openai_service.assert_called_once_with(model='gpt-4o')
        mock_service_instance.get_completion.assert_called_once_with('Test prompt')

    @patch('ai.utils.admin_llm.GroqService')
    def test_llm_inference_error_handling(self, mock_groq_service):
        """Test LLM inference error handling."""
        # Mock the service to raise an exception
        mock_service_instance = Mock()
        mock_service_instance.get_completion.side_effect = Exception("API Error")
        mock_groq_service.return_value = mock_service_instance
        
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='groq',
            model='llama-3.3-70b-versatile',
            created_by=self.user
        )
        
        run_admin_llm_inference(analysis)
        
        analysis.refresh_from_db()
        self.assertEqual(analysis.status, 'error')
        self.assertEqual(analysis.response, 'API Error')

    def test_llm_inference_unsupported_provider(self):
        """Test LLM inference with unsupported provider."""
        analysis = AdminAIAnalysis.objects.create(
            title='Test Analysis',
            prompt='Test prompt',
            provider='unsupported',
            model='some-model',
            created_by=self.user
        )
        
        run_admin_llm_inference(analysis)
        
        analysis.refresh_from_db()
        self.assertEqual(analysis.status, 'error')
        self.assertIn('Unsupported provider', analysis.response)


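# AIProviderConfig.get_available_models(provider) is expected to return a list
# of (model_code, model_name) string tuples for known providers and an empty
# list for unknown ones; the tests below check that contract rather than any
# specific model catalogue.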
class AIProviderConfigIntegrationTestCase(TestCase):
    """Test integration with AIProviderConfig model."""

    def test_get_available_models_groq(self):
        """Test getting available models for Groq provider."""
        models = AIProviderConfig.get_available_models('groq')
        self.assertIsInstance(models, list)
        self.assertGreater(len(models), 0)
        
        # Check model structure
        for model_code, model_name in models:
            self.assertIsInstance(model_code, str)
            self.assertIsInstance(model_name, str)
            self.assertGreater(len(model_code), 0)
            self.assertGreater(len(model_name), 0)

    def test_get_available_models_openai(self):
        """Test getting available models for OpenAI provider."""
        models = AIProviderConfig.get_available_models('openai')
        self.assertIsInstance(models, list)
        self.assertGreater(len(models), 0)
        
        # Check model structure
        for model_code, model_name in models:
            self.assertIsInstance(model_code, str)
            self.assertIsInstance(model_name, str)
            self.assertGreater(len(model_code), 0)
            self.assertGreater(len(model_name), 0)

    def test_get_available_models_invalid_provider(self):
        """Test getting models for invalid provider returns empty list."""
        models = AIProviderConfig.get_available_models('invalid_provider')
        self.assertEqual(models, [])