"""
Groq API service for accessing LLM capabilities.
"""
import os
import logging
from typing import Dict, List, Optional, Any, Union
from django.conf import settings
from groq import Groq, AsyncGroq
from .llm_service import LLMService

logger = logging.getLogger(__name__)

class GroqService(LLMService):
    """
    Service for interacting with the Groq API.

    Wraps the synchronous Groq client. The active model can be switched at
    runtime via keys of the GROQ_MODELS mapping in Django settings.
    """
    def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
        """
        Initialize the Groq service with API key and model.

        Args:
            api_key: Groq API key, defaults to settings.GROQ_API_KEY
            model: Groq model to use, defaults to settings.GROQ_MODEL
        """
        api_key = api_key or settings.GROQ_API_KEY
        model = model or settings.GROQ_MODEL
        super().__init__(api_key, model)
        # Synchronous client only; AsyncGroq is imported at module level but
        # no async client is constructed here.
        self.client = Groq(api_key=self.api_key)

    def get_completion(self,
                       prompt: str,
                       system_prompt: Optional[str] = None,
                       temperature: float = 0.7,
                       max_tokens: int = 4096,
                       top_p: float = 0.9) -> str:
        """
        Get a text completion from the Groq API.

        Args:
            prompt: The user prompt to send to the model
            system_prompt: Optional system prompt to set context
            temperature: Controls randomness (0-1), lower is more deterministic
            max_tokens: Maximum number of tokens to generate
            top_p: Nucleus sampling parameter

        Returns:
            The text response from the model

        Raises:
            Exception: Any error raised by the Groq client is logged
                (with traceback) and re-raised unchanged.
        """
        # Build the chat transcript: optional system message first,
        # then the user prompt.
        messages: List[Dict[str, str]] = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p
            )
            return response.choices[0].message.content
        except Exception as e:
            # logger.exception records the full traceback; lazy %-style
            # arguments defer string formatting until the record is emitted.
            logger.exception("Error calling Groq API: %s", e)
            raise

    def change_model(self, model_key: str) -> bool:
        """
        Change the model being used by the service.

        Args:
            model_key: Key from the GROQ_MODELS dictionary in settings

        Returns:
            True if the key was found and the model was switched,
            False otherwise (the current model is left unchanged).
        """
        models = getattr(settings, 'GROQ_MODELS', {})
        if model_key in models:
            self.model = models[model_key]
            return True
        # Lazy %-args: no formatting cost unless the record is emitted.
        logger.warning("Model key '%s' not found in settings.GROQ_MODELS", model_key)
        return False

    def get_available_models(self) -> Dict[str, str]:
        """
        Get a dictionary of available models.

        Returns:
            Dictionary mapping model keys to their corresponding model IDs;
            empty if settings.GROQ_MODELS is not defined.
        """
        return getattr(settings, 'GROQ_MODELS', {})


