"""LLM Engine — unified AI abstraction layer for aura_core.

Provides:
- LLMEngine class: configurable OpenAI wrapper with retry logic
- parse_llm_json: robust JSON extraction from LLM responses
- build_system_prompt: task-specific system prompt construction

Zero imports from app/. Uses Replit AI Integrations env vars.
"""

import os
import re
import json
import time
import logging
from typing import Optional, Union

from aura_core.types import LLMRequest, LLMResponse

logger = logging.getLogger(__name__)

SYSTEM_PROMPT_TEMPLATES = {
    "domain analysis": (
        "You are an expert domain name analyst and niche market researcher. "
        "Analyze domain names to identify profitable business niches with "
        "affiliate opportunities. Always respond with valid JSON."
    ),
    "brand generation": (
        "You are a brand strategist specializing in creating compelling brand "
        "identities for online businesses. Generate brand names, taglines, and "
        "color palettes. Always respond with valid JSON."
    ),
    "site copy": (
        "You are a professional website copywriter who creates compelling, "
        "conversion-optimized content for business websites. Generate structured "
        "site copy sections. Always respond with valid JSON."
    ),
    "sales letter": (
        "You are a direct response copywriter who creates compelling sales "
        "letters that convert visitors into customers. Write persuasive, "
        "benefit-driven copy."
    ),
    "general": (
        "You are a helpful AI assistant specializing in business strategy "
        "and online entrepreneurship. Provide clear, actionable guidance. "
        "When asked for structured data, respond with valid JSON."
    ),
}


def parse_llm_json(text: str) -> Optional[Union[dict, list]]:
    """Extract a JSON object or array from raw LLM output.

    Tries, in order: the body of a fenced code block, the full text as-is,
    the widest {...} span, then the widest [...] span. Returns None if
    nothing parses.
    """
    if not text or not text.strip():
        return None

    cleaned = text.strip()

    # Prefer the body of a Markdown code fence if the model wrapped its JSON.
    md_match = re.search(r'```(?:json)?\s*\n?(.*?)\n?\s*```', cleaned, re.DOTALL)
    if md_match:
        cleaned = md_match.group(1).strip()

    # Try the cleaned text as-is; accept only structured results so the
    # return type stays Optional[Union[dict, list]].
    try:
        parsed = json.loads(cleaned)
        if isinstance(parsed, (dict, list)):
            return parsed
    except json.JSONDecodeError:
        pass

    # Fall back to the widest {...} span in the text.
    json_match = re.search(r'\{.*\}', cleaned, re.DOTALL)
    if json_match:
        try:
            return json.loads(json_match.group(0))
        except json.JSONDecodeError:
            pass

    # Last resort: the widest [...] span, for top-level JSON arrays.
    arr_match = re.search(r'\[.*\]', cleaned, re.DOTALL)
    if arr_match:
        try:
            return json.loads(arr_match.group(0))
        except json.JSONDecodeError:
            pass

    return None
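

# Illustrative expectations for parse_llm_json; these follow directly from
# the parsing order above and can be checked offline:
#
#     parse_llm_json('```json\n{"niche": "hiking"}\n```')    # -> {"niche": "hiking"}
#     parse_llm_json('Sure! {"score": 8} Hope that helps.')  # -> {"score": 8}
#     parse_llm_json('["a", "b"]')                           # -> ["a", "b"]
#     parse_llm_json('not structured at all')                # -> None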


def build_system_prompt(task: str, context: str = "") -> str:
    """Build the system prompt for a named task.

    Lookup is case-insensitive: an exact template key wins, then a substring
    match in either direction, and anything else falls back to "general".
    Optional context is appended as a trailing section.
    """
    task_lower = task.lower().strip()

    template = SYSTEM_PROMPT_TEMPLATES.get(task_lower)
    if not template:
        # No exact key: accept a substring match in either direction.
        for key, val in SYSTEM_PROMPT_TEMPLATES.items():
            if key in task_lower or task_lower in key:
                template = val
                break

    if not template:
        template = SYSTEM_PROMPT_TEMPLATES["general"]

    if context:
        template += f"\n\nAdditional context:\n{context}"

    return template
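

# Example lookups, following the matching rules above:
#
#     build_system_prompt("Domain Analysis")       # exact key, case-insensitive
#     build_system_prompt("sales letter writing")  # substring hit on "sales letter"
#     build_system_prompt("poetry critique")       # no match -> "general" template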


class LLMEngine:
    """Lazy-initialized OpenAI chat wrapper with retries and usage accounting."""

    def __init__(self, model: str = "gpt-5", max_retries: int = 5):
        self.model = model
        self.max_retries = max_retries
        self._client = None

    def _get_client(self):
        """Create the OpenAI client on first use from AI Integrations env vars."""
        if self._client is None:
            try:
                from openai import OpenAI
            except ImportError:
                raise RuntimeError("openai package not installed")
            api_key = os.environ.get("AI_INTEGRATIONS_OPENAI_API_KEY")
            base_url = os.environ.get("AI_INTEGRATIONS_OPENAI_BASE_URL")
            if not api_key:
                raise RuntimeError("AI_INTEGRATIONS_OPENAI_API_KEY is not set")
            self._client = OpenAI(api_key=api_key, base_url=base_url)
        return self._client

    def _create_with_retry(self, **kwargs):
        """Call chat.completions.create with exponential backoff.

        Makes up to max_retries attempts, sleeping 1s, 2s, 4s, ... between
        them, then re-raises the last error. The catch is deliberately broad:
        rate limits, timeouts, and transient network failures surface as
        different openai exception types, and retrying any failure keeps
        this module free of a top-level openai import.
        """
        last_error: Optional[Exception] = None
        for attempt in range(self.max_retries):
            try:
                return self._get_client().chat.completions.create(
                    model=self.model, **kwargs
                )
            except Exception as exc:
                last_error = exc
                if attempt < self.max_retries - 1:
                    delay = 2 ** attempt
                    logger.warning(
                        "LLM call failed (attempt %d/%d): %s; retrying in %ds",
                        attempt + 1, self.max_retries, exc, delay,
                    )
                    time.sleep(delay)
        raise RuntimeError(
            f"LLM call failed after {self.max_retries} attempts"
        ) from last_error

    @staticmethod
    def _build_messages(request: LLMRequest) -> list:
        """Convert an LLMRequest into chat messages."""
        messages = []
        if request.system_prompt:
            messages.append({"role": "system", "content": request.system_prompt})
        messages.append({"role": "user", "content": request.prompt})
        return messages

    def _to_llm_response(self, response) -> LLMResponse:
        """Map an OpenAI completion onto LLMResponse, including token usage."""
        content = response.choices[0].message.content or ""
        usage = None
        if response.usage:
            usage = {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens,
            }
        return LLMResponse(
            content=content, model=response.model or self.model, usage=usage
        )

    def call_json(self, request: LLMRequest) -> LLMResponse:
        """Request a JSON-mode completion; pair with parse_llm_json on the result."""
        response = self._create_with_retry(
            messages=self._build_messages(request),
            max_completion_tokens=request.max_tokens,
            response_format={"type": "json_object"},
        )
        return self._to_llm_response(response)

    def call_text(self, request: LLMRequest) -> LLMResponse:
        """Request a plain-text completion."""
        response = self._create_with_retry(
            messages=self._build_messages(request),
            max_completion_tokens=request.max_tokens,
        )
        return self._to_llm_response(response)

    def call_stream(self, messages: list, max_tokens: int = 4096):
        """Return a streaming completion iterator for pre-built chat messages.

        Streaming responses are returned as-is and are not retried.
        """
        return self._get_client().chat.completions.create(
            model=self.model,
            messages=messages,
            max_completion_tokens=max_tokens,
            stream=True,
        )
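

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the library surface. It assumes
    # the AI_INTEGRATIONS_* env vars are set and that LLMRequest accepts
    # prompt/system_prompt/max_tokens keyword arguments (see aura_core.types
    # for the actual definition).
    engine = LLMEngine()
    request = LLMRequest(
        prompt="Suggest three niches for the domain 'peakpedal.io'.",
        system_prompt=build_system_prompt("domain analysis"),
        max_tokens=512,
    )
    result = engine.call_json(request)
    print(parse_llm_json(result.content))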
