import json
import os
import logging
from app.services.llm import call_llm, call_llm_text, generate_image, call_llm_routed, call_llm_text_routed, generate_image_routed
from app.services.validators import validate_analysis, validate_package, validate_site_copy, ValidationReport
from app.services.prompts import SYSTEM_PROMPT_AURA_BUILDER

logger = logging.getLogger(__name__)

# System prompt for the analysis call in analyze_domain(): frames the model
# as a domain-valuation / niche-strategy expert and demands JSON-only output.
SYSTEM_PROMPT_ANALYZER = """You are a domain valuation expert, niche strategist, and creative business consultant.
You specialize in analyzing domain names and identifying profitable business opportunities.
You think creatively about acronyms, cross-language meanings, mashups, and cross-domain tie-ins.
You always consider affiliate program opportunities and passive income business models.
Always respond with valid JSON."""

# Both builder prompt names currently alias the shared AURA builder prompt;
# the "legendary" variant is kept as a distinct name even though it is identical.
SYSTEM_PROMPT_BUILDER = SYSTEM_PROMPT_AURA_BUILDER
SYSTEM_PROMPT_BUILDER_LEGENDARY = SYSTEM_PROMPT_AURA_BUILDER


def _infer_discovery_from_niche(niche_name: str, niche_data: dict, domain: str) -> str:
    if not niche_data:
        return ""

    target = niche_data.get('target_audience', '')
    monetization = niche_data.get('monetization_model', '')
    description = niche_data.get('description', '')
    synopsis = niche_data.get('synopsis', '')

    return f"""
--- AUTO-INFERRED DISCOVERY CONTEXT ---
The client did NOT fill out the discovery questionnaire, so you MUST infer rich, specific context from the niche data to produce output that is just as compelling, textured, and personalized as if they had answered every question. Do NOT produce generic or flat content.

INFERRED BRAND PERSONALITY: Based on the "{niche_name}" niche, infer the ideal brand personality. Consider: What kind of person would this brand be? What tone of voice? What level of formality? What emotional register? Make it specific and vivid.

INFERRED TARGET AUDIENCE: {target if target else f'Infer the ideal customer persona for a {niche_name} business - their age, income, pain points, aspirations, daily life, and what keeps them up at night.'}

INFERRED VISUAL STYLE: Based on the industry ({niche_name}), select the most impactful visual direction. Consider what colors, imagery, and design patterns dominate successful brands in this space. Be specific about color mood, typography feel, and imagery style.

INFERRED CORE VALUES: Infer 3-5 core values that would resonate with the target audience of a {niche_name} business. These should feel authentic and differentiating, not generic.

INFERRED EMOTIONAL IMPACT: How should visitors FEEL when they land on this site? What transformation or relief should they experience? What problem does this solve for them emotionally, not just practically?

INFERRED PRIMARY ACTION: Based on the monetization model ({monetization}), what is the single most important action visitors should take? Design all copy to drive toward this action.

NICHE CONTEXT: {description} {synopsis}

CRITICAL INSTRUCTION: Use all inferred context above to produce content that feels deeply personalized, industry-specific, and emotionally resonant. Every headline, feature description, testimonial, and CTA must feel like it was written by someone who deeply understands this specific niche and its audience. NEVER produce generic "lorem ipsum"-style content.
--- END AUTO-INFERRED CONTEXT ---
"""


def _build_discovery_section(discovery_context: dict) -> str:
    if not discovery_context:
        return ""

    sections = []
    sections.append("\n--- CLIENT DISCOVERY CONTEXT ---")
    sections.append("The client has provided detailed answers about their vision and preferences. Use these to deeply personalize all generated content.\n")

    if discovery_context.get("project_story"):
        sections.append(f"PROJECT VISION & STORY: {discovery_context['project_story']}")
    if discovery_context.get("success_vision"):
        sections.append(f"SUCCESS VISION (1-5 years): {discovery_context['success_vision']}")
    if discovery_context.get("one_thing"):
        sections.append(f"THE ONE THING this site must do perfectly: {discovery_context['one_thing']}")
    if discovery_context.get("ideal_audience"):
        sections.append(f"IDEAL AUDIENCE: {discovery_context['ideal_audience']}")
    if discovery_context.get("audience_journey"):
        sections.append(f"AUDIENCE JOURNEY: {discovery_context['audience_journey']}")
    if discovery_context.get("brand_personality"):
        sections.append(f"BRAND PERSONALITY: {discovery_context['brand_personality']}")
    if discovery_context.get("core_values"):
        sections.append(f"CORE VALUES: {discovery_context['core_values']}")
    if discovery_context.get("differentiator"):
        sections.append(f"UNIQUE DIFFERENTIATOR: {discovery_context['differentiator']}")
    if discovery_context.get("desired_feeling"):
        sections.append(f"DESIRED FEELING when visitors interact: {discovery_context['desired_feeling']}")
    if discovery_context.get("color_palette"):
        sections.append(f"COLOR PALETTE PREFERENCE: {discovery_context['color_palette']}")
    if discovery_context.get("visual_style"):
        sections.append(f"VISUAL STYLE PREFERENCE: {discovery_context['visual_style']}")
    if discovery_context.get("sites_loved"):
        sections.append(f"WEBSITES THEY LOVE (and why): {discovery_context['sites_loved']}")
    if discovery_context.get("sites_disliked"):
        sections.append(f"WEBSITES THEY DISLIKE (and why): {discovery_context['sites_disliked']}")
    if discovery_context.get("primary_action"):
        sections.append(f"PRIMARY VISITOR ACTION: {discovery_context['primary_action']}")
    if discovery_context.get("key_features"):
        sections.append(f"KEY FEATURES NEEDED: {discovery_context['key_features']}")
    if discovery_context.get("future_vision"):
        sections.append(f"FUTURE VISION: {discovery_context['future_vision']}")
    if discovery_context.get("additional_notes"):
        sections.append(f"ADDITIONAL NOTES: {discovery_context['additional_notes']}")

    sections.append("--- END DISCOVERY CONTEXT ---\n")
    return "\n".join(sections)


def analyze_domain(domain: str, progress_callback=None, niche_hints: str = "") -> dict:
    """Analyze a domain name's business potential with a routed LLM call.

    Args:
        domain: Domain name to analyze; normalized to lowercase first.
        progress_callback: Optional callable(message, step, total_steps)
            used to surface progress to the UI.
        niche_hints: Optional user-suggested niche directions that the
            prompt instructs the model to weave into its niche list.

    Returns:
        The validated analysis dict with "niches" sorted by score
        (descending) and a "_validation" report dict attached.

    Raises:
        ValueError: If the AI response cannot be parsed as JSON, or the
            parsed result contains no niches.
    """
    clean_domain = domain.strip().lower()
    name_part = clean_domain.split(".")[0] if "." in clean_domain else clean_domain
    tld = clean_domain.split(".")[-1] if "." in clean_domain else "com"

    if progress_callback:
        progress_callback("Decomposing domain keywords and patterns...", 1, 3)

    niche_hint_block = ""
    if niche_hints:
        niche_hint_block = f"""

IMPORTANT — USER-SUGGESTED NICHES:
The domain owner has specifically suggested these niche directions be explored:
\"\"\"{niche_hints}\"\"\"

You MUST include at least one niche that directly addresses each user suggestion above, in addition to your own AI-discovered niches. Score them honestly — if a suggested niche is a poor fit, give it a low score but still include it so the user can see the analysis. Weave user suggestions into the 7-10 niches naturally (they count toward the total).
"""

    prompt = f"""Analyze the domain name "{clean_domain}" for business potential.

The domain name part is "{name_part}" with TLD ".{tld}".
{niche_hint_block}
Break down the domain into keywords and explore creative interpretations including:
- Literal meanings
- Acronyms (what could each letter stand for?)
- Cross-language meanings (Spanish, French, German, Japanese, etc.)
- Mashups and portmanteaus
- Cross-domain tie-ins
- Industry-specific interpretations

Generate 7-10 niche business ideas for this domain. For EACH niche, consider:
- Whether an affiliate program exists in that industry (list specific programs if known)
- Whether it could work as a passive/semi-passive income site
- How brandable the domain is for that niche

For each niche, provide:
- "name": short niche name (2-5 words)
- "description": 3-4 sentence description of the business concept, what it does, who it serves, and why it's viable
- "synopsis": A one-paragraph executive summary of what this business would look like, written for someone unfamiliar with the industry
- "monetization_model": primary revenue method (e.g., "SaaS subscription", "marketplace fees", "affiliate revenue", "ad-supported content", "e-commerce", "digital products")
- "affiliate_programs": list of specific affiliate programs or networks relevant to this niche (e.g., "Amazon Associates", "ShareASale", "CJ Affiliate")
- "target_audience": who would use this site
- "time_to_revenue": one of "fast", "medium", "slow"
- "valuation_band": estimated domain+business value range (e.g., "500-2000", "5000-20000", "20000-100000")
- "score": 0-10 rating of viability and potential
- "requires_inventory": boolean - true if the business model needs physical inventory, false if digital/affiliate/service

Return JSON in this exact format:
{{
  "domain": "{clean_domain}",
  "keywords": ["keyword1", "keyword2"],
  "interpretations": ["interpretation1", "interpretation2"],
  "domain_summary": "A one-paragraph summary of the domain's overall potential and most promising directions",
  "niches": [
    {{
      "name": "...",
      "description": "...",
      "synopsis": "...",
      "monetization_model": "...",
      "affiliate_programs": ["program1", "program2"],
      "target_audience": "...",
      "time_to_revenue": "fast|medium|slow",
      "valuation_band": "...",
      "score": 8,
      "requires_inventory": false
    }}
  ]
}}"""

    if progress_callback:
        progress_callback("Analyzing niches and business models with AI...", 2, 3)

    response = call_llm_routed("domain_analysis", prompt, SYSTEM_PROMPT_ANALYZER)
    try:
        result = json.loads(response)
    except json.JSONDecodeError:
        # Models sometimes wrap the JSON in ``` fences or surrounding prose:
        # strip any fence, then fall back to the outermost {...} span.
        # (Single failure path below replaces the previously duplicated
        # log-and-raise branches.)
        cleaned = response.strip()
        if cleaned.startswith("```"):
            cleaned = cleaned.split("\n", 1)[-1].rsplit("```", 1)[0].strip()
        start = cleaned.find("{")
        end = cleaned.rfind("}") + 1
        result = None
        if start >= 0 and end > start:
            try:
                result = json.loads(cleaned[start:end])
            except json.JSONDecodeError:
                result = None
        if result is None:
            logger.error(f"AI response not valid JSON for {clean_domain}: {response[:500]}")
            raise ValueError("Failed to parse domain analysis response from AI")

    if not result.get("niches"):
        raise ValueError("AI returned no niche ideas for this domain")

    # Validate and auto-repair the structure; validation errors are logged
    # but not fatal — the (repaired) result is still returned.
    result, analysis_report = validate_analysis(result, auto_repair=True)
    if not analysis_report.is_valid:
        logger.error(f"Analysis validation failed for {clean_domain}: {analysis_report.errors}")
    if analysis_report.repairs:
        logger.info(f"Analysis auto-repairs for {clean_domain}: {len(analysis_report.repairs)} fixes applied")

    # Best-scoring niches first; attach the validation report for the caller.
    result["niches"] = sorted(result["niches"], key=lambda x: x.get("score", 0), reverse=True)
    result["_validation"] = analysis_report.to_dict()

    if progress_callback:
        progress_callback("Analysis complete!", 3, 3)

    return result


def _ensure_list_of_dicts(data, dict_keys):
    if data is None:
        return []
    if isinstance(data, dict):
        for k in dict_keys:
            if k in data and isinstance(data[k], list):
                return _ensure_list_of_dicts(data[k], dict_keys)
        return [data]
    if isinstance(data, list):
        result = []
        for item in data:
            if isinstance(item, dict):
                result.append(item)
            elif isinstance(item, str):
                if dict_keys and len(dict_keys) >= 2:
                    result.append({dict_keys[0]: item, dict_keys[1]: ""})
                else:
                    result.append({"text": item})
        return result
    return []


def _normalize_site_copy(site_copy: dict) -> dict:
    """Normalize AI-generated site copy into the flat shape later code expects.

    Mutates and returns *site_copy*. The model may nest sections, wrap
    lists, or emit bare strings; each pass below undoes one variation.
    Pass order matters: the flattening passes can surface new keys that
    the later coercion passes then normalize.
    """
    # Known list-valued fields, mapped to the dict keys used when a bare
    # string must be coerced into a dict (the first key gets the string).
    list_field_schemas = {
        "features": ["title", "description"],
        "testimonials": ["quote", "author"],
        "faq_items": ["question", "answer"],
        "stats": ["value", "label"],
        "how_it_works_steps": ["title", "description"],
        "pricing_tiers": ["name", "price"],
        "team_members": ["name", "role"],
        "gallery_sections": ["title", "description"],
        "resource_items": ["title", "description"],
    }

    # Pass 1: coerce every known list field into a list of dicts.
    for key, dict_keys in list_field_schemas.items():
        if key in site_copy:
            site_copy[key] = _ensure_list_of_dicts(site_copy[key], dict_keys)

    # Pass 2: unwrap single-item lists whose only dict repeats the field
    # name (e.g. {"faq": [{"faq": ...}]}) down to that inner dict.
    for key in list(site_copy.keys()):
        val = site_copy[key]
        if isinstance(val, list) and len(val) == 1 and isinstance(val[0], dict) and key in val[0]:
            site_copy[key] = val[0]

    # Pass 3: flatten known wrapper dicts (e.g. {"faq": {"faq_items": [...]}}):
    # promote the inner list to its canonical key, hoist any sibling keys
    # (without overwriting), and drop the wrapper.
    wrapper_fields = {
        "features": "features",
        "stats": "stats",
        "faq": "faq_items",
        "trust": "testimonials",
        "pricing": "pricing_tiers",
        "testimonials": "testimonials",
    }
    for wrapper_key, inner_key in wrapper_fields.items():
        val = site_copy.get(wrapper_key)
        if isinstance(val, dict) and inner_key in val:
            inner = val.pop(inner_key)
            for k, v in val.items():
                if k not in site_copy:
                    site_copy[k] = v
            site_copy[inner_key] = _ensure_list_of_dicts(inner, list_field_schemas.get(inner_key, []))
            if wrapper_key != inner_key:
                del site_copy[wrapper_key]

    # Pass 4: "about"/"hero" should end up as strings. Promote string
    # sub-fields to the top level, then replace the dict with its text
    # (looked up under the same key, "content", or "body"); if that value
    # is not a string, drop the field entirely.
    string_fields_with_nested = ["about", "hero"]
    for key in string_fields_with_nested:
        val = site_copy.get(key)
        if isinstance(val, dict):
            for inner_k, inner_v in val.items():
                if isinstance(inner_v, str) and inner_k not in site_copy:
                    site_copy[inner_k] = inner_v
            if key in site_copy and isinstance(site_copy[key], dict):
                text_val = val.get(key, val.get("content", val.get("body", "")))
                if isinstance(text_val, str):
                    site_copy[key] = text_val
                else:
                    del site_copy[key]

    # Pass 5: flatten known section dicts into top-level keys (existing
    # top-level keys win); the section dict itself is removed.
    section_dicts_to_flatten = [
        "problem", "solution", "how_it_works", "pricing", "comparison",
        "gallery", "team", "resources", "contact", "cta_final", "footer",
        "headline",
    ]
    for key in section_dicts_to_flatten:
        val = site_copy.get(key)
        if isinstance(val, dict):
            promoted = {}
            for inner_k, inner_v in val.items():
                promoted[inner_k] = inner_v
            del site_copy[key]
            for inner_k, inner_v in promoted.items():
                if inner_k not in site_copy:
                    site_copy[inner_k] = inner_v

    # Pass 6: rename aliased keys to their canonical names (no overwrite).
    key_aliases = {
        "how_steps": "how_it_works_steps",
        "pricing_plans": "pricing_tiers",
    }
    for alias, canonical in key_aliases.items():
        if alias in site_copy and canonical not in site_copy:
            site_copy[canonical] = site_copy.pop(alias)

    # Pass 7: re-coerce list fields — flattening may have surfaced new ones.
    for key, dict_keys in list_field_schemas.items():
        if key in site_copy:
            site_copy[key] = _ensure_list_of_dicts(site_copy[key], dict_keys)

    # Pass 8: guarantee comparison_table is a dict with headers/rows lists.
    if "comparison_table" in site_copy:
        ct = site_copy["comparison_table"]
        if isinstance(ct, dict):
            if "headers" not in ct:
                ct["headers"] = []
            if "rows" not in ct:
                ct["rows"] = []
        else:
            site_copy["comparison_table"] = {"headers": [], "rows": []}

    return site_copy


def _build_structured_input(domain: str, chosen_niche: str, niche_data: dict,
                            discovery_context: dict, blueprint: dict, depth: str) -> dict:
    is_legendary = depth == "legendary"
    mode = "legendary" if is_legendary else "standard"

    niche_context = {}
    if niche_data:
        valuation_band = niche_data.get('valuation_band', '5000-20000')
        val_parts = valuation_band.replace(',', '').split('-')
        try:
            val_min = int(val_parts[0].strip()) if len(val_parts) > 0 else 5000
            val_max = int(val_parts[1].strip()) if len(val_parts) > 1 else val_min * 4
        except (ValueError, IndexError):
            val_min, val_max = 5000, 20000

        keywords = []
        name = niche_data.get('name', chosen_niche)
        desc = niche_data.get('description', '')
        for word in name.lower().split():
            if len(word) > 2:
                keywords.append(word)
        if niche_data.get('affiliate_programs'):
            keywords.extend([p.lower() for p in niche_data['affiliate_programs'][:3]])

        risks = []
        score = niche_data.get('score', 5)
        if score < 7:
            risks.append("moderate competition expected")
        if niche_data.get('requires_inventory'):
            risks.append("requires physical inventory")
        time_to_rev = niche_data.get('time_to_revenue', 'medium')
        if time_to_rev == 'slow':
            risks.append("longer time to revenue")

        niche_context = {
            "intent_level": "high" if score >= 7 else ("medium" if score >= 4 else "low"),
            "primary_keyword_cluster": keywords[:8],
            "risks": risks,
            "valuation_band_numeric": {"min": val_min, "max": val_max},
            "description": desc,
            "synopsis": niche_data.get('synopsis', ''),
            "monetization_model": niche_data.get('monetization_model', ''),
            "target_audience": niche_data.get('target_audience', ''),
            "affiliate_programs": niche_data.get('affiliate_programs', []),
        }

    discovery = {}
    if discovery_context:
        brand_personality = discovery_context.get('brand_personality', '')
        core_values = discovery_context.get('core_values', '')
        desired_feeling = discovery_context.get('desired_feeling', '')
        voice_parts = [p for p in [brand_personality, desired_feeling] if p]
        brand_voice_profile = "; ".join(voice_parts) if voice_parts else ""

        discovery = {
            "vision": discovery_context.get('project_story', discovery_context.get('success_vision', '')),
            "audience": discovery_context.get('ideal_audience', discovery_context.get('audience_journey', '')),
            "positioning": discovery_context.get('differentiator', ''),
            "brand_voice_profile": brand_voice_profile,
            "brand_personality": brand_personality,
            "core_values": core_values,
            "desired_feeling": desired_feeling,
            "color_preference": discovery_context.get('color_palette', ''),
            "visual_style": discovery_context.get('visual_style', ''),
            "sites_loved": discovery_context.get('sites_loved', ''),
            "sites_disliked": discovery_context.get('sites_disliked', ''),
            "primary_action": discovery_context.get('primary_action', ''),
            "one_thing": discovery_context.get('one_thing', ''),
            "additional_notes": discovery_context.get('additional_notes', ''),
        }
        discovery = {k: v for k, v in discovery.items() if v}

    enabled_sections = [s for s in blueprint.get("sections", []) if s.get("enabled")]
    section_keys = [s["key"] for s in enabled_sections]

    confidence = 0.85
    if niche_data:
        score = niche_data.get('score', 5)
        confidence = round(min(0.99, score / 10.0), 2)

    structured_input = {
        "mode": mode,
        "domain": domain,
        "primary_niche": chosen_niche,
        "primary_confidence": confidence,
        "niche_context": niche_context,
        "discovery": discovery,
        "blueprint": {
            "sections": section_keys
        }
    }

    return structured_input


def _translate_brand_output(brand_data: dict) -> dict:
    rec_idx = brand_data.get("recommended_index", brand_data.get("recommended", 0))
    if not isinstance(rec_idx, int):
        rec_idx = 0

    options = brand_data.get("options", [])

    color_primary = "#4F46E5"
    color_secondary = "#7C3AED"
    color_accent = "#06B6D4"

    if options and rec_idx < len(options):
        rec_option = options[rec_idx]
        palette = rec_option.get("palette", {})
        if palette:
            color_primary = palette.get("primary", color_primary)
            color_secondary = palette.get("primary_alt", color_secondary)
            color_accent = palette.get("accent", color_accent)

    result = {
        "options": options,
        "recommended": rec_idx,
        "color_primary": brand_data.get("color_primary", color_primary),
        "color_secondary": brand_data.get("color_secondary", color_secondary),
        "color_accent": brand_data.get("color_accent", color_accent),
        "industry_context": brand_data.get("industry_context", ""),
    }

    if result["color_primary"] == "#4F46E5" and color_primary != "#4F46E5":
        result["color_primary"] = color_primary
    if result["color_secondary"] == "#7C3AED" and color_secondary != "#7C3AED":
        result["color_secondary"] = color_secondary
    if result["color_accent"] == "#06B6D4" and color_accent != "#06B6D4":
        result["color_accent"] = color_accent

    return result


def _translate_site_copy_output(site_copy: dict) -> dict:
    """Translate AURA-builder site copy field names into the internal schema.

    Mutates and returns *site_copy*. Handles two shapes the model emits:
    flat alias keys (e.g. "problem_body") and nested section dicts
    (e.g. "hero", "pricing"). Existing top-level keys are never
    overwritten, so earlier/explicit values take precedence.
    """
    # Flat aliases: copy to the canonical key only when it isn't set yet.
    if "problem_body" in site_copy and "problem_description" not in site_copy:
        site_copy["problem_description"] = site_copy["problem_body"]
    if "solution_body" in site_copy and "solution_description" not in site_copy:
        site_copy["solution_description"] = site_copy["solution_body"]
    if "hero_body" in site_copy and "hero_description" not in site_copy:
        site_copy["hero_description"] = site_copy["hero_body"]
    if "about_title" in site_copy and "about_heading" not in site_copy:
        site_copy["about_heading"] = site_copy["about_title"]

    # Nested "problem" section: promote its known fields to the top level.
    if "problem" in site_copy and isinstance(site_copy["problem"], dict):
        prob = site_copy["problem"]
        if "problem_title" not in site_copy and "problem_title" in prob:
            site_copy["problem_title"] = prob["problem_title"]
        if "problem_description" not in site_copy and "problem_body" in prob:
            site_copy["problem_description"] = prob["problem_body"]
        if "problem_points" not in site_copy and "problem_points" in prob:
            site_copy["problem_points"] = prob["problem_points"]

    # Nested "solution" section: same treatment as "problem".
    if "solution" in site_copy and isinstance(site_copy["solution"], dict):
        sol = site_copy["solution"]
        if "solution_title" not in site_copy and "solution_title" in sol:
            site_copy["solution_title"] = sol["solution_title"]
        if "solution_description" not in site_copy and "solution_body" in sol:
            site_copy["solution_description"] = sol["solution_body"]
        if "solution_points" not in site_copy and "solution_points" in sol:
            site_copy["solution_points"] = sol["solution_points"]

    # Nested "hero" section: promote every field, renaming hero_body.
    if "hero" in site_copy and isinstance(site_copy["hero"], dict):
        hero = site_copy["hero"]
        for k, v in hero.items():
            if k not in site_copy:
                mapped_key = k
                if k == "hero_body":
                    mapped_key = "hero_description"
                site_copy[mapped_key] = v

    # Nested "about" section: promote fields, renaming about_title.
    if "about" in site_copy and isinstance(site_copy["about"], dict):
        about = site_copy["about"]
        for k, v in about.items():
            if k == "about_title" and "about_heading" not in site_copy:
                site_copy["about_heading"] = v
            elif k not in site_copy:
                site_copy[k] = v

    # Feature items: merge summary+detail into "description"; map icon_hint.
    if "features" in site_copy and isinstance(site_copy["features"], list):
        for feat in site_copy["features"]:
            if isinstance(feat, dict):
                if "summary" in feat and "description" not in feat:
                    desc_parts = [feat.get("summary", "")]
                    if feat.get("detail"):
                        desc_parts.append(feat["detail"])
                    feat["description"] = " ".join(desc_parts)
                if "icon_hint" in feat and "icon" not in feat:
                    feat["icon"] = feat["icon_hint"]

    # Wrapped stats: unwrap {"stats": {"items": [...]}} and map "context".
    if "stats" in site_copy and isinstance(site_copy["stats"], dict):
        items = site_copy["stats"].get("items", [])
        if isinstance(items, list):
            for item in items:
                if isinstance(item, dict) and "context" in item and "description" not in item:
                    item["description"] = item["context"]
            site_copy["stats"] = items

    # Wrapped testimonials: unwrap items and map "name" -> "author".
    if "testimonials" in site_copy and isinstance(site_copy["testimonials"], dict):
        items = site_copy["testimonials"].get("items", [])
        if isinstance(items, list):
            for item in items:
                if isinstance(item, dict):
                    if "name" in item and "author" not in item:
                        item["author"] = item["name"]
            site_copy["testimonials"] = items

    # how_it_works: lift steps plus title/subtitle (with fallbacks).
    if "how_it_works" in site_copy and isinstance(site_copy["how_it_works"], dict):
        steps = site_copy["how_it_works"].get("steps", [])
        if isinstance(steps, list):
            site_copy["how_it_works_steps"] = steps
            if "how_title" not in site_copy:
                site_copy["how_title"] = site_copy["how_it_works"].get("title", "How It Works")
            if "how_subtitle" not in site_copy:
                site_copy["how_subtitle"] = site_copy["how_it_works"].get("subtitle", "")

    # pricing: lift tiers (mapping features->features_list and
    # recommended->highlighted per tier) plus title/subtitle.
    if "pricing" in site_copy and isinstance(site_copy["pricing"], dict):
        tiers = site_copy["pricing"].get("tiers", [])
        if isinstance(tiers, list):
            for tier in tiers:
                if isinstance(tier, dict):
                    if "features" in tier and "features_list" not in tier:
                        tier["features_list"] = tier["features"]
                    if "recommended" in tier and "highlighted" not in tier:
                        tier["highlighted"] = tier["recommended"]
            site_copy["pricing_tiers"] = tiers
            if "pricing_title" not in site_copy:
                site_copy["pricing_title"] = site_copy["pricing"].get("title", "Pricing")
            if "pricing_subtitle" not in site_copy:
                site_copy["pricing_subtitle"] = site_copy["pricing"].get("subtitle", "")

    # faq: lift the nested faq_items list to the top level.
    if "faq" in site_copy and isinstance(site_copy["faq"], dict):
        faq_items = site_copy["faq"].get("faq_items", [])
        if isinstance(faq_items, list):
            site_copy["faq_items"] = faq_items

    # comparison: lift table, title, and points without overwriting.
    if "comparison" in site_copy and isinstance(site_copy["comparison"], dict):
        comp = site_copy["comparison"]
        if "comparison_table" in comp and "comparison_table" not in site_copy:
            site_copy["comparison_table"] = comp["comparison_table"]
        if "comparison_title" in comp and "comparison_title" not in site_copy:
            site_copy["comparison_title"] = comp["comparison_title"]
        if "comparison_points" in comp and "comparison_points" not in site_copy:
            site_copy["comparison_points"] = comp["comparison_points"]

    return site_copy


def build_package(domain: str, chosen_niche: str, niche_data: dict = None,
                  progress_callback=None, discovery_context: dict = None,
                  template_type: str = "hero", blueprint: dict = None,
                  brandkit_context: str = "", assembled_context: str = "") -> dict:
    """Generate a complete business-in-a-box package for a domain + niche.

    Pipeline (5 progress steps): build structured input -> one routed LLM
    call for brand + site copy -> translate/normalize/validate that output
    -> a second call for the marketplace sales letter -> a best-effort
    hero image generation.

    Args:
        domain: Domain name; normalized to lowercase.
        chosen_niche: Name of the niche selected from the analysis.
        niche_data: Optional niche dict (as produced by analyze_domain).
        progress_callback: Optional callable(message, step, total_steps).
        discovery_context: Optional client questionnaire answers; when
            absent, discovery context is inferred from niche_data.
        template_type: Accepted for caller compatibility; not read here.
        blueprint: Section blueprint; defaults to the "comprehensive" one.
        brandkit_context: Extra prompt text describing an existing brand kit.
        assembled_context: Running project memory injected into prompts.

    Returns:
        Dict with "domain", "chosen_niche", "brand", "site_copy",
        "sales_letter", plus "hero_image_data" when image generation works.

    Raises:
        ValueError: If the brand/copy LLM response is not valid JSON.
    """
    # Function-scope import — presumably to avoid an import cycle; TODO
    # confirm. NOTE(review): blueprint_to_prompt_spec is imported but unused.
    from app.services.blueprint import get_default_blueprint, blueprint_to_prompt_spec

    clean_domain = domain.strip().lower()

    if not blueprint:
        blueprint = get_default_blueprint("comprehensive")

    enabled_sections = [s for s in blueprint.get("sections", []) if s.get("enabled")]
    enabled_keys = [s["key"] for s in enabled_sections]
    section_count = len(enabled_sections)

    depth = blueprint.get("depth", "comprehensive")
    is_legendary = depth == "legendary"  # NOTE(review): computed but not used below

    builder_system_prompt = SYSTEM_PROMPT_AURA_BUILDER

    if progress_callback:
        progress_callback("Creating brand identity options...", 1, 5)

    # JSON payload describing niche, discovery, and blueprint sections.
    structured_input = _build_structured_input(
        clean_domain, chosen_niche, niche_data,
        discovery_context, blueprint, depth
    )

    context_block = ""
    if assembled_context:
        context_block = f"""
--- PROJECT CONTEXT (Running Memory) ---
{assembled_context}
--- END PROJECT CONTEXT ---
"""

    # Prefer real client answers; otherwise infer discovery from niche data.
    discovery_section = ""
    if discovery_context:
        discovery_section = _build_discovery_section(discovery_context)
    elif niche_data:
        discovery_section = _infer_discovery_from_niche(chosen_niche, niche_data, clean_domain)

    brand_prompt = f"""Generate a complete business-in-a-box package based on the following structured input:

{json.dumps(structured_input, indent=2)}

{context_block}
{discovery_section}
{brandkit_context}

Respond with STRICTLY VALID JSON following the RESPONSE SCHEMA from your instructions.
Generate content for ALL {section_count} sections listed in the blueprint.
For icon fields, use one of: shield|chart|globe|zap|users|star|target|clock|heart|check|book|rocket|sparkle|gem|fire|brain|leaf|sun|moon|flower|wave|lotus"""

    if progress_callback:
        progress_callback(f"Generating brand + {section_count} content sections...", 2, 5)

    brand_response = call_llm_routed("site_copy", brand_prompt, builder_system_prompt, max_tokens=16384)
    try:
        package_data = json.loads(brand_response)
    except json.JSONDecodeError:
        raise ValueError("Failed to parse brand/copy response from AI")

    # Log any model-reported warnings/assumptions, then stash the meta
    # under a private key.
    if "meta" in package_data:
        meta = package_data.pop("meta")
        if meta.get("warnings"):
            logger.warning(f"AI warnings for {clean_domain}: {meta['warnings']}")
        if meta.get("assumptions"):
            logger.info(f"AI assumptions for {clean_domain}: {meta['assumptions']}")
        package_data["_meta"] = meta

    # Translate + validate the brand block (auto-repairing where possible).
    brand_data = package_data.get("brand", {})
    brand_data = _translate_brand_output(brand_data)
    from app.services.validators import validate_brand
    brand_data, brand_report = validate_brand(brand_data, auto_repair=True)
    if brand_report.repairs:
        logger.info(f"Brand auto-repairs for {clean_domain}: {len(brand_report.repairs)} fixes applied")
    package_data["brand"] = brand_data

    # Translate, normalize, and validate the site copy; validation errors
    # are logged but not fatal.
    site_copy = package_data.get("site_copy", {})
    site_copy = _translate_site_copy_output(site_copy)
    site_copy = _normalize_site_copy(site_copy)
    site_copy, copy_report = validate_site_copy(site_copy, auto_repair=True)
    if not copy_report.is_valid:
        logger.error(f"Site copy validation failed for {clean_domain}: {copy_report.errors}")
    if copy_report.repairs:
        logger.info(f"Site copy auto-repairs for {clean_domain}: {len(copy_report.repairs)} fixes applied")
    package_data["site_copy"] = site_copy

    # Warn (but don't fail) for enabled sections with no generated content:
    # a section counts as covered when any of its field keys is present.
    for section in enabled_sections:
        key = section["key"]
        if key not in site_copy:
            section_data = {}
            for field in section.get("fields", []):
                fkey = field["key"]
                if fkey in site_copy:
                    section_data[fkey] = site_copy[fkey]
            if not section_data:
                logger.warning(f"AI did not generate content for section: {key}")
    package_data["_blueprint"] = {
        "depth": blueprint.get("depth", "comprehensive"),
        "enabled_sections": enabled_keys,
        "section_count": section_count,
    }

    if progress_callback:
        progress_callback("Writing marketplace sales letter...", 3, 5)

    niche_desc_for_sales = ""
    if niche_data:
        niche_desc_for_sales = f"""
Niche details:
- Name: {niche_data.get('name', chosen_niche)}
- Description: {niche_data.get('description', '')}
- Synopsis: {niche_data.get('synopsis', '')}
- Monetization: {niche_data.get('monetization_model', '')}
- Target Audience: {niche_data.get('target_audience', '')}
- Affiliate Programs: {', '.join(niche_data.get('affiliate_programs', []))}
- Valuation band: {niche_data.get('valuation_band', '')}"""

    sales_prompt = f"""Write a compelling sales letter for the domain "{clean_domain}" targeting the "{chosen_niche}" niche.
{context_block}
{niche_desc_for_sales}
{discovery_section}
{brandkit_context}

This sales letter should be suitable for posting on domain marketplaces like Flippa or Sedo.

The letter should:
1. Open with a strong hook about the opportunity
2. Explain what this domain/business is (assume the reader knows nothing about the niche)
3. Detail 3-4 specific monetization methods and realistic revenue potential
4. Highlight the domain's brandability, memorability, and SEO value
5. Include what the buyer gets (the business-in-a-box concept: brand, website, copy, strategy)
6. Mention relevant affiliate programs or partnerships available
7. Close with urgency and a clear call to action

Write it in a professional but persuasive tone. Use markdown formatting (headers ##, bold **, bullet points -, numbered lists).
Keep it between 500-700 words. Make it specific to this domain and niche - not generic.

IMPORTANT: Return ONLY the markdown text. Do NOT wrap it in JSON or any other structure. No title field, no metadata — just the sales letter content in markdown."""

    sales_letter = call_llm_text_routed("sales_letter", sales_prompt, SYSTEM_PROMPT_BUILDER)

    hero_image_data = None
    if progress_callback:
        progress_callback("Generating hero image for site preview...", 4, 5)

    # Hero image is best-effort: any failure is logged and the package is
    # returned without an image.
    try:
        brand_info = package_data.get("brand", {})
        brand_name = "Business"
        opts = brand_info.get("options", [])
        rec = brand_info.get("recommended", 0)
        if opts and rec < len(opts):
            brand_name = opts[rec].get("name", "Business")

        # Fold any client visual preferences into the image prompt.
        visual_style = ""
        if discovery_context:
            if discovery_context.get("visual_style"):
                visual_style = f"Visual style: {discovery_context['visual_style']}."
            if discovery_context.get("color_palette"):
                visual_style += f" Color mood: {discovery_context['color_palette']}."
            if discovery_context.get("desired_feeling"):
                visual_style += f" The image should evoke: {discovery_context['desired_feeling']}."

        image_prompt = f"""Create a professional, modern hero banner image for a website about {chosen_niche}. 
The brand is called "{brand_name}". 
Style: Clean, modern, professional. High-quality stock-photo style.
The image should visually communicate the industry/niche of {chosen_niche}.
Do NOT include any text or logos in the image. Just a beautiful, relevant background image.
Color palette hint: use tones related to {brand_info.get('color_primary', '#4F46E5')}.
{visual_style}"""

        hero_image_data = generate_image_routed("hero_image", image_prompt, size="1536x1024")
        logger.info(f"Generated hero image for {clean_domain}")
    except Exception as e:
        logger.warning(f"Hero image generation failed for {clean_domain}: {e}")
        hero_image_data = None

    if progress_callback:
        progress_callback("Package complete!", 5, 5)

    result = {
        "domain": clean_domain,
        "chosen_niche": chosen_niche,
        "brand": package_data.get("brand", {}),
        "site_copy": package_data.get("site_copy", {}),
        "sales_letter": sales_letter,
    }

    # Only attach the image key when generation actually produced data.
    if hero_image_data:
        result["hero_image_data"] = hero_image_data

    return result
