"""
Orchestrator Engine Tests — TDD: written BEFORE implementation.

The orchestrator composes all engines into a single pipeline:
  blueprint → valuation → context → theme → validator

These tests validate OUTPUT CONTRACTS — the actual data products each
stage produces and hands to the next consumer. Not just "did it crash?"
but "did it deliver the specific payload the next stage needs?"
"""
import pytest
from aura_core.orchestrator.engine import (
    PIPELINE_STAGES,
    get_pipeline_config,
    OrchestratorConfig,
    OrchestratorResult,
    run_stage,
    assemble_package,
    validate_stage_output,
)


class TestPipelineStages:
    """Contract checks for the static PIPELINE_STAGES registry."""

    def test_stages_defined(self):
        assert len(PIPELINE_STAGES) >= 5

    def test_stages_have_required_keys(self):
        # Every registry entry must carry the full stage descriptor.
        for entry in PIPELINE_STAGES:
            for key in ("name", "module", "order", "required"):
                assert key in entry

    def test_stages_ordered(self):
        sequence = [entry["order"] for entry in PIPELINE_STAGES]
        assert sequence == sorted(sequence)

    def test_required_stages_present(self):
        mandatory = [entry["name"] for entry in PIPELINE_STAGES if entry["required"]]
        assert "blueprint" in mandatory
        assert "theme" in mandatory

    def test_stage_names_unique(self):
        seen = [entry["name"] for entry in PIPELINE_STAGES]
        assert len(seen) == len(set(seen))


class TestOrchestratorConfig:
    """get_pipeline_config must hand back a well-formed OrchestratorConfig."""

    def test_default_config(self):
        cfg = get_pipeline_config()
        assert isinstance(cfg, OrchestratorConfig)
        assert cfg.depth == "comprehensive"

    def test_custom_depth(self):
        assert get_pipeline_config(depth="legendary").depth == "legendary"

    def test_domain_required(self):
        assert get_pipeline_config(domain="test.com").domain == "test.com"

    def test_skip_stages(self):
        cfg = get_pipeline_config(skip_stages=["valuation"])
        assert "valuation" in cfg.skip_stages

    def test_default_has_no_skips(self):
        assert len(get_pipeline_config().skip_stages) == 0


class TestBlueprintStageOutput:
    """Blueprint stage output contract: sections, prompt_spec, json_schema,
    and a validation report — everything the next stage consumes."""

    def test_produces_blueprint_with_sections(self):
        outcome = run_stage("blueprint", {"depth": "minimal"})
        assert outcome.ok is True
        blueprint = outcome.output["blueprint"]
        assert "sections" in blueprint
        assert len(blueprint["sections"]) > 0

    def test_sections_have_fields_and_ai_instructions(self):
        blueprint = run_stage("blueprint", {"depth": "standard"}).output["blueprint"]
        for sec in blueprint["sections"]:
            if not sec["enabled"]:
                continue
            assert len(sec["fields"]) > 0, f"{sec['key']} has no fields"
            assert len(sec["ai_instructions"]) > 20, f"{sec['key']} ai_instructions too short"

    def test_produces_prompt_spec_with_section_instructions(self):
        spec = run_stage("blueprint", {"depth": "minimal"}).output["prompt_spec"]
        assert "hero" in spec
        assert "Instructions:" in spec
        assert len(spec) > 200

    def test_produces_json_schema_with_field_definitions(self):
        schema = run_stage("blueprint", {"depth": "minimal"}).output["json_schema"]
        for needle in ('"site_copy"', '"hero"', "headline", "chars"):
            assert needle in schema

    def test_validation_passes_for_valid_blueprint(self):
        report = run_stage("blueprint", {"depth": "comprehensive"}).output["validation"]
        assert report["valid"] is True
        assert len(report["errors"]) == 0

    def test_depth_controls_enabled_section_count(self):
        def enabled_count(depth):
            # Count only sections the depth profile switched on.
            sections = run_stage("blueprint", {"depth": depth}).output["blueprint"]["sections"]
            return sum(1 for s in sections if s["enabled"])

        assert enabled_count("minimal") < enabled_count("legendary")

    def test_legendary_uses_override_instructions(self):
        spec = run_stage("blueprint", {"depth": "legendary"}).output["prompt_spec"]
        assert "Fortune 500" in spec or "CMO" in spec

    def test_content_multiplier_scales_field_lengths(self):
        low = run_stage("blueprint", {"depth": "minimal"}).output
        high = run_stage("blueprint", {"depth": "legendary"}).output
        assert "14-84 chars" in low["json_schema"] or "chars" in low["json_schema"]
        assert low["blueprint"]["content_multiplier"] < high["blueprint"]["content_multiplier"]


class TestThemeStageOutput:
    """Theme stage output contract: colors (with rgb strings), mood profile,
    card_style, dividers, icons, animated_bg — everything needed to render."""

    # Canonical theme inputs shared by most tests in this class.
    _THEME_INPUTS = {
        "primary_color": "#4F46E5",
        "secondary_color": "#7C3AED",
        "accent_color": "#06B6D4",
        "mood": "professional",
        "niche": "technology",
    }

    def _run_theme(self):
        # Copy so a stage implementation that mutates its input can't
        # contaminate later tests.
        return run_stage("theme", dict(self._THEME_INPUTS))

    def test_produces_color_system(self):
        outcome = self._run_theme()
        assert outcome.ok is True
        palette = outcome.output["colors"]
        for key in ("primary", "primary_rgb", "secondary", "accent"):
            assert key in palette
        assert palette["primary"].startswith("#")
        assert "," in palette["primary_rgb"]

    def test_produces_mood_profile(self):
        mood_profile = self._run_theme().output["profile"]
        for key in ("card_style", "section_header_style", "border_radius", "shadow_intensity"):
            assert key in mood_profile

    def test_produces_section_backgrounds(self):
        backgrounds = self._run_theme().output["section_backgrounds"]
        assert isinstance(backgrounds, dict)
        assert len(backgrounds) > 0

    def test_produces_dividers(self):
        divider_map = self._run_theme().output["dividers"]
        assert isinstance(divider_map, dict)
        assert len(divider_map) > 0

    def test_produces_icons(self):
        icon_map = self._run_theme().output["icons"]
        assert isinstance(icon_map, dict)
        assert len(icon_map) > 0

    def test_produces_animated_bg_keyframes(self):
        payload = self._run_theme().output
        assert "animated_bg_keyframes" in payload
        assert len(payload["animated_bg_keyframes"]) > 0

    def test_produces_animated_bg_layers(self):
        payload = self._run_theme().output
        assert "animated_bg_layers" in payload
        assert len(payload["animated_bg_layers"]) > 0

    def test_different_moods_produce_different_profiles(self):
        professional = run_stage("theme", {"mood": "professional", "niche": "tech"})
        bold = run_stage("theme", {"mood": "bold", "niche": "tech"})
        assert professional.output["profile"] != bold.output["profile"]
        assert professional.output["card_style"] != bold.output["card_style"]


class TestValuationStageOutput:
    """Valuation stage output contract: domain intrinsic value, revenue models,
    developed value — or a graceful skip when required inputs are missing."""

    @staticmethod
    def _saas_inputs():
        # Fresh payload per call so stage-side mutation can't leak between tests.
        return {
            "domain": "techsolutions.com",
            "niches": [{"name": "SaaS tools", "monetization_model": "saas", "score": 8.0}],
        }

    def test_skips_without_domain(self):
        outcome = run_stage("valuation", {})
        assert outcome.ok is True
        assert outcome.output.get("skipped") is True

    def test_skips_without_niches(self):
        outcome = run_stage("valuation", {"domain": "test.com"})
        assert outcome.ok is True
        assert outcome.output.get("skipped") is True

    def test_produces_intrinsic_valuation(self):
        outcome = run_stage("valuation", self._saas_inputs())
        assert outcome.ok is True
        assert "domain_intrinsic" in outcome.output
        intrinsic = outcome.output["domain_intrinsic"]
        assert intrinsic["tld"] == ".com"
        assert intrinsic["domain_only_value"] > 0
        assert intrinsic["name_part"] == "techsolutions"
        assert intrinsic["length"] > 0
        assert isinstance(intrinsic["tld_multiplier"], float)

    def test_produces_revenue_models_with_net_profit(self):
        outcome = run_stage("valuation", self._saas_inputs())
        valuation = outcome.output["niche_valuations"][0]
        models = valuation["revenue_by_model"]
        assert len(models) >= 1
        # First entry is the model that best fits the niche's monetization.
        assert models[0]["is_primary_fit"] is True
        assert models[0]["model_key"] == "saas"
        for entry in models:
            revenue = entry["monthly_revenue"]
            for key in ("net_profit_low", "net_profit_high",
                        "gross_low", "gross_high",
                        "fulfillment_cost_low", "fulfillment_cost_high"):
                assert key in revenue
            assert revenue["gross_high"] >= revenue["gross_low"]
            assert isinstance(revenue["net_profit_low"], int)
            assert isinstance(revenue["net_profit_high"], int)

    def test_produces_best_developed_value(self):
        payload = run_stage("valuation", self._saas_inputs()).output
        assert payload["best_developed_value"] > 0
        assert payload["best_monthly_net"] > 0


class TestValidatorStageOutput:
    """Validator stage output contract: a report with scores, errors, repairs."""

    def test_validates_empty_site_copy(self):
        outcome = run_stage("validator", {"site_copy": {}, "brand": {}})
        assert outcome.ok is True
        for key in ("valid", "score", "error_count"):
            assert key in outcome.output

    def test_validates_partial_site_copy(self):
        payload = {
            "site_copy": {"headline": "Welcome to Our Platform"},
            "brand": {"name": "TestBrand"},
        }
        outcome = run_stage("validator", payload)
        assert outcome.ok is True
        assert outcome.output["score"] >= 0


class TestContextStageOutput:
    """Context stage output contract: a domain context string for AI prompt injection."""

    def test_produces_context_with_domain(self):
        outcome = run_stage("context", {"domain": "test.com"})
        assert outcome.ok is True
        payload = outcome.output
        assert isinstance(payload, dict)
        assert payload["domain"] == "test.com"
        assert "context_text" in payload
        assert "test.com" in payload["context_text"]

    def test_context_text_is_nonempty_string(self):
        text = run_stage("context", {"domain": "example.com"}).output["context_text"]
        assert isinstance(text, str)
        assert len(text) > 10


class TestStageErrorPropagation:
    """run_stage must fail gracefully and always report timing."""

    def test_unknown_stage_fails_gracefully(self):
        outcome = run_stage("nonexistent", {})
        assert outcome.ok is False
        assert len(outcome.errors) > 0
        assert "Unknown stage" in outcome.errors[0]

    def test_stage_returns_timing_even_on_success(self):
        assert run_stage("blueprint", {"depth": "minimal"}).elapsed_ms >= 0

    def test_stage_returns_timing_even_on_failure(self):
        assert run_stage("nonexistent", {}).elapsed_ms >= 0


class TestValidateStageOutput:
    """validate_stage_output verdicts for well-formed and missing stage outputs."""

    def test_valid_blueprint_output_has_sections(self):
        stage_result = run_stage("blueprint", {"depth": "minimal"})
        ok, problems = validate_stage_output("blueprint", stage_result.output)
        assert ok is True
        assert len(problems) == 0

    def test_none_output_invalid(self):
        ok, problems = validate_stage_output("blueprint", None)
        assert ok is False
        assert len(problems) > 0


class TestAssemblePackage:
    """assemble_package merges per-stage results into one deliverable package.

    Fix: the total-timing assertion now allows 0 ms. The timing contract asserted
    elsewhere in this file is ``elapsed_ms >= 0``, and a sub-millisecond stage can
    legitimately report 0 on a coarse clock, so the previous ``> 0`` was flaky.
    """

    def test_assembles_blueprint_and_theme_outputs(self):
        bp_result = run_stage("blueprint", {"depth": "minimal"})
        theme_result = run_stage("theme", {
            "primary_color": "#4F46E5",
            "secondary_color": "#7C3AED",
            "accent_color": "#06B6D4",
            "mood": "professional",
            "niche": "technology",
        })
        package = assemble_package(
            domain="test.com",
            stage_results={"blueprint": bp_result, "theme": theme_result},
        )
        assert package["domain"] == "test.com"
        assert "blueprint" in package
        assert "theme" in package
        # The blueprint stage nests its blueprint dict under its own key.
        assert "sections" in package["blueprint"]["blueprint"]
        assert "colors" in package["theme"]
        assert len(package["stages_completed"]) == 2

    def test_assembled_blueprint_contains_prompt_spec(self):
        bp_result = run_stage("blueprint", {"depth": "comprehensive"})
        package = assemble_package(
            domain="test.com",
            stage_results={"blueprint": bp_result},
        )
        assert "prompt_spec" in package["blueprint"]
        assert "json_schema" in package["blueprint"]
        assert len(package["blueprint"]["prompt_spec"]) > 200

    def test_assembled_theme_contains_css_variables(self):
        theme_result = run_stage("theme", {
            "primary_color": "#4F46E5",
            "mood": "professional",
            "niche": "tech",
        })
        package = assemble_package(
            domain="test.com",
            stage_results={"theme": theme_result},
        )
        colors = package["theme"]["colors"]
        assert colors["primary"].startswith("#")
        assert "," in colors["primary_rgb"]

    def test_empty_results_still_valid(self):
        # No stage results at all is a valid (if empty) package.
        package = assemble_package(domain="test.com", stage_results={})
        assert package["domain"] == "test.com"
        assert len(package["stages_completed"]) == 0

    def test_includes_total_timing(self):
        bp_result = run_stage("blueprint", {"depth": "minimal"})
        package = assemble_package(
            domain="test.com",
            stage_results={"blueprint": bp_result},
        )
        assert "total_elapsed_ms" in package
        # >= 0, not > 0: a fast stage may report 0 ms on a coarse clock,
        # matching the elapsed_ms assertions in TestStageErrorPropagation.
        assert package["total_elapsed_ms"] >= 0

    def test_failed_stage_tracked_separately(self):
        good = run_stage("blueprint", {"depth": "minimal"})
        bad = run_stage("nonexistent", {})
        package = assemble_package(
            domain="test.com",
            stage_results={"blueprint": good, "bad_stage": bad},
        )
        assert "blueprint" in package["stages_completed"]
        assert "bad_stage" in package["stages_failed"]
        # Failed stages must not leak any payload into the package body.
        assert "blueprint" in package
        assert "bad_stage" not in package


class TestOrchestratorResult:
    """OrchestratorResult is a plain record summarizing a full pipeline run."""

    def test_result_structure(self):
        outcome = OrchestratorResult(
            domain="test.com",
            success=True,
            stages_completed=["blueprint", "theme"],
            stages_failed=[],
            package={"domain": "test.com"},
            total_elapsed_ms=42.0,
        )
        assert outcome.success is True
        assert len(outcome.stages_completed) == 2
        assert outcome.total_elapsed_ms == 42.0

    def test_failed_result(self):
        outcome = OrchestratorResult(
            domain="test.com",
            success=False,
            stages_completed=["blueprint"],
            stages_failed=["theme"],
            package={},
            total_elapsed_ms=10.0,
        )
        assert outcome.success is False
        assert "theme" in outcome.stages_failed
