import json
import logging
import datetime
from typing import Optional

from app.services.context_engine import assemble_context, get_full_context_for_domain
from app.services.llm import call_llm_text

logger = logging.getLogger(__name__)

# Lifecycle statuses a concept can hold as it moves through the advisor pipeline,
# from initial capture through planning to final disposition.
CONCEPT_STATUSES = ["captured", "analyzed", "planned", "combined", "staged", "implemented", "parked"]

# Progress-step descriptors for each advisor operation, surfaced to clients while
# the operation runs. Each step carries a stable "key", a short UI "label", a
# longer "description", and a rough "est_seconds" duration hint (0 = terminal step).

# Steps reported while analyzing a single concept (see build_analysis_prompt).
ADVISOR_ANALYZE_STEPS = [
    {"key": "init", "label": "Preparing analysis", "description": "Loading concept and project context", "est_seconds": 2},
    {"key": "context", "label": "Assembling context", "description": "Gathering roadmap state, tasks, and project data", "est_seconds": 3},
    {"key": "ai_analysis", "label": "AI analyzing concept", "description": "Classifying NEW vs LEGACY, identifying dependencies and scope", "est_seconds": 20},
    {"key": "complete", "label": "Analysis complete", "description": "Concept analyzed with classification and recommendations", "est_seconds": 0},
]

# Steps reported while generating a plan for one concept (see build_plan_prompt).
ADVISOR_PLAN_STEPS = [
    {"key": "init", "label": "Preparing plan", "description": "Loading concept analysis and project state", "est_seconds": 2},
    {"key": "context", "label": "Assembling context", "description": "Gathering full project context for planning", "est_seconds": 3},
    {"key": "ai_plan", "label": "AI generating plan", "description": "Creating staged implementation plan with tasks, risks, and tests", "est_seconds": 30},
    {"key": "complete", "label": "Plan complete", "description": "Implementation plan generated with staged tasks", "est_seconds": 0},
]

# Steps reported while synthesizing a multi-concept batch plan (see build_batch_prompt).
ADVISOR_BATCH_STEPS = [
    {"key": "init", "label": "Preparing batch", "description": "Loading all selected concepts", "est_seconds": 2},
    {"key": "context", "label": "Assembling context", "description": "Merging concept analyses and project state", "est_seconds": 3},
    {"key": "ai_batch", "label": "AI synthesizing batch plan", "description": "Creating parallel tracks, dependency graph, and staged execution plan", "est_seconds": 40},
    {"key": "complete", "label": "Batch plan complete", "description": "Unified implementation plan with parallel processing tracks", "est_seconds": 0},
]


def build_advisor_context(db_session) -> dict:
    """Snapshot the current project state for embedding in advisor prompts.

    Args:
        db_session: SQLAlchemy-style session used to query project models.

    Returns:
        dict with keys:
            "roadmap": roadmap entries bucketed by status ("planned",
                "in_progress", "done"); done entries additionally carry
                "completed_at" (ISO-8601 string or None).
            "tasks": task summaries grouped by their list name.
            "domains": per-domain analyzed flag and package count.
            "timestamp": snapshot time in UTC as an ISO-8601 string with
                a trailing "Z".
    """
    # Imported lazily — presumably to avoid a circular import at module load.
    from app.models import RoadmapItem, BuildTask, Package, Domain

    # Bucket roadmap items by status; unknown statuses are intentionally skipped.
    roadmap_state = {"planned": [], "in_progress": [], "done": []}
    for item in db_session.query(RoadmapItem).all():
        entry = {
            "id": item.id,
            "title": item.title,
            "description": item.description or "",
            "category": item.category or "",
        }
        if item.status in ("planned", "in_progress"):
            roadmap_state[item.status].append(entry)
        elif item.status == "done":
            # Only completed items carry a completion timestamp.
            entry["completed_at"] = item.completed_at.isoformat() if item.completed_at else None
            roadmap_state["done"].append(entry)

    # Group task summaries by list name.
    task_state = {}
    for t in db_session.query(BuildTask).all():
        task_state.setdefault(t.list_name, []).append({
            "title": t.title,
            "status": t.status,
            "category": t.category or "",
        })

    # One package-count query per domain (N+1 pattern; acceptable at this scale).
    domain_state = [
        {
            "domain": d.domain,
            "analyzed": d.analyzed_at is not None,
            "packages": db_session.query(Package).filter(Package.domain_id == d.id).count(),
        }
        for d in db_session.query(Domain).all()
    ]

    return {
        "roadmap": roadmap_state,
        "tasks": task_state,
        "domains": domain_state,
        # datetime.utcnow() is deprecated (Python 3.12+). An aware UTC timestamp
        # with "+00:00" replaced by "Z" yields the exact same string the old
        # naive-utcnow + "Z" code produced.
        "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat().replace("+00:00", "Z"),
    }


def build_analysis_prompt(concept_title: str, concept_input: str, advisor_context: dict, project_context: str = "") -> str:
    """Compose the LLM prompt for analyzing a single concept.

    Embeds the roadmap/domain/task snapshot from *advisor_context*, an optional
    domain-specific *project_context* section, and the concept itself, then
    instructs the model to reply with a strictly-JSON analysis.
    """
    roadmap = advisor_context.get("roadmap", {})
    planned = roadmap.get("planned", [])
    in_progress = roadmap.get("in_progress", [])
    done = roadmap.get("done", [])
    domains_json = json.dumps(advisor_context.get("domains", []))
    tasks_json = json.dumps(advisor_context.get("tasks", {}))

    # Optional section: rendered only when domain-specific context was supplied.
    domain_section = "--- DOMAIN-SPECIFIC CONTEXT ---\n" + project_context if project_context else ""

    return f"""You are Aura Advisor, an AI architectural analyst for the Aura platform — an AI-powered domain-to-business generator.

--- CONTEXT DOCTRINE ---
You must use ALL provided context to inform your analysis. Context is the active ingredient, not background. The most expensive analysis is the one that ignores available data.

--- CURRENT PROJECT STATE ---
ROADMAP:
- Planned ({len(planned)} items): {json.dumps(planned)}
- In Progress ({len(in_progress)} items): {json.dumps(in_progress)}
- Done ({len(done)} items): {json.dumps(done)}

ACTIVE DOMAINS: {domains_json}

TASK LISTS: {tasks_json}

{domain_section}

--- CONCEPT TO ANALYZE ---
Title: {concept_title}
Description: {concept_input}

--- YOUR TASK ---
Analyze this concept and produce a structured JSON response with these fields:
{{
    "classification": "NEW" or "LEGACY" (NEW = entirely new behavior, LEGACY = changes to existing behavior),
    "classification_reasoning": "Why this is NEW or LEGACY",
    "scope": "small" | "medium" | "large" | "epic",
    "scope_estimate": "Human-readable estimate of effort",
    "dependencies": ["List of existing systems/features this depends on or affects"],
    "risks": ["Potential risks or things that could go wrong"],
    "prerequisites": ["Things that must exist or be done first"],
    "related_roadmap_items": ["Titles of related roadmap items already tracked"],
    "suggested_approach": "High-level approach recommendation",
    "key_questions": ["Questions that should be answered before implementation"],
    "impact_areas": ["Parts of the codebase or user experience this would affect"]
}}

Respond with ONLY valid JSON."""


def build_plan_prompt(concept_title: str, concept_input: str, analysis: dict, advisor_context: dict, project_context: str = "") -> str:
    """Compose the LLM prompt that turns an analyzed concept into a staged plan.

    Embeds the roadmap/task snapshot from *advisor_context*, an optional
    domain-specific *project_context* section, the concept, and its prior
    *analysis*, then instructs the model to reply with a strictly-JSON
    implementation plan.
    """
    roadmap = advisor_context.get("roadmap", {})
    planned_json = json.dumps(roadmap.get("planned", []))
    in_progress_json = json.dumps(roadmap.get("in_progress", []))
    done_json = json.dumps(roadmap.get("done", []))
    tasks_json = json.dumps(advisor_context.get("tasks", {}))
    analysis_json = json.dumps(analysis, indent=2)

    # Falls back to NEW when the prior analysis did not classify the concept.
    classification = analysis.get("classification", "NEW")

    # Optional section: rendered only when domain-specific context was supplied.
    domain_section = "--- DOMAIN-SPECIFIC CONTEXT ---\n" + project_context if project_context else ""

    return f"""You are Aura Advisor, an AI implementation planner for the Aura platform — an AI-powered domain-to-business generator.

--- CONTEXT DOCTRINE ---
You must use ALL provided context. Full structured context in every prompt. No assumptions — use what's provided.

--- DEVELOPMENT PROTOCOL ---
- For NEW behavior: strict TDD — write failing test first, then minimal code to pass, then refactor.
- For LEGACY behavior: characterize first — add tests capturing current behavior, then evolve.
- Always assume this code is serving production content.
- Prefer small, composable functions over large rewrites.

--- CURRENT PROJECT STATE ---
ROADMAP:
- Planned: {planned_json}
- In Progress: {in_progress_json}
- Done: {done_json}

TASK LISTS: {tasks_json}

{domain_section}

--- CONCEPT ---
Title: {concept_title}
Description: {concept_input}

--- PRIOR ANALYSIS ---
{analysis_json}

--- YOUR TASK ---
Generate a detailed implementation plan as structured JSON:
{{
    "plan_title": "Descriptive title for this implementation plan",
    "classification": "{classification}",
    "phases": [
        {{
            "phase_number": 1,
            "title": "Phase title",
            "description": "What this phase accomplishes",
            "tasks": [
                {{
                    "title": "Task title",
                    "description": "What to do",
                    "type": "test" | "implementation" | "refactor" | "config",
                    "files_affected": ["list of files"],
                    "estimated_effort": "small" | "medium" | "large"
                }}
            ]
        }}
    ],
    "test_requirements": [
        {{
            "test_name": "Name of test",
            "test_type": "unit" | "integration" | "e2e",
            "what_it_validates": "Description of what this test proves",
            "is_characterization": false
        }}
    ],
    "risks_and_mitigations": [
        {{
            "risk": "What could go wrong",
            "severity": "low" | "medium" | "high",
            "mitigation": "How to prevent or handle it"
        }}
    ],
    "success_criteria": ["List of things that must be true when this is done"],
    "estimated_total_effort": "Human-readable total estimate"
}}

Respond with ONLY valid JSON."""


def build_batch_prompt(concepts: list, analyses: dict, advisor_context: dict) -> str:
    """Compose the LLM prompt that merges several concepts into one batch plan.

    Each concept dict (keys: "id", "title", "raw_input") is reduced to a compact
    summary — description capped at 500 characters, enriched with its prior
    analysis from *analyses* when one exists — before being embedded alongside
    the roadmap/task snapshot.
    """
    def summarize(concept: dict) -> dict:
        # Prior analysis is optional; missing fields degrade to "unknown"/empty.
        prior = analyses.get(concept["id"], {})
        return {
            "id": concept["id"],
            "title": concept["title"],
            "description": concept["raw_input"][:500],
            "classification": prior.get("classification", "unknown"),
            "scope": prior.get("scope", "unknown"),
            "dependencies": prior.get("dependencies", []),
        }

    summaries_json = json.dumps([summarize(c) for c in concepts], indent=2)
    roadmap_json = json.dumps(advisor_context.get("roadmap", {}))
    tasks_json = json.dumps(advisor_context.get("tasks", {}))

    return f"""You are Aura Advisor, an AI batch planner for the Aura platform.

--- CONTEXT DOCTRINE ---
Use ALL provided context. Identify shared infrastructure, parallel tracks, and sequential dependencies across concepts.

--- CURRENT PROJECT STATE ---
ROADMAP: {roadmap_json}
TASKS: {tasks_json}

--- CONCEPTS TO BATCH ---
{summaries_json}

--- YOUR TASK ---
Synthesize these concepts into a single staged implementation plan with parallel processing where possible. Return structured JSON:
{{
    "batch_title": "Descriptive title for this batch plan",
    "parallel_tracks": [
        {{
            "track_name": "Name of parallel track",
            "description": "What this track accomplishes",
            "concept_ids": [1, 2],
            "can_run_parallel_with": ["other track names"],
            "phases": [
                {{
                    "phase_number": 1,
                    "title": "Phase title",
                    "tasks": [
                        {{
                            "title": "Task title",
                            "concept_id": 1,
                            "estimated_effort": "small" | "medium" | "large"
                        }}
                    ]
                }}
            ]
        }}
    ],
    "dependency_graph": {{
        "sequential_dependencies": [
            {{"before": "track/concept name", "after": "track/concept name", "reason": "why"}}
        ],
        "shared_infrastructure": ["things multiple concepts need that should be built once"]
    }},
    "execution_order": ["Ordered list of what to build first, second, etc."],
    "total_estimated_effort": "Human-readable estimate",
    "risk_summary": ["Top risks across all concepts"]
}}

Respond with ONLY valid JSON."""
