#!/usr/bin/env python3
"""
ICAC v1.0 — Intelligence City Administration Console
Backend FastAPI — Port 7778
"""

import os
import json
import hashlib
import re
import time
import asyncio
import logging
from datetime import datetime
from pathlib import Path
from typing import Optional

# Load .env before anything reads os.environ
from dotenv import load_dotenv
load_dotenv(Path(__file__).parent / ".env")

from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from pydantic import BaseModel

from agents_python.synthesizer import Synthesizer
from agents_python.commune_data import get_commune_data
from agents_python.data_fetcher import DataFetcher, fetch_association_rna, fetch_web_fallback, fetch_for_query
from agents_python.csv_subventions import search_subventions
from agents_python.audit_mandats import audit_from_rag
from agents_python.rag_engine import RAGEngine
from agents_python.scraper_besseges import (
    search_association, list_associations, scrape_page
)

# --- CONFIG ---
PORT = 7778  # backend HTTP port (also stated in the module docstring)
COMMUNE_INSEE = os.environ.get("COMMUNE_INSEE", "30034")  # default commune: Bessèges (Gard)
CACHE_DIR = Path(__file__).parent / "cache"  # JSON response cache (1h TTL, see get_cached)
DATA_DIR = Path(__file__).parent / "data"
CONFIG_DIR = Path(__file__).parent / "config"
DOCS_DIR = DATA_DIR / "documents"  # uploaded PDFs land here and get RAG-indexed

# Create the writable directories up front so routes can assume they exist.
CACHE_DIR.mkdir(exist_ok=True)
DOCS_DIR.mkdir(parents=True, exist_ok=True)

# --- LOGGING ---
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    datefmt="%H:%M:%S"
)
log = logging.getLogger("icac")

# --- Fn MENUS (mirror of frontend) ---
# Preset questions bound to the frontend's F1..F8 keys. Each entry carries
# the synthesis "category", a display label, and exactly five canned
# questions addressed by index 0-4 via POST /api/fn.
# NOTE(review): "mirror of frontend" implies this must stay in sync with
# the menu defined in icac_shell.html — confirm when editing either side.
FN_MENUS = {
    "F1": {
        "category": "subventions",
        "label": "SUBVENTIONS & FINANCEMENTS",
        "questions": [
            "Quelles subventions sont disponibles pour la rénovation de bâtiments publics ?",
            "Quels appels à projets DETR sont ouverts pour notre commune ?",
            "Existe-t-il des fonds européens accessibles pour notre projet de voirie ?",
            "Quelles aides régionales sont disponibles pour le développement numérique ?",
            "Quel est le calendrier des dossiers DSIL pour 2026 ?"
        ]
    },
    "F2": {
        "category": "finances",
        "label": "FINANCES & BUDGET",
        "questions": [
            "Quelle est la situation financière globale de la commune ?",
            "Comment nos dépenses se comparent-elles aux communes de même strate ?",
            "Quel est notre taux d'endettement et notre capacité de désendettement ?",
            "Évolution de l'épargne brute sur les 5 dernières années ?",
            "Quels sont nos principaux postes de dépenses de fonctionnement ?"
        ]
    },
    "F3": {
        "category": "depenses",
        "label": "ANALYSE DES DÉPENSES",
        "questions": [
            "Comment ont été dépensés les 119 000€ de communication et relations publiques ?",
            "Quels fournisseurs ont été payés sur le chapitre 65 cette année ?",
            "Y a-t-il des paiements fractionnés sous le seuil des marchés publics ?",
            "Liste des mandats émis sur le poste fêtes et cérémonies en 2023 ?",
            "Comparer les dépenses de personnel avec la moyenne nationale de notre strate ?"
        ]
    },
    "F4": {
        "category": "associations",
        "label": "ASSOCIATIONS",
        "questions": [
            "Quelles associations sont actives sur la commune ?",
            "Quelles subventions municipales ont été versées aux associations cette année ?",
            "L'association X est-elle toujours déclarée en préfecture ?",
            "Quelles associations du secteur culturel sont présentes sur la commune ?",
            "Historique des subventions versées à une association sur 5 ans ?"
        ]
    },
    "F5": {
        "category": "entreprises",
        "label": "ENTREPRISES LOCALES",
        "questions": [
            "Quelles entreprises sont actives sur la commune ?",
            "Créations et fermetures d'entreprises sur les 12 derniers mois ?",
            "Quelles entreprises du BTP sont immatriculées dans un rayon de 20km ?",
            "Quels sont les principaux secteurs d'activité de la commune ?",
            "Évolution de l'emploi privé sur la commune sur 5 ans ?"
        ]
    },
    "F6": {
        "category": "deliberations",
        "label": "DÉLIBÉRATIONS & ARCHIVES",
        "questions": [
            "Quand a-t-on voté la dernière révision du PLU ?",
            "Quelle délibération autorise le maire à signer ce contrat ?",
            "Rechercher toutes les délibérations sur la thématique voirie en 2024 ?",
            "Historique des décisions de Conseil Municipal sur les marchés publics ?",
            "Quelles associations ont reçu des subventions par délibération cette année ?"
        ]
    },
    "F7": {
        "category": "marches",
        "label": "MARCHÉS PUBLICS",
        "questions": [
            "Quels marchés publics arrivent à échéance dans les 6 prochains mois ?",
            "Quels appels d'offres publics correspondent à nos besoins actuels ?",
            "Quel est notre engagement total avec notre principal prestataire ?",
            "Obligations de publicité pour notre prochain marché de travaux ?",
            "Seuils actuels des marchés publics pour les communes ?"
        ]
    },
    "F8": {
        "category": "documents",
        "label": "DOCUMENTS UPLOADÉS",
        "questions": [
            "Résumer le dernier compte de gestion uploadé ?",
            "Extraire les données financières clés du budget primitif ?",
            "Analyser les factures du dernier trimestre ?",
            "Comparer le budget primitif avec le compte administratif ?",
            "Lister tous les documents disponibles dans la base ?"
        ]
    }
}

# --- REQUEST MODELS ---
class QueryRequest(BaseModel):
    """Payload of POST /api/query: a free-form question for one commune."""
    query: str                          # natural-language question
    commune_insee: str = COMMUNE_INSEE  # INSEE code; defaults to env-configured commune

class FnRequest(BaseModel):
    """Payload of POST /api/fn: one preset question from the Fn menus."""
    fn: str          # "F1" to "F8"
    index: int       # 0-4
    commune_insee: str = COMMUNE_INSEE  # INSEE code; defaults to env-configured commune

# --- DETECT INTENT ---
def detect_intent(query: str) -> dict:
    """Inspect *query* and decide which data sources the pipeline must hit.

    Returns a dict carrying boolean "need_*" flags, an optional proper-noun
    entity ("entity_name"/"entity_type"), and an optional "doc_type" used to
    target the RAG search.
    """
    lowered = query.lower()

    def mentions(keywords) -> bool:
        # Plain substring matching against the lower-cased query.
        return any(word in lowered for word in keywords)

    intent = {
        "need_associations":  False,
        "need_entreprises":   False,
        "need_subventions":   False,
        "need_marches":       False,
        "need_besseges_site": False,
        "need_jo":            False,
        "need_sirene":        False,
        "entity_name":        None,
        "entity_type":        None,
        "doc_type":           None,
    }

    # Association vocabulary → besseges.fr scraping + RNA/JO registry lookups.
    if mentions((
        "association", "président", "presidente", "bureau", "siège", "club",
        "comité", "jardins", "perséphone", "loi 1901", "activité associative",
        "adhérent", "bénévole", "vie associative", "dirige", "préside",
        "félin", "maison",
    )):
        intent.update(need_associations=True, need_jo=True, need_besseges_site=True)

    # Business vocabulary → SIRENE registry.
    if mentions(("entreprise", "société", "siren", "siret",
                 "commerce", "artisan", "gérant", "employeur")):
        intent.update(need_entreprises=True, need_sirene=True)

    # Grants / funding.
    if mentions(("subvention", "financement", "aide", "detr",
                 "dsil", "dotation", "fonds", "appel à projet",
                 "subventions 2025", "subventions aux associations")):
        intent["need_subventions"] = True

    # Public procurement.
    if mentions(("marché", "appel d'offre", "boamp",
                 "prestataire", "attributaire", "lot")):
        intent["need_marches"] = True

    # Town-hall website, for any local-government topic.
    if mentions(("mairie", "conseil municipal", "délibération",
                 "bulletin", "arrêté", "maire", "élu", "conseiller")):
        intent["need_besseges_site"] = True

    # Proper-noun extraction (specific entity), skipping common sentence
    # openers and the commune's own name.
    capitalized = re.findall(
        r'\b[A-ZÀÂÉÈÊËÎÏÔÙÛÜÇ][a-zàâéèêëîïôùûüç]+(?:\s+'
        r'[A-ZÀÂÉÈÊËÎÏÔÙÛÜÇ][a-zàâéèêëîïôùûüç]+)*\b',
        query
    )
    ignored = {
        "Quels", "Quelle", "Quelles", "Comment",
        "Combien", "Donner", "Donne", "Trouver",
        "Bessèges", "Commune", "Mairie", "Quel",
        "Liste", "Rechercher", "Historique",
        "Qui", "Est", "Les", "Des", "Sur",
    }
    candidates = [word for word in capitalized if word not in ignored]
    if candidates:
        intent["entity_name"] = candidates[0]
        if intent["need_associations"]:
            intent["entity_type"] = "association"
        elif intent["need_entreprises"]:
            intent["entity_type"] = "entreprise"
        else:
            intent["entity_type"] = "personne"

    # --- doc_type routing for the RAG search (first match wins) ---
    if mentions(("dépense", "dépensé", "recette", "chapitre", "article",
                 "compte", "gestion", "bp", "budget primitif",
                 "résultat", "excédent", "déficit", "m14",
                 "mandatement", "titre", "régie",
                 "communication", "fournisseur", "mandat",
                 "fractionn", "prestataire")):
        intent["doc_type"] = "compte_gestion"
    elif mentions(("délibération", "vote", "unanimité",
                   "séance", "pv", "procès verbal", "ordre du jour")):
        intent["doc_type"] = "pv_conseil"
    elif mentions(("budget prévisionnel", "budget voté",
                   "investissement prévu", "fonctionnement prévu")):
        intent["doc_type"] = "budget_primitif"
    elif mentions(("bulletin", "info municipale", "magazine")):
        intent["doc_type"] = "bulletin_municipal"
    elif intent["need_marches"]:
        intent["doc_type"] = "marche_public"

    return intent


async def enrich_with_scraping(query: str, intent: dict, commune_insee: str) -> tuple:
    """Run real-time scraping according to the detected intent.

    Returns (extra_sources, media_result): extra_sources is a list of
    {"domain", "text"[, "url"]} dicts, media_result a photo dict or None.
    """
    sources = []
    media = None

    # Associations → besseges.fr scrape + RNA registry.
    if intent["need_associations"]:
        target = intent.get("entity_name")
        if target:
            # A specific association was named: look it up directly.
            hit = await search_association(target)
            if hit["found"]:
                sources.append({
                    "domain": "besseges.fr",
                    "text":   hit["text"],
                    "url":    hit["url"]
                })
                photo = hit.get("photo_url")
                if photo:
                    media = {
                        "type":    "photo",
                        "url":     photo,
                        "caption": target
                    }

            # National RNA registry lookup for the same entity.
            registry = await fetch_association_rna(target, commune_insee)
            if registry["found"]:
                summary = (
                    f"Association {registry['name']} "
                    f"RNA: {registry['rna']} "
                    f"Adresse: {registry['adresse']} "
                    f"Créée: {registry['date_creation']}"
                )
                sources.append({"domain": "RNA/JO", "text": summary})
        else:
            # No specific entity: scrape the general associations listing.
            listing = await scrape_page(
                "https://www.besseges.fr/associations/"
            )
            if listing:
                sources.append({
                    "domain": "besseges.fr",
                    "text":   listing
                })

    # Town-hall site for any other local topic (skipped when the
    # association branch above already scraped besseges.fr).
    if intent["need_besseges_site"] and not intent["need_associations"]:
        homepage = await scrape_page("https://www.besseges.fr")
        if homepage:
            sources.append({
                "domain": "besseges.fr/actualites",
                "text":   homepage[:2000]
            })

    return sources, media


# --- EXTRACT MEDIA ---
def _extract_media(sources: list) -> Optional[dict]:
    """Extrait une photo si une source en contient une."""
    for s in sources:
        if s.get("photo_url"):
            return {
                "type":    "photo",
                "url":     s["photo_url"],
                "caption": s.get("entity", "")
            }
    return None


# --- CACHE ---
def cache_key(query: str, insee: str) -> str:
    """MD5 fingerprint of (query, insee), case-folded and outer-whitespace-insensitive."""
    normalized = "::".join((query, insee)).lower().strip()
    return hashlib.md5(normalized.encode()).hexdigest()

def get_cached(key: str, max_age: int = 3600) -> Optional[dict]:
    """Return the cached payload for *key* if present and fresh, else None.

    Entries older than *max_age* seconds (default 1h, based on the "ts"
    timestamp stored by set_cache) count as a miss.
    """
    path = CACHE_DIR / f"{key}.json"
    if not path.exists():
        return None
    try:
        # Explicit UTF-8: entries are written with ensure_ascii=False, so
        # they contain accented French text; the platform default codec
        # (e.g. cp1252 on Windows) could fail to decode them.
        data = json.loads(path.read_text(encoding="utf-8"))
        if time.time() - data.get("ts", 0) > max_age:
            return None
        return data
    except Exception:
        # Best-effort cache: any unreadable/corrupt entry is just a miss.
        return None

def set_cache(key: str, result: dict):
    """Persist *result* as <key>.json in CACHE_DIR with a "ts" timestamp.

    The timestamp is what get_cached uses for its freshness check.
    """
    data = {"ts": time.time(), **result}
    path = CACHE_DIR / f"{key}.json"
    # Explicit UTF-8: ensure_ascii=False emits accented characters, which
    # would crash write_text under a non-UTF-8 platform default encoding
    # and must round-trip with get_cached's utf-8 read.
    path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")

# --- APP ---
app = FastAPI(
    title="ICAC API",
    version="1.0",
    description="Intelligence City Administration Console — Backend"
)

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests (the CORS spec forbids
# "*" with credentials) — confirm whether credentials are actually needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- SINGLETONS ---
# Populated once by the startup hook below; None until the app has started.
synthesizer: Optional[Synthesizer] = None
data_fetcher: Optional[DataFetcher] = None
rag_engine: Optional[RAGEngine] = None

# --- REQUEST QUEUE ---
# NOTE(review): request_queue and processing are never referenced elsewhere
# in this module — presumably reserved for a future throttling feature;
# confirm before removing.
request_queue = asyncio.Queue(maxsize=50)
processing = False

@app.on_event("startup")
async def startup():
    """Instantiate the module-level singletons (synthesizer, data fetcher,
    RAG engine) and log readiness.

    NOTE(review): @app.on_event is deprecated in recent FastAPI versions in
    favour of lifespan handlers — works today, worth migrating eventually.
    """
    global synthesizer, data_fetcher, rag_engine
    log.info("ICAC Backend starting — port %d", PORT)
    log.info("Commune INSEE: %s", COMMUNE_INSEE)

    synthesizer = Synthesizer()
    data_fetcher = DataFetcher()
    rag_engine = RAGEngine(
        docs_dir=str(DOCS_DIR),
        chroma_dir=str(DATA_DIR / "chroma")
    )

    log.info("Synthesizer ready (Groq: %s, Ollama fallback: %s)",
             "OK" if synthesizer.groq_available else "NO KEY",
             synthesizer.ollama_url)
    log.info("RAG Engine: %s", rag_engine.get_stats())
    log.info("ICAC Backend ready.")

# --- ROUTES ---

@app.get("/api/status")
async def status():
    """System state: LLM backends, indexed docs, configured sources, cache size."""
    source_names = [
        "data.gouv.fr", "sirene", "aides-territoires",
        "jo-associations", "boamp",
    ]
    pdf_count = sum(1 for _ in DOCS_DIR.glob("*.pdf"))
    cache_count = sum(1 for _ in CACHE_DIR.glob("*.json"))
    return {
        "status": "online",
        "version": "1.0",
        "commune_insee": COMMUNE_INSEE,
        "groq_available": synthesizer.groq_available if synthesizer else False,
        "ollama_url": synthesizer.ollama_url if synthesizer else None,
        "docs_indexed": pdf_count,
        "sources": [{"name": name, "status": "configured"} for name in source_names],
        "cache_entries": cache_count,
        "timestamp": datetime.now().isoformat()
    }

@app.get("/api/commune/{insee}")
async def commune_data(insee: str):
    """Static + dynamic data for one commune; 404 when unknown."""
    data = get_commune_data(insee)
    if data:
        return data
    raise HTTPException(status_code=404, detail=f"Commune {insee} non trouvée")

@app.post("/api/query")
async def query(req: QueryRequest):
    """Free-form query — v2.2 pipeline: mandate audit + grants + proposals.

    Steps: cache lookup → intent detection → structured-API context →
    local RAG (targeted then global) → mandate audit (spending category) →
    systematic web fetch → LLM synthesis. The live result is cached.
    """
    log.info("QUERY [%s]: %s", req.commune_insee, req.query[:80])

    # Serve from the JSON file cache when a fresh entry exists (1h TTL).
    ck = cache_key(req.query, req.commune_insee)
    cached = get_cached(ck)
    if cached:
        log.info("Cache hit: %s", ck[:8])
        return {"source": "cache", "lines": cached["lines"], "synth": cached["synth"],
                "media": cached.get("media")}

    # --- Intent detection (drives the targeted RAG pass) ---
    intent = detect_intent(req.query)
    log.info("Intent: %s", intent)

    # Determine category from query
    category = synthesizer.detect_category(req.query)

    # --- Commune config handed to fetch_for_query ---
    commune_cfg = {
        "insee": req.commune_insee,
        "nom_court": "Bessèges",
        "cp": "30160",
        "departement": "30",
    }

    # Fetch context data (structured APIs); best-effort, failure only logged.
    context = ""
    try:
        context = await data_fetcher.get_context(req.query, req.commune_insee, category)
    except Exception as e:
        log.warning("Data fetch failed: %s", e)

    # --- STEP 1: local RAG (PDFs, threshold 0.35) — doc_type-targeted pass ---
    rag_ctx = ""
    rag_chunks = []
    if rag_engine and intent.get("doc_type"):
        rag_chunks = rag_engine.search(req.query, doc_type=intent["doc_type"])
        rag_ctx = rag_engine.get_context(req.query, doc_type=intent["doc_type"])
        log.info("RAG ciblé (%s): %dc", intent["doc_type"], len(rag_ctx))

    # Fallback: untargeted RAG pass when the targeted one returned nothing.
    if not rag_ctx and rag_engine:
        rag_chunks = rag_engine.search(req.query)
        rag_ctx = rag_engine.get_context(req.query, doc_type=None)
        log.info("RAG global: %dc", len(rag_ctx))

    if rag_ctx:
        context = f"{context}\n\n{rag_ctx}" if context else rag_ctx

    # --- STEP 1b: mandate audit (spending category only) ---
    if category == "depenses":
        try:
            audit_ctx = audit_from_rag(rag_chunks, req.query)
            if audit_ctx:
                context = f"{context}\n\n{audit_ctx}" if context else audit_ctx
                log.info("Audit mandats injecté: %dc", len(audit_ctx))
        except Exception as e:
            log.warning("Audit mandats failed: %s", e)

    # --- STEP 2: SYSTEMATIC web crawl — always runs ---
    rag_empty = len(rag_ctx.strip()) == 0
    web_sources = []
    try:
        web_sources = await fetch_for_query(req.query, commune_cfg)
    except Exception as e:
        log.warning("fetch_for_query failed: %s", e)

    # --- STEP 3: photo, when a web source provides one ---
    media_result = _extract_media(web_sources)

    # Append the web sources to the synthesis context.
    if web_sources:
        scraping_ctx = "\n\n".join([
            f"[Source: {s['domain']}]\n{s['text']}"
            for s in web_sources if s.get("text")
        ])
        context = f"{context}\n\n{scraping_ctx}" if context else scraping_ctx

    # --- STEP 4: Groq synthesis over the accumulated context ---
    try:
        result = await synthesizer.synthesize(
            query=req.query,
            category=category,
            commune_insee=req.commune_insee,
            context=context,
            rag_empty=rag_empty
        )
    except Exception as e:
        log.error("Synthesis failed: %s", e)
        # Degraded payload so the frontend still has something to render.
        result = {
            "lines": [
                {"t": "icac", "h": "ERREUR — Synthèse indisponible."},
                {"t": "dim", "h": str(e)[:80]}
            ],
            "synth": f"Erreur de synthèse:\n{e}"
        }

    # Enrich the result with media, intent and source provenance.
    if media_result:
        result["media"] = media_result
    result["intent"] = intent
    result["sources"] = (
        ["Documents locaux"] if rag_ctx else []
    ) + [s["domain"] for s in web_sources]
    result["sources_scraping"] = [s.get("domain") for s in web_sources]

    # Proposals (v2.2) — guarantee the key exists for the frontend.
    if "propositions" not in result:
        result["propositions"] = []

    # Cache result
    set_cache(ck, result)

    return {"source": "live", **result}

@app.post("/api/fn")
async def fn_query(req: FnRequest):
    """Preset question from an Fn panel — same v2.2 pipeline as /api/query.

    Validates the (fn, index) pair against FN_MENUS, then runs: cache →
    intent → structured-API context → RAG → mandate audit → web fetch →
    LLM synthesis, echoing the resolved question in the response.
    """
    menu = FN_MENUS.get(req.fn)
    if not menu:
        raise HTTPException(status_code=400, detail=f"Menu {req.fn} inconnu")
    if req.index < 0 or req.index >= len(menu["questions"]):
        raise HTTPException(status_code=400, detail=f"Index {req.index} hors limite")

    question = menu["questions"][req.index]
    category = menu["category"]

    log.info("FN [%s][%d]: %s", req.fn, req.index, question[:60])

    # Serve from the JSON file cache when a fresh entry exists (1h TTL).
    ck = cache_key(question, req.commune_insee)
    cached = get_cached(ck)
    if cached:
        log.info("Cache hit: %s", ck[:8])
        return {"source": "cache", "question": question, "lines": cached["lines"],
                "synth": cached["synth"], "media": cached.get("media")}

    # --- Commune config handed to fetch_for_query ---
    commune_cfg = {
        "insee": req.commune_insee,
        "nom_court": "Bessèges",
        "cp": "30160",
        "departement": "30",
    }

    # --- Intent detection (drives the targeted RAG pass) ---
    intent = detect_intent(question)
    log.info("Intent: %s", intent)

    # Fetch context (structured APIs); best-effort, failure only logged.
    context = ""
    try:
        context = await data_fetcher.get_context(question, req.commune_insee, category)
    except Exception as e:
        log.warning("Data fetch failed: %s", e)

    # --- STEP 1: local RAG — doc_type-targeted pass first ---
    rag_ctx = ""
    rag_chunks = []
    if rag_engine and intent.get("doc_type"):
        rag_chunks = rag_engine.search(question, doc_type=intent["doc_type"])
        rag_ctx = rag_engine.get_context(question, doc_type=intent["doc_type"])
        log.info("RAG ciblé (%s): %dc", intent["doc_type"], len(rag_ctx))

    # Fallback: untargeted RAG pass when the targeted one returned nothing.
    if not rag_ctx and rag_engine:
        rag_chunks = rag_engine.search(question)
        rag_ctx = rag_engine.get_context(question, doc_type=None)
        log.info("RAG global: %dc", len(rag_ctx))

    if rag_ctx:
        context = f"{context}\n\n{rag_ctx}" if context else rag_ctx

    # --- STEP 1b: mandate audit (spending category only) ---
    if category == "depenses":
        try:
            audit_ctx = audit_from_rag(rag_chunks, question)
            if audit_ctx:
                context = f"{context}\n\n{audit_ctx}" if context else audit_ctx
                log.info("Audit mandats injecté: %dc", len(audit_ctx))
        except Exception as e:
            log.warning("Audit mandats failed: %s", e)

    # --- STEP 2: SYSTEMATIC web crawl — always runs ---
    rag_empty = len(rag_ctx.strip()) == 0
    web_sources = []
    try:
        web_sources = await fetch_for_query(question, commune_cfg)
    except Exception as e:
        log.warning("fetch_for_query failed: %s", e)

    # --- STEP 3: photo, when a web source provides one ---
    media_result = _extract_media(web_sources)

    # Append the web sources to the synthesis context.
    if web_sources:
        scraping_ctx = "\n\n".join([
            f"[Source: {s['domain']}]\n{s['text']}"
            for s in web_sources if s.get("text")
        ])
        context = f"{context}\n\n{scraping_ctx}" if context else scraping_ctx

    # --- STEP 4: Groq synthesis over the accumulated context ---
    try:
        result = await synthesizer.synthesize(
            query=question,
            category=category,
            commune_insee=req.commune_insee,
            context=context,
            rag_empty=rag_empty
        )
    except Exception as e:
        log.error("Synthesis failed: %s", e)
        # Degraded payload so the frontend still has something to render.
        result = {
            "lines": [{"t": "icac", "h": "ERREUR — Synthèse indisponible."}, {"t": "dim", "h": str(e)[:80]}],
            "synth": f"Erreur de synthèse:\n{e}"
        }

    # Enrich the result with media, intent and source provenance.
    if media_result:
        result["media"] = media_result
    result["intent"] = intent
    result["sources"] = (
        ["Documents locaux"] if rag_ctx else []
    ) + [s["domain"] for s in web_sources]

    # Proposals (v2.2) — guarantee the key exists for the frontend.
    if "propositions" not in result:
        result["propositions"] = []

    set_cache(ck, result)
    return {"source": "live", "question": question, **result}

@app.get("/api/docs")
async def list_docs():
    """Inventory of indexed PDF documents: name, size and mtime."""
    documents = []
    for pdf in sorted(DOCS_DIR.glob("*.pdf")):
        info = pdf.stat()
        documents.append({
            "name": pdf.name,
            "size": info.st_size,
            "modified": datetime.fromtimestamp(info.st_mtime).isoformat()
        })
    return {"count": len(documents), "documents": documents}

@app.post("/api/upload")
async def upload_doc(file: UploadFile = File(...)):
    """Upload a PDF document into DOCS_DIR and index it into the RAG engine.

    Returns upload metadata plus the number of indexed chunks (0 when the
    RAG engine is unavailable or indexing failed).
    Raises HTTP 400 for non-PDF (or missing) filenames.
    """
    # Security: the client controls file.filename. Strip any directory
    # components (e.g. "../../x.pdf") so the file can only land inside
    # DOCS_DIR, and tolerate a missing filename (None → empty → rejected).
    safe_name = Path(file.filename or "").name
    if not safe_name.lower().endswith(".pdf"):
        raise HTTPException(status_code=400, detail="Seuls les fichiers PDF sont acceptés")

    dest = DOCS_DIR / safe_name
    content = await file.read()
    dest.write_bytes(content)

    log.info("Document uploadé: %s (%d octets)", safe_name, len(content))

    # Index in RAG engine (best-effort: an indexing failure is logged,
    # the upload itself is still reported as successful).
    chunks = 0
    if rag_engine:
        try:
            chunks = rag_engine.index_document(str(dest))
            log.info("RAG indexed %s: %d chunks", safe_name, chunks)
        except Exception as e:
            log.warning("RAG indexation failed for %s: %s", safe_name, e)

    return {
        "status": "uploaded",
        "filename": safe_name,
        "size": len(content),
        "indexed": chunks > 0,
        "chunks": chunks,
        "message": f"Document uploadé et indexé ({chunks} chunks)." if chunks else "Document uploadé (RAG non disponible)."
    }

@app.get("/api/rag/stats")
async def rag_stats():
    """RAG engine statistics, or a not-initialized marker."""
    return rag_engine.get_stats() if rag_engine else {"initialized": False}

@app.post("/api/rag/index-all")
async def rag_index_all():
    """(Re)index every PDF in the documents folder; 503 without a RAG engine."""
    if not rag_engine:
        raise HTTPException(status_code=503, detail="RAG engine not available")
    return rag_engine.index_all_documents()

# --- SERVE STATIC (icac_shell.html) ---
@app.get("/")
async def serve_shell():
    """Serve the frontend shell when present, else a plain API banner."""
    shell_path = Path(__file__).parent / "icac_shell.html"
    if not shell_path.exists():
        return {"message": "ICAC API running", "docs": "/docs"}
    return FileResponse(shell_path)

# --- MAIN ---
if __name__ == "__main__":
    import uvicorn
    # Dev entry point: bind all interfaces on the fixed port (7778).
    uvicorn.run(app, host="0.0.0.0", port=PORT, log_level="info")
