#!/usr/bin/env python3
"""
[CYBER-STRAT] Serveur local de developpement v1.2
Port 7777 — Pipeline multi-sources + synthese Ollama + geo-resolution
Usage : python3 server.py
"""

import http.server
import json
import re
import urllib.request
import urllib.parse
import os
import hashlib
import time
import sys
import concurrent.futures
import unicodedata

PORT = int(os.getenv("PORT", "7777"))
HOST = os.getenv("HOST", "0.0.0.0")
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PUBLIC_DIR = os.path.join(BASE_DIR, "public")
CACHE_DIR = os.path.join(BASE_DIR, "cache")
AGENTS_DIR = os.path.join(BASE_DIR, "agents_python")
CACHE_TTL = 86400  # 24h in seconds
MAX_SUMMARY_CHARS = 4500
WIKI_TEXT_BUDGET = 2500   # v4.8f — Wikipedia text budget for the synthesis
PRESS_TEXT_BUDGET = 8000  # v4.9f — press/media text budget for the synthesis (was 3500)
MAX_IMAGES = 2
WIKI_MIN_CHARS = 200  # Threshold to treat Wikipedia as a primary source
# Note: the Wikipedia REST API /page/summary returns short extracts (~200-400 chars).
# The original threshold of 300 caused false negatives (e.g. Vladimir Poutine = 240 chars).

# Add agents_python to the path so its modules can be imported
sys.path.insert(0, AGENTS_DIR)

# Import the dedicated synthesis module (Groq first, Ollama as fallback)
# Ollama config lives in ollama_config.py, Groq config in synthesizer.py
from synthesizer import GeoSynthesizer
_synthesizer = GeoSynthesizer()

# Import the geo-resolution module (v1.2 — Nominatim + Leaflet)
from geo_resolver import GeoResolver
_geo_resolver = GeoResolver()


def _strip_accents(text):
    """Retirer les accents d'un texte (ex: 'hervé' → 'herve').
    v4.6 — utilise dans filter_sources_by_person et autres filtres.
    """
    import unicodedata
    nfkd = unicodedata.normalize("NFKD", text)
    return "".join(c for c in nfkd if not unicodedata.combining(c))


# ── v4.8 — WIKIPEDIA FUNCTIONS: DIRECT PAGE OR GIVE UP ───────────────

def _extract_lastname(query):
    """v4.8 — Extraire le nom de famille (compose ou simple).
    Gere les particules : Le, La, De, Du, Des, Van, Von, etc.
    Retourne en minuscule sans accents pour comparaison.

    Exemples :
    'Marie Le Boiteux' → 'le boiteux'
    'Fabrice Epelboin'  → 'epelboin'
    'Etienne Klein'     → 'klein'
    'Jean de La Fontaine' → 'de la fontaine'
    """
    _PARTICLES = {
        "le", "la", "de", "du", "des",
        "von", "van", "di", "el", "al", "ben", "ibn",
    }
    parts = query.strip().split()
    if len(parts) < 2:
        return _strip_accents(query.strip().lower())

    # Walk back from the end, accumulating particles
    lastname_parts = [parts[-1]]
    for i in range(len(parts) - 2, 0, -1):
        if parts[i].lower() in _PARTICLES:
            lastname_parts.insert(0, parts[i])
        else:
            break

    return _strip_accents(" ".join(lastname_parts).lower())


def _build_name_variants(name):
    """v4.8 — Variantes du nom pour URL directe Wikipedia.
    Retourne une liste dedupliquee de slugs (underscores).

    'Fabrice Epelboin' → ['Fabrice_Epelboin', 'Fabrice_Epelboin' (ascii),
                           'Epelboin_Fabrice', ...]
    """
    variants = [name.replace(" ", "_")]

    ascii_name = _strip_accents(name)
    if ascii_name != name:
        variants.append(ascii_name.replace(" ", "_"))

    # Invert first name / last name
    parts = name.split()
    if len(parts) == 2:
        inverted = f"{parts[1]} {parts[0]}"
        variants.append(inverted.replace(" ", "_"))
        ascii_inv = _strip_accents(inverted)
        if ascii_inv != inverted:
            variants.append(ascii_inv.replace(" ", "_"))

    # Deduplicate while preserving order
    seen = set()
    unique = []
    for v in variants:
        if v not in seen:
            seen.add(v)
            unique.append(v)
    return unique
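

# Illustrative sketch (not wired into the pipeline): variants produced for
# an accented name, checked against the logic above.
def _demo_build_name_variants():
    assert _build_name_variants("Étienne Klein") == [
        "Étienne_Klein",   # original
        "Etienne_Klein",   # accents stripped
        "Klein_Étienne",   # inverted
        "Klein_Etienne",   # inverted + stripped
    ]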


def _is_contemporary(extract):
    """v4.8 — Retourne False si mort avant 1950 ou ne avant 1900.
    Analyse les 500 premiers caracteres de l'extrait Wikipedia.

    Note : utilise .*? (non-greedy) au lieu de [^0-9]* pour
    traverser les chiffres de jours/mois (ex: "mort le 22 sept 1897").
    """
    import re
    text = extract[:500]

    # Death patterns — .*? crosses day digits
    patterns_death = [
        r"d[ée]c[ée]d[ée]e?.*?(\d{4})",
        r"\bmort\b.*?(\d{4})",
        r"\bdied\b.*?(\d{4})",
        r"[†✝]\s*(\d{4})",
        r"\(\d{4}[-–]\s*(\d{4})\)",
    ]
    for p in patterns_death:
        m = re.search(p, text, re.IGNORECASE)
        if m and int(m.group(1)) < 1950:
            _log_static("INFO",
                f"[CONTEMPORARY] Deces {m.group(1)} < 1950 → rejet")
            return False

    # Birth patterns — .*? crosses day digits
    patterns_birth = [
        r"\bn[ée]e?\b.*?(\d{4})",
        r"\bborn\b.*?(\d{4})",
        r"\((\d{4})[-–]",
    ]
    for p in patterns_birth:
        m = re.search(p, text, re.IGNORECASE)
        if m and int(m.group(1)) < 1900:
            _log_static("INFO",
                f"[CONTEMPORARY] Naissance {m.group(1)} < 1900 → rejet")
            return False

    return True
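

# Illustrative sketch (not wired into the pipeline): expected verdicts of
# _is_contemporary on short, made-up extracts.
def _demo_is_contemporary():
    # "(1621-1695)" matches the parenthesised death-year pattern -> rejected
    assert _is_contemporary("Jean de La Fontaine (1621-1695) est un poète") is False
    # Birth year >= 1900 and no death pattern -> accepted
    assert _is_contemporary("né le 3 avril 1958 à Paris, est un physicien") is True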


def _page_matches_person(title, extract, query):
    """v4.8 — Fonction centrale : la page Wikipedia correspond-elle
    a la personne cherchee ?

    Remplace _is_relevant_wikipedia_result() (v4.5/v4.6).

    Regles par ordre de priorite :
    1. Organisation detectee dans l'extrait → rejete
    2. Nom de famille compose dans le titre → OK (si contemporain)
    3. Nom de famille simple dans le titre → OK (si contemporain)
    4. Sinon → rejete (trop risque)

    Cas concrets :
    - 'Fabrice Epelboin' vs 'Thinkerview' → regle 1 (org) → REJETE
    - 'Marie Le Boiteux' vs 'Gaston Boiteux' → regle 3
      mais mort 19e siecle → _is_contemporary() → REJETE
    - 'Etienne Klein' vs 'Etienne Klein' → regle 3 → ACCEPTE
    """
    title_ascii = _strip_accents(title.lower())
    extract_lower = extract.lower()

    # ── Rule 1: reject organisations ────────────────────────
    _ORG_SIGNALS = [
        "est une émission",
        "est une chaîne",
        "est un média",
        "est une association",
        "est une entreprise",
        "est une société",
        "est un parti",
        "est une organisation",
        "est un mouvement",
        "est un collectif",
        "est un groupe",
        "est un site web",
        "est un journal",
        "est une revue",
        "is a youtube channel",
        "is a television",
        "is a media",
        "is an organization",
        "is a company",
        "is a website",
        "is a newspaper",
    ]
    for signal in _ORG_SIGNALS:
        if signal in extract_lower[:300]:
            _log_static("WARN",
                f"[PAGE_MATCH] Organisation rejetee: "
                f"'{title}' (signal: '{signal}')")
            return False

    # ── Rule 2: compound last name in the title ─────────────
    lastname = _extract_lastname(query)
    if lastname and len(lastname) > 4:
        if lastname in title_ascii:
            if _is_contemporary(extract):
                # v4.8e — check for a first-name conflict before accepting
                if _firstname_conflicts(title_ascii, query):
                    _log_static("WARN",
                        f"[PAGE_MATCH] '{query}' → '{title}': "
                        f"nom compose '{lastname}' present mais "
                        f"prenom different → REJETE")
                    return False
                _log_static("INFO",
                    f"[PAGE_MATCH] '{query}' → '{title}': "
                    f"nom compose '{lastname}' → ACCEPTE")
                return True
            _log_static("WARN",
                f"[PAGE_MATCH] '{query}' → '{title}': "
                f"'{lastname}' trouve mais non contemporain → REJETE")
            return False
        # v4.8c — Compound name absent from the title → REJECT
        # immediately. Do NOT fall through to Rule 3 (simple name),
        # which would accept a partial homonym.
        # E.g.: "Khelil Ben Osman" → lastname="ben osman"
        #       title="Habib Osman" → "ben osman" absent → REJECT
        #       Without this block, Rule 3 would find "osman" → ACCEPT
        _log_static("WARN",
            f"[PAGE_MATCH] '{query}' → '{title}': "
            f"nom compose '{lastname}' ABSENT du titre → REJETE")
        return False

    # ── Rule 3: simple last name in the title ───────────────
    query_ascii = _strip_accents(query.lower())
    parts = [p for p in query_ascii.split() if len(p) > 3]
    if parts:
        simple_lastname = parts[-1]
        if simple_lastname in title_ascii:
            if _is_contemporary(extract):
                # v4.8e — check for a first-name conflict before accepting
                if _firstname_conflicts(title_ascii, query):
                    _log_static("WARN",
                        f"[PAGE_MATCH] '{query}' → '{title}': "
                        f"nom simple '{simple_lastname}' present mais "
                        f"prenom different → REJETE")
                    return False
                _log_static("INFO",
                    f"[PAGE_MATCH] '{query}' → '{title}': "
                    f"nom simple '{simple_lastname}' → ACCEPTE")
                return True
            _log_static("WARN",
                f"[PAGE_MATCH] '{query}' → '{title}': "
                f"'{simple_lastname}' trouve mais "
                f"non contemporain → REJETE")
            return False

    # ── Rule 4: no match → rejected ─────────────────────────
    _log_static("WARN",
        f"[PAGE_MATCH] '{query}' → '{title}': "
        f"aucune correspondance nom → REJETE")
    return False
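

# Illustrative sketch (not wired into the pipeline): rule outcomes of
# _page_matches_person on made-up titles/extracts.
def _demo_page_matches_person():
    # Rule 1: organisation signal in the extract -> rejected
    assert _page_matches_person(
        "Thinkerview", "Thinkerview est une chaîne YouTube de debats.",
        "Fabrice Epelboin") is False
    # Name present in the title, contemporary, same first name -> accepted
    assert _page_matches_person(
        "Étienne Klein", "Étienne Klein, né en 1958, est un physicien.",
        "Etienne Klein") is True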


def _firstname_conflicts(title_ascii, query):
    """v4.8e — Detecter un conflit de prenom entre la requete et le titre
    Wikipedia. Retourne True si le titre contient le nom de famille
    de la requete mais avec un PRENOM DIFFERENT → risque d'homonyme.

    Args:
        title_ascii: titre en minuscules sans accents
                     (deja normalise par _page_matches_person)
        query: requete originale (casse mixte, accents)

    Exemples:
        ("marguerite chapon", "Adrien Chapon")    → True  (conflit)
        ("adrien chapon", "Adrien Chapon")        → False (meme personne)
        ("etienne klein", "Étienne Klein")        → False
        ("gaston le boiteux", "Marie Le Boiteux") → True  (conflit)
        ("chapon", "Adrien Chapon")               → False (pas de prenom
                                                           dans le titre)
    """
    query_ascii = _strip_accents(query.lower())
    query_parts = query_ascii.split()

    if len(query_parts) < 2:
        return False

    query_firstname = query_parts[0]
    if len(query_firstname) <= 2:
        return False

    # Query first name present in the title → no conflict
    if query_firstname in title_ascii:
        return False

    # First name ABSENT from the title → look for a different first name
    _PARTICLES = {
        "le", "la", "de", "du", "des",
        "von", "van", "di", "el", "al", "ben", "ibn",
    }
    lastname = _extract_lastname(query)
    lastname_parts = set(lastname.split())
    title_words = title_ascii.split()

    for word in title_words:
        clean = word.replace("-", "")
        if (len(word) > 2
                and clean.isalpha()
                and word not in lastname_parts
                and word not in _PARTICLES):
            _log_static("WARN",
                f"[FIRSTNAME_CONFLICT] '{query}' vs titre "
                f"'{title_ascii}': prenom attendu "
                f"'{query_firstname}' absent, "
                f"autre prenom '{word}' detecte → CONFLIT")
            return True

    return False
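

# Illustrative sketch (not wired into the pipeline): the docstring examples
# of _firstname_conflicts written out as assertions.
def _demo_firstname_conflicts():
    assert _firstname_conflicts("marguerite chapon", "Adrien Chapon") is True
    assert _firstname_conflicts("adrien chapon", "Adrien Chapon") is False
    assert _firstname_conflicts("chapon", "Adrien Chapon") is False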


def _cap_press_budget(sources, press_budget=PRESS_TEXT_BUDGET):
    """v4.9f — Limiter le texte total des sources presse/medias.

    v4.9f : budget augmente a 8000, allocation intelligente
    (min 800 / max 3000 par source, sources riches prioritaires).
    Les sources Wikipedia ne sont PAS affectees.
    Retourne une NOUVELLE liste (immutabilite).
    """
    MIN_PER_SOURCE = 800
    MAX_PER_SOURCE = 3000

    wiki_sources = []
    press_sources = []

    for src in sources:
        if src.get("domain") == "wikipedia.org":
            wiki_sources.append(src)
        else:
            press_sources.append(src)

    # Sort press sources by content length (richest first)
    press_sorted = sorted(
        press_sources,
        key=lambda s: len(s.get("text", "")),
        reverse=True)

    result_press = []
    remaining_budget = press_budget

    for src in press_sorted:
        if remaining_budget <= 0:
            break
        text = src.get("text", "")
        if not text:
            continue

        allocation = min(MAX_PER_SOURCE, len(text), remaining_budget)
        allocation = max(allocation, min(MIN_PER_SOURCE, remaining_budget))

        if len(text) > allocation:
            truncated = text[:allocation]
            last_dot = truncated.rfind(".")
            if last_dot > allocation * 0.5:
                truncated = truncated[:last_dot + 1]
            result_press.append({**src, "text": truncated})
            remaining_budget -= len(truncated)
        else:
            result_press.append(src)
            remaining_budget -= len(text)

    # Rebuild: wiki first, then press
    return wiki_sources + result_press
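

# Illustrative sketch (not wired into the pipeline): with a 2000-char budget,
# the single press source is cut to exactly the remaining budget, while the
# Wikipedia source passes through untouched and stays first.
def _demo_cap_press_budget():
    sources = [
        {"domain": "lemonde.fr", "text": "x" * 5000},
        {"domain": "wikipedia.org", "text": "w" * 9999},
    ]
    out = _cap_press_budget(sources, press_budget=2000)
    assert out[0]["domain"] == "wikipedia.org"
    assert len(out[0]["text"]) == 9999  # wiki untouched
    assert len(out[1]["text"]) == 2000  # press capped at the budget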


# ── SOURCE FILTERING BY NAME (v2.2 — identity anti-drift) ─────────────

# v4.9b — Institutional sources excluded from the proximity filter
# (structured/JSON data — names live in separate fields)
INSTITUTIONAL_DOMAINS = [
    "data.gouv.fr",
    "wikidata.org",
    "lepolitique.net",
    "elus-locaux.fr",
    "elections.interieur.gouv.fr",
    "annuaire-administration.fr",
]


def _is_institutional(url):
    """v4.9b — Retourne True si l'URL est une source institutionnelle."""
    url_lower = url.lower()
    return any(d in url_lower for d in INSTITUTIONAL_DOMAINS)
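

# Illustrative sketch (not wired into the pipeline): substring match against
# the institutional allow-list above.
def _demo_is_institutional():
    assert _is_institutional("https://www.wikidata.org/wiki/Q123") is True
    assert _is_institutional("https://www.lemonde.fr/article.html") is False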


def _name_proximity_check(text, firstname, lastname, radius=150):
    """v4.9b — Verifie que prenom et nom de famille
    apparaissent a moins de `radius` chars l'un de l'autre.

    Empeche les cas ou le nom est dans les credits
    et le prenom n'apparait nulle part a proximite.
    Ex: "credits traduction: charpenet g." sans "gil" a cote.
    """
    positions = [
        m.start()
        for m in re.finditer(re.escape(lastname), text)
    ]
    if not positions:
        return False

    for pos in positions:
        window_start = max(0, pos - radius)
        window_end = min(len(text), pos + len(lastname) + radius)
        window = text[window_start:window_end]
        if firstname in window:
            return True

    return False
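

# Illustrative sketch (not wired into the pipeline): "gil" within 150 chars
# of "charpenet" passes; a far-away mention does not.
def _demo_name_proximity_check():
    near = "interview de gil charpenet, journaliste"
    far = "credits traduction: charpenet g." + " " * 300 + "gil"
    assert _name_proximity_check(near, "gil", "charpenet") is True
    assert _name_proximity_check(far, "gil", "charpenet") is False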


# v4.9d — Commercial registry domains (SIRET, share capital)
# Rejected for PERSON queries (not relevant for a bio)
COMMERCIAL_REGISTRY_DOMAINS = {
    "societe.com", "societes.com", "dnb.com", "infogreffe.fr",
    "verif.com", "manageo.fr", "companeo.com", "ellisphere.com",
    "pappers.fr", "annuaire-entreprises.data.gouv.fr",
    "bilan-gratuit.fr", "dirigeant.info",
    # Company sub-domains of media outlets (SIRET, not bio)
    "entreprises.lefigaro.fr", "societe.leparisien.fr",
}


def _is_commercial_registry(url):
    """v4.9d — True si l'URL est un registre commercial."""
    try:
        domain = urllib.parse.urlparse(url).netloc.lower()
        if domain.startswith("www."):
            domain = domain[4:]  # strip only a leading "www."
        return any(domain == d or domain.endswith("." + d)
                   for d in COMMERCIAL_REGISTRY_DOMAINS)
    except Exception:
        return False


def filter_sources_by_person(sources, query, min_matches=1):
    """Separer les sources en pertinentes/rejetees par nom.
    v4.6 : matching sans accents ('herve' == 'hervé').
    v4.9b : ajout filtre de proximite prenom/nom (>200 chars).
            Sources institutionnelles exclues du filtre proximite.
    Une source est pertinente si le nom complet y apparait au moins
    min_matches fois, OU si TOUTES les parties significatives du nom
    (>2 chars) sont presentes dans le texte.
    Retourne (relevant, rejected).
    """
    query_ascii = _strip_accents(query.lower())
    name_parts = [p for p in query_ascii.split() if len(p) > 2]
    firstname = name_parts[0] if name_parts else ""
    lastname = _strip_accents(_extract_lastname(query).lower())
    relevant = []
    rejected = []

    for src in sources:
        raw_text = (
            src.get("text", "") +
            " " + src.get("title", "") +
            " " + src.get("snippet", "")
        )
        text = _strip_accents(raw_text.lower())
        url = src.get("url", "")

        # Rule 0 (v4.9d): commercial registry → rejected
        # (SIRET/share capital not relevant for a personal bio)
        if _is_commercial_registry(url):
            rejected.append(src)
            _log_static("INFO",
                f"[R1] Registre commercial ignore: "
                f"{src.get('domain', '?')} — "
                f"{src.get('title', '')[:50]}")
            continue

        # Count occurrences of the full name (accent-stripped)
        full_name_count = text.count(query_ascii)

        # Count how many name parts are present (accent-stripped)
        parts_found = sum(1 for p in name_parts if p in text)

        # Rule 1: name absent → rejected
        if not (full_name_count >= min_matches
                or parts_found == len(name_parts)):
            rejected.append(src)
            _log_static("INFO",
                f"Source rejetee (nom absent): "
                f"{src.get('domain', '?')} — "
                f"{src.get('title', '')[:50]}")
            continue

        # Rule 2 (v4.9b): first/last name proximity
        # Skipped for: institutional sources, short texts,
        # full name already present, or insufficient data
        if (len(text) > 200
                and firstname and lastname
                and full_name_count == 0
                and not _is_institutional(url)):
            if not _name_proximity_check(
                    text, firstname, lastname):
                rejected.append(src)
                _log_static("INFO",
                    f"Source rejetee (proximite): "
                    f"{src.get('domain', '?')} — "
                    f"{src.get('title', '')[:50]}")
                continue

        relevant.append(src)

    _log_static("INFO",
        f"Filtrage sources '{query}': "
        f"{len(relevant)} pertinentes, {len(rejected)} rejetees")
    return relevant, rejected
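

# Illustrative sketch (not wired into the pipeline): a short press source
# naming the person is kept; a commercial-registry page is rejected by Rule 0.
def _demo_filter_sources_by_person():
    sources = [
        {"url": "https://example.org/a", "domain": "example.org",
         "title": "Portrait", "text": "Etienne Klein est physicien."},
        {"url": "https://www.societe.com/x", "domain": "societe.com",
         "title": "SIRET", "text": "Etienne Klein, gérant."},
    ]
    relevant, rejected = filter_sources_by_person(sources, "Etienne Klein")
    assert len(relevant) == 1 and rejected[0]["domain"] == "societe.com"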


def build_identity_block(query, identity):
    """Construire le bloc d'identite a injecter dans le prompt LLM.
    Ancre la synthese sur la personne exacte recherchee.
    """
    if not identity or identity.get("confidence") == "low":
        return (
            f"SUJET DE LA SYNTHESE : {query}\n"
            f"NE PAS DERIVER vers d'autres personnes ou sujets.\n"
        )

    block = f"SUJET DE LA SYNTHESE : {query}\n"
    block += "NE PAS DERIVER vers d'autres personnes ou sujets.\n\n"
    block += "IDENTITE CONFIRMEE :\n"
    block += f"- Nom : {identity['confirmed_name']}\n"

    if identity.get("profession"):
        block += f"- Profession/Activite : {identity['profession']}\n"
    if identity.get("location"):
        block += f"- Localisation : {identity['location']}\n"
    if identity.get("bio_snippet"):
        block += f"- Contexte : {identity['bio_snippet'][:200]}\n"

    if identity.get("profile_links"):
        block += "- Presence en ligne :\n"
        for lnk in identity["profile_links"][:3]:
            block += (
                f"  . {lnk['platform']} : {lnk['url']}\n")

    block += (
        f"\nREGLE ABSOLUE : La synthese doit porter UNIQUEMENT sur "
        f"'{query}' tel qu'identifie ci-dessus. "
        "Si les sources parlent d'une autre personne, les ignorer.\n"
    )
    return block
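

# Illustrative sketch (not wired into the pipeline): shape of the block for
# a medium-confidence identity with only a profession set. The block itself
# stays in French, as produced by build_identity_block above.
def _demo_build_identity_block():
    block = build_identity_block("Jane Doe", {
        "confidence": "medium",
        "confirmed_name": "Jane Doe",
        "profession": "journaliste",
    })
    assert "IDENTITE CONFIRMEE" in block
    assert "- Profession/Activite : journaliste" in block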


def _log_static(level, message):
    """Log prefixe (version statique pour fonctions hors classe)"""
    print(f"[CYBER-STRAT][{level}] {message}")


class CyberStratHandler(http.server.SimpleHTTPRequestHandler):
    """Gestionnaire HTTP pour Cyber Strat — Pipeline multi-sources v1.2"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, directory=PUBLIC_DIR, **kwargs)

    def do_OPTIONS(self):
        """Gestion CORS preflight"""
        self.send_response(204)
        self._send_cors_headers()
        self.end_headers()

    def do_POST(self):
        """Traitement des requetes API"""
        if self.path == "/api/query" or self.path == "/api/query.php":
            self._handle_query()
        else:
            self.send_error(404, "Endpoint non trouve")

    def do_GET(self):
        """Servir les fichiers statiques ou rediriger les requetes API"""
        if self.path.startswith("/api/"):
            self.send_error(405, "Utilisez POST pour les requetes API")
            return
        # Static files from public/
        super().do_GET()

    # ─────────────────────────────────────────────────────────────────────
    # MULTI-SOURCE PIPELINE v1.1
    # ─────────────────────────────────────────────────────────────────────

    def _handle_query(self):
        """Point d'entree API principal — Pipeline multi-sources
        Etape 1 : Wikipedia (fr + en)
        Etape 2 : Recherche medias DDG (si Wiki insuffisant)
        Etape 3 : Sources complementaires paralleles (Wikidata + DDG Instant)
        Regle : minimum 3 sources consultees
        """
        start_time = time.time()

        try:
            # Read the JSON body
            content_length = int(self.headers.get("Content-Length", 0))
            body = self.rfile.read(content_length)
            data = json.loads(body) if body else {}
        except (json.JSONDecodeError, ValueError):
            self._send_json(400, {"status": "error", "message": "JSON invalide"})
            return

        # Sanitisation
        query = data.get("query", "").strip()[:200]
        lang = data.get("lang", "fr")
        if lang not in ("fr", "en"):
            lang = "fr"

        if not query:
            self._send_json(400, {"status": "error", "message": "Requete vide"})
            return

        self._log("INFO", f"Requete recue: '{query}' (lang: {lang})")

        # Cache lookup
        cache_key = f"{lang}:{query}"
        cached = self._cache_get(cache_key)
        if cached:
            self._log("INFO", f"Cache hit pour: '{query}'")
            cached["processing_time_ms"] = round((time.time() - start_time) * 1000)
            self._send_json(200, cached)
            return

        # ── STEP 1b: PRIORITY Reflets.info lookup (names/pseudos) ──
        reflets_priority_sources = []
        reflets_already_done = False
        reflets_author_articles = []
        reflets_is_contributor = False
        try:
            from media_fetcher import is_reflets_priority, RefletsScraper
            if is_reflets_priority(query):
                self._log("INFO",
                    f"ETAPE 1b -- Requete prioritaire Reflets.info: '{query}'")
                try:
                    reflets = RefletsScraper()
                    reflets_data = reflets.fetch_sources(
                        query, max_results=3)
                    # fetch_sources returns a structured dict
                    reflets_priority_sources = reflets_data.get(
                        "sources", [])
                    reflets_author_articles = reflets_data.get(
                        "author_articles", [])
                    reflets_is_contributor = reflets_data.get(
                        "is_contributor", False)
                    reflets_already_done = True
                    if reflets_priority_sources:
                        self._log("INFO",
                            f"Reflets PRIORITE: {len(reflets_priority_sources)} "
                            f"sources, contributeur={reflets_is_contributor}, "
                            f"articles signes={len(reflets_author_articles)}")
                    else:
                        self._log("INFO", "Reflets PRIORITE: AUCUN RESULTAT")
                except Exception as e:
                    self._log("WARN", f"Echec Reflets priorite: {e}")
            else:
                self._log("INFO", "ETAPE 1b -- SKIP (pas de priorite Reflets)")
        except ImportError:
            pass

        # ── STEP 1: Wikipedia ───────────────────────────────────────
        self._log("INFO", f"ETAPE 1 -- Interrogation Wikipedia pour '{query}'")
        wiki_data = self._wikipedia_summary(query, lang)

        wikipedia_found = False
        wiki_summary = ""
        wiki_images = []
        wiki_title = query
        wiki_entity_type = "concept"
        wiki_url = None

        if not wiki_data.get("error"):
            wiki_summary = wiki_data.get("summary", "")
            wiki_images = wiki_data.get("images", [])
            wiki_title = wiki_data.get("title", query)
            wiki_entity_type = wiki_data.get("entity_type", "concept")
            wiki_url = wiki_data.get("wikipedia_url")
            wikipedia_found = len(wiki_summary) >= WIKI_MIN_CHARS
            self._log("INFO",
                f"Wikipedia: {'SUBSTANTIEL' if wikipedia_found else 'INSUFFISANT'} "
                f"({len(wiki_summary)} chars)")
        else:
            self._log("INFO", "Wikipedia: AUCUN RESULTAT")

        # ── STEP 2: media search FIRST (v4.6 — before notoriety) ──────────
        # v4.6: media BEFORE web notoriety, to preserve the DDG budget.
        # Wikipedia is one source AMONG others, not a short-circuit.
        # Media are always searched, to enrich the synthesis.
        media_sources = []
        self._log("INFO", f"ETAPE 2 -- Recherche medias pour '{query}'")
        try:
            from media_fetcher import MediaFetcher
            fetcher = MediaFetcher()
            media_sources = fetcher.fetch_media_sources(query, max_results=5)
            self._log("INFO", f"Medias: {len(media_sources)} sources exploitables")

            # Targeted DDG fallback when there is no standard media source
            if len(media_sources) == 0:
                self._log("INFO",
                    f"Aucune source media standard pour '{query}' "
                    f"— activation recherche DDG ciblee")
                try:
                    ddg_sources = fetcher.fetch_ddg_targeted(query)
                    media_sources.extend(ddg_sources)
                    self._log("INFO",
                        f"DDG cible: {len(ddg_sources)} sources trouvees")
                except Exception as e:
                    self._log("ERROR", f"DDG cible echec: {e}")

        except Exception as e:
            self._log("ERROR", f"Echec recherche medias: {e}")

        # ── STEP 1c: PERSON DISCRIMINATION (v4.9e) ──────────────────
        # v4.9e: replaces fetch_notoriete_web + resolve_person_identity
        # with a single algorithm using 1-2 Serper calls.
        notoriete_data = None
        notoriete_portrait = None
        person_identity = None
        _nw_query_words = query.strip().split()
        _nw_looks_like_name = (
            len(_nw_query_words) >= 2
            and all(w[0].isupper() for w in _nw_query_words
                    if len(w) > 1)
            and all(len(w) < 20 for w in _nw_query_words)
        )
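        # Illustrative outcomes of the heuristic above (sketch):
        #   "Etienne Klein"     -> True   (two capitalized words)
        #   "guerre en ukraine" -> False  (lowercase words)
        #   "OTAN"              -> False  (single word)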
        if _nw_looks_like_name:
            wiki_status = "Wikipedia present" if wikipedia_found else "Wikipedia absent"
            self._log("INFO",
                f"ETAPE 1c -- Discrimination personne '{query}' "
                f"({wiki_status})")
            try:
                from media_fetcher import MediaFetcher
                disc_fetcher = MediaFetcher()
                disc_result = disc_fetcher.discriminate_person(
                    query)
                person_identity = disc_result.get("identity")
                notoriete_data = disc_result.get("notoriete")
                if notoriete_data:
                    nw_sources = notoriete_data.get("sources", [])
                    notoriete_portrait = notoriete_data.get(
                        "portrait", None)
                    nw_meta = notoriete_data.get("meta", {})
                    self._log("INFO",
                        f"Discrimination: "
                        f"status={nw_meta.get('discrimination_status')}, "
                        f"{len(nw_sources)} sources, "
                        f"portrait={'OUI' if notoriete_portrait else 'NON'}, "
                        f"credits={nw_meta.get('serper_credits')}, "
                        f"{nw_meta.get('elapsed_ms', '?')}ms")
                if person_identity:
                    self._log("INFO",
                        f"Identite: "
                        f"confidence={person_identity.get('confidence')}, "
                        f"profession={person_identity.get('profession')}, "
                        f"links={len(person_identity.get('profile_links', []))}")
            except Exception as e:
                self._log("ERROR",
                    f"Echec discrimination personne: {e}")
        else:
            self._log("INFO",
                "ETAPE 1c -- SKIP discrimination "
                "(requete non-personne)")

        # ── Inject WEB NOTORIETY sources into media_sources (v4.5) ──
        # Notoriety sources come AFTER standard media so that they do
        # not eclipse them when both exist
        if notoriete_data and notoriete_data.get("sources"):
            nw_sources = notoriete_data["sources"]
            # Avoid URL duplicates with media already found
            existing_urls = {s.get("url", "") for s in media_sources}
            new_nw = [s for s in nw_sources
                      if s.get("url", "") not in existing_urls]
            if new_nw:
                media_sources.extend(new_nw)
                self._log("INFO",
                    f"Notoriete Web injectee: {len(new_nw)} sources "
                    f"ajoutees (total medias: {len(media_sources)})")

        # ── Inject priority Reflets sources into media_sources ─────────
        if reflets_priority_sources:
            media_sources = reflets_priority_sources + media_sources
            self._log("INFO",
                f"Sources Reflets prioritaires injectees en tete "
                f"(total medias: {len(media_sources)})")

        # ── STEP 2b: Reflets.info (cyber/influence topics) ──────────
        # SKIP if already handled in STEP 1b (priority query)
        try:
            from media_fetcher import is_reflets_relevant, RefletsScraper
            if reflets_already_done:
                self._log("INFO",
                    "ETAPE 2b -- SKIP (deja traite en ETAPE 1b)")
            elif is_reflets_relevant(query):
                self._log("INFO",
                    "ETAPE 2b -- Recherche Reflets.info "
                    "(thematique cyber/influence)")
                try:
                    reflets = RefletsScraper()
                    reflets_data = reflets.fetch_sources(query, max_results=3)
                    # fetch_sources returns a structured dict
                    reflets_sources = reflets_data.get("sources", [])
                    if reflets_sources:
                        media_sources.extend(reflets_sources)
                        self._log("INFO",
                            f"Reflets: {len(reflets_sources)} sources ajoutees "
                            f"(total medias: {len(media_sources)})")
                        # Fetch author info if not already done
                        if not reflets_is_contributor:
                            reflets_author_articles = reflets_data.get(
                                "author_articles", [])
                            reflets_is_contributor = reflets_data.get(
                                "is_contributor", False)
                    else:
                        self._log("INFO", "Reflets: AUCUN RESULTAT")
                except Exception as e:
                    self._log("WARN", f"Echec Reflets: {e}")
            else:
                self._log("INFO", "ETAPE 2b -- SKIP (theme non Reflets)")
        except ImportError:
            pass  # is_reflets_relevant unavailable

        # ── STEP 3: complementary sources (parallel) ────────────────
        self._log("INFO", "ETAPE 3 -- Sources complementaires (Wikidata + DDG Instant)")
        wikidata_info = None
        ddg_instant = None

        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                wikidata_future = executor.submit(self._fetch_wikidata, query, lang)
                ddg_future = executor.submit(self._fetch_ddg_instant, query)
                wikidata_info = wikidata_future.result(timeout=6)
                ddg_instant = ddg_future.result(timeout=6)
        except concurrent.futures.TimeoutError:
            self._log("WARN", "Timeout sur sources complementaires")
        except Exception as e:
            self._log("WARN", f"Erreur sources complementaires: {e}")

        if wikidata_info:
            self._log("INFO", f"Wikidata: OK ({wikidata_info.get('wikidata_id', '?')})")
        else:
            self._log("INFO", "Wikidata: AUCUN RESULTAT")

        if ddg_instant:
            self._log("INFO", f"DDG Instant: OK ({len(ddg_instant.get('abstract', ''))} chars)")
        else:
            self._log("INFO", "DDG Instant: AUCUN RESULTAT")

        # ── PRE-RESOLVE entity_type (before content_type) ───────────
        # wiki_entity_type may be "concept" when there is no Wikipedia page.
        # Enrich it via Wikidata + a media heuristic BEFORE the synthesis
        resolved_entity_type = wiki_entity_type or "concept"

        if resolved_entity_type == "concept" and wikidata_info:
            if wikidata_info.get("description"):
                resolved_entity_type = self._detect_entity_type_from_description(
                    wikidata_info["description"])
            if resolved_entity_type == "concept" and wikidata_info.get("properties"):
                instance_of = wikidata_info["properties"].get("instance_of", [])
                if any(qid in instance_of for qid in ("Q6256", "Q3624078")):
                    resolved_entity_type = "country"

        if resolved_entity_type == "concept" and media_sources:
            combined_text = " ".join(
                s.get("text", "")[:500] for s in media_sources
            ).lower()
            person_kw = [
                "psychiatre", "medecin", "auteur", "ecrivain", "journaliste",
                "professeur", "directeur", "fondateur", "chercheur", "docteur",
                "ne en", "nee en", "specialiste", "expert", "avocat",
                "il est", "elle est", "son livre", "son ouvrage", "sa carriere",
                "ancien ministre", "philosophe", "sociologue", "historien",
                "artiste", "musicien", "acteur", "actrice", "realisateur"
            ]
            if sum(1 for kw in person_kw if kw in combined_text) >= 1:
                resolved_entity_type = "person"

        self._log("INFO",
            f"Pre-resolution entity_type: {wiki_entity_type} -> {resolved_entity_type}")

        # ── CONTENT_TYPE DETECTION ──────────────────────────────────
        content_type = self._detect_content_type(
            query, wiki_summary, media_sources, resolved_entity_type)
        self._log("INFO", f"Content type detecte: {content_type}")

        # ── PROMOTE CONTENT_TYPE to person_notoriete (v4.5) ─────────
        # If Wikipedia is absent AND web-notoriety sources exist
        # AND content_type is person/default → force person_notoriete
        if (not wikipedia_found
                and notoriete_data
                and notoriete_data.get("sources")
                and content_type in ("person", "default")):
            content_type = "person_notoriete"
            self._log("INFO",
                f"Content type promu: person_notoriete "
                f"(Wikipedia absent, {len(notoriete_data['sources'])} "
                f"sources notoriete)")

        # ── STEP 2c: IDENTITY + FILTERING (v4.9e) ──────────────────
        # v4.9e: person_identity was already resolved by
        # discriminate_person() in STEP 1c. Do NOT repeat the Serper
        # call. Only the entity/content_type promotion logic remains.
        identity_block = ""
        query_words = query.strip().split()
        looks_like_name = (
            len(query_words) >= 2
            and all(w[0].isupper() for w in query_words if len(w) > 1)
            and all(len(w) < 20 for w in query_words)
        )
        entity_is_person = resolved_entity_type == "person"
        if entity_is_person and len(query_words) >= 2:
            if not all(w[0].isupper() for w in query_words if len(w) > 1):
                entity_is_person = False
        if (entity_is_person
                or content_type == "person"
                or looks_like_name):
            if person_identity:
                self._log("INFO",
                    f"ETAPE 2c -- Identite deja resolue "
                    f"(discrimination v4.9e)")
            else:
                # Fallback: classic identity call when discrimination
                # was not executed (non-name query)
                self._log("INFO",
                    f"ETAPE 2c -- Resolution identite "
                    f"fallback pour '{query}'")
                try:
                    from media_fetcher import MediaFetcher
                    id_fetcher = MediaFetcher()
                    person_identity = (
                        id_fetcher.resolve_person_identity(
                            query))
                    self._log("INFO",
                        f"Identite resolue (fallback): "
                        f"confidence="
                        f"{person_identity['confidence']}")
                except Exception as e:
                    self._log("WARN",
                        f"Echec resolution identite: {e}")

            # Promotion: if looks_like_name triggered the identity
            # resolution, force entity_type and content_type to "person"
            # to enable the person-strict pipeline (portrait + pubs)
            if looks_like_name and resolved_entity_type != "person":
                resolved_entity_type = "person"
                entity_type = "person"
                if content_type == "default":
                    content_type = "person"
                self._log("INFO",
                    f"Promotion entity/content_type -> person "
                    f"(looks_like_name)")

            # Filter media sources by exact name
            if media_sources:
                original_sources = list(media_sources)
                relevant, rejected = filter_sources_by_person(
                    media_sources, query, min_matches=1)
                self._log("INFO",
                    f"Filtrage anti-derive: "
                    f"{len(relevant)} pertinentes, "
                    f"{len(rejected)} rejetees sur "
                    f"{len(media_sources)} totales")
                media_sources = relevant

                # Safety net: if EVERYTHING was rejected, keep the
                # original sources rather than dropping to zero
                if (not media_sources
                        and (not person_identity
                             or person_identity.get("confidence")
                             == "low")):
                    media_sources = original_sources
                    self._log("WARN",
                        f"Filtre anti-derive trop agressif "
                        f"(identite low) — sources originales "
                        f"conservees ({len(media_sources)})")

                # Post-filter targeted DDG: if the anti-drift filter
                # rejected every standard source, look for specialised
                # sources via DDG
                if (not media_sources
                        and person_identity
                        and person_identity.get("confidence")
                        in ("medium", "high")):
                    self._log("INFO",
                        f"Sources standard toutes rejetees pour "
                        f"'{query}' — DDG cible post-filtre")
                    try:
                        from media_fetcher import MediaFetcher
                        ddg_fetcher = MediaFetcher()
                        # Reuse the DDG results from
                        # resolve_person_identity (avoids rate-limiting)
                        ddg_raw = person_identity.get(
                            "ddg_raw_results", None)
                        ddg_post = ddg_fetcher.fetch_ddg_targeted(
                            query, ddg_results=ddg_raw)
                        if ddg_post:
                            media_sources.extend(ddg_post)
                            self._log("INFO",
                                f"DDG cible post-filtre: "
                                f"{len(ddg_post)} sources trouvees")
                    except Exception as e:
                        self._log("ERROR",
                            f"DDG cible post-filtre echec: {e}")

                # No relevant source but an identity was found:
                # create a synthetic source from the identity
                if (not media_sources
                        and person_identity
                        and person_identity.get("confidence")
                        in ("medium", "high")):
                    synthetic_text = (
                        f"{query} est "
                        f"{person_identity.get('profession') or 'une personne'}."
                    )
                    if person_identity.get("location"):
                        synthetic_text += (
                            f" Localisation : "
                            f"{person_identity['location']}.")
                    if person_identity.get("bio_snippet"):
                        synthetic_text += (
                            f" {person_identity['bio_snippet']}")
                    media_sources = [{
                        "type": "identity",
                        "domain": "identity_resolver",
                        "url": "",
                        "title": f"Profil de {query}",
                        "text": synthetic_text,
                    }]
                    self._log("INFO",
                        f"Source synthetique creee depuis identite "
                        f"pour '{query}'")

            # Enrich the identity with the Wikidata description if available
            if (wikidata_info and wikidata_info.get("description")
                    and person_identity):
                wd_desc = wikidata_info["description"]
                # Inject as profession when profession is absent, or
                # as a bio_snippet complement when profession is present
                if not person_identity.get("profession"):
                    person_identity["profession"] = wd_desc
                    self._log("INFO",
                        f"Profession injectee depuis Wikidata: {wd_desc}")
                elif not person_identity.get("bio_snippet"):
                    person_identity["bio_snippet"] = wd_desc
                    self._log("INFO",
                        f"Bio injectee depuis Wikidata: {wd_desc}")
                elif wd_desc.lower() not in person_identity["bio_snippet"].lower():
                    person_identity["bio_snippet"] = (
                        wd_desc + ". " + person_identity["bio_snippet"])
                    self._log("INFO",
                        f"Bio enrichie avec Wikidata: {wd_desc}")

            # Build the identity block for the prompt
            identity_block = build_identity_block(
                query, person_identity)
            self._log("INFO",
                f"Bloc identite construit "
                f"({len(identity_block)} chars)")
        else:
            self._log("INFO",
                "ETAPE 2c -- SKIP (pas person)")

        # ── STEP 2d: LOCAL ELECTED-OFFICIAL DETECTION (RNE data.gouv.fr) ──
        # For persons: query the Repertoire National des Elus.
        # If the person is a mayor: force content_type = "elu_local"
        # and enrich the sources with the commune's data
        elu_data = None
        elu_commune_source = None
        if (entity_is_person or looks_like_name):
            self._log("INFO",
                f"ETAPE 2d -- Recherche elu RNE pour '{query}'")
            try:
                from media_fetcher import MediaFetcher
                rne_fetcher = MediaFetcher()
                elu_data = rne_fetcher.fetch_elu_officiel(query)
            except Exception as e:
                self._log("WARN", f"Echec recherche RNE: {e}")

            if elu_data:
                # Force content_type to "elu_local"
                content_type = "elu_local"
                resolved_entity_type = "person"
                entity_type = "person"
                self._log("INFO",
                    f"ELU DETECTE: {elu_data['prenom']} {elu_data['nom']}, "
                    f"Maire de {elu_data['commune']} "
                    f"({elu_data['departement']})")

                # Build an RNE block to inject into identity_block
                rne_block = (
                    f"\nDONNEES OFFICIELLES (RNE — Repertoire National des Elus) :\n"
                    f"Fonction : {elu_data['fonction']}\n"
                    f"Commune : {elu_data['commune']}"
                )
                if elu_data.get("code_insee"):
                    rne_block += f" (INSEE: {elu_data['code_insee']})"
                rne_block += f"\nDepartement : {elu_data['departement']}"
                if elu_data.get("date_debut_mandat"):
                    rne_block += (
                        f"\nDebut mandat : {elu_data['date_debut_mandat']}")
                if elu_data.get("date_debut_fonction"):
                    rne_block += (
                        f"\nDebut fonction : {elu_data['date_debut_fonction']}")
                if elu_data.get("csp"):
                    rne_block += (
                        f"\nCategorie socio-professionnelle : "
                        f"{elu_data['csp']}")
                rne_block += "\n"
                identity_block += rne_block

                # Look up Wikipedia for the commune (enrichment)
                commune_name = elu_data.get("commune", "")
                if commune_name:
                    self._log("INFO",
                        f"Recherche Wikipedia pour commune: "
                        f"'{commune_name}'")
                    commune_wiki = self._wikipedia_summary(
                        commune_name, lang)
                    if not commune_wiki.get("error"):
                        commune_text = commune_wiki.get("summary", "")
                        if commune_text and len(commune_text) > 50:
                            elu_commune_source = {
                                "type": "encyclopedia",
                                "domain": "wikipedia.org",
                                "url": commune_wiki.get(
                                    "wikipedia_url", ""),
                                "title": (
                                    f"Wikipedia — {commune_name}"),
                                "text": commune_text[:1500],
                            }
                            self._log("INFO",
                                f"Wikipedia commune OK: "
                                f"{len(commune_text)} chars")
                    else:
                        self._log("INFO",
                            f"Wikipedia commune: pas de page "
                            f"pour '{commune_name}'")
            else:
                self._log("INFO",
                    f"ETAPE 2d -- Pas d'elu RNE pour '{query}'")
        else:
            self._log("INFO", "ETAPE 2d -- SKIP (pas person)")

        # ── BUILD THE REFLETS BLOCK (editorial activity) ────────────
        reflets_block = ""
        if reflets_is_contributor and reflets_author_articles:
            articles_list = "\n".join(
                f"- {a['title']} — {a['url']}"
                for a in reflets_author_articles[:5]
            )
            reflets_block = (
                "\nACTIVITE EDITORIALE SUR REFLETS.INFO :\n"
                "La personne est auteur/contributeur sur Reflets.info "
                "(media d'investigation specialise cybersecurite, "
                "surveillance, hackers).\n"
                f"Articles publies :\n{articles_list}\n\n"
                "-> Mentionner explicitement cette activite editoriale "
                "dans la synthese.\n"
                "-> Decrire le positionnement editorial de ces articles "
                "si le contenu le permet.\n"
            )
            self._log("INFO",
                f"Bloc Reflets contributeur injecte "
                f"({len(reflets_author_articles)} articles)")

        # ── INJECT LOCAL ELECTED-OFFICIAL SOURCES ────────────────────
        if elu_data:
            # Synthetic RNE source (always present when an official is detected)
            rne_text = (
                f"{elu_data['prenom']} {elu_data['nom']} est "
                f"{elu_data['fonction']} de {elu_data['commune']}"
                f" ({elu_data['departement']})."
            )
            if elu_data.get("date_debut_fonction"):
                rne_text += (
                    f" En fonction depuis le "
                    f"{elu_data['date_debut_fonction']}.")
            if elu_data.get("csp"):
                rne_text += (
                    f" Categorie socio-professionnelle : "
                    f"{elu_data['csp']}.")
            media_sources.insert(0, {
                "type": "official",
                "domain": "data.gouv.fr",
                "url": "https://www.data.gouv.fr/fr/datasets/"
                       "repertoire-national-des-elus-1/",
                "title": f"RNE — {elu_data['prenom']} {elu_data['nom']}",
                "text": rne_text,
            })
            self._log("INFO",
                f"Source synthetique RNE injectee en tete "
                f"({len(rne_text)} chars)")

        if elu_commune_source:
            media_sources.append(elu_commune_source)
            self._log("INFO",
                f"Source Wikipedia commune injectee dans medias "
                f"(total: {len(media_sources)})")

        # ── WIKIDATA DESCRIPTION BLOCK (v3.3 — enriches all types) ─────
        wikidata_block = ""
        if wikidata_info and wikidata_info.get("description"):
            wd_desc = wikidata_info["description"]
            wd_props = wikidata_info.get("properties", {})
            wd_parts = [f"WIKIDATA : {wd_desc}"]
            for prop_key in ("instance_of", "country", "headquarters",
                             "inception", "official_language"):
                val = wd_props.get(prop_key)
                if val:
                    label = prop_key.replace("_", " ").title()
                    if isinstance(val, list):
                        val = ", ".join(str(v) for v in val[:3])
                    wd_parts.append(f"- {label} : {val}")
            wikidata_block = "\n".join(wd_parts) + "\n"
            self._log("INFO",
                f"Bloc Wikidata injecte ({len(wikidata_block)} chars)")

        # ── DDG INSTANT AS A TEXT SOURCE (v4.4) ─────────────────────
        # The DDG Instant abstract is a factual summary (~200-400 chars).
        # Inject it into media_sources to enrich the USER prompt
        if ddg_instant and ddg_instant.get("abstract"):
            media_sources.append({
                "type": "instant",
                "domain": "duckduckgo.com",
                "url": ddg_instant.get("url", ""),
                "title": ("DuckDuckGo -- "
                          + ddg_instant.get("heading", query)),
                "text": ddg_instant.get("abstract", ""),
            })
            self._log("INFO",
                f"DDG Instant injecte comme source texte "
                f"({len(ddg_instant['abstract'])} chars)")

        # ── v4.9f: dedup media_sources per domain (max 2) ──────────
        if media_sources:
            from collections import Counter
            domain_counts = Counter(
                s.get("domain", "") for s in media_sources
                if s.get("domain", "") != "wikipedia.org"
            )
            over = {d for d, c in domain_counts.items() if c > 2}
            if over:
                seen = {}
                deduped = []
                for s in media_sources:
                    dom = s.get("domain", "")
                    if dom in over:
                        seen[dom] = seen.get(dom, 0) + 1
                        if seen[dom] > 2:
                            continue
                    deduped.append(s)
                self._log("INFO",
                    f"Dedup media_sources: {len(media_sources)} -> "
                    f"{len(deduped)} (domaines {over})")
                media_sources = deduped

        # ── BUILD THE RESULT ────────────────────────────────────────
        title = wiki_title
        summary = ""
        images = wiki_images
        # Use resolved_entity_type (enriched + promoted) instead of
        # wiki_entity_type to honour the looks_like_name promotion
        entity_type = resolved_entity_type
        synthesis_method = "unknown"
        sources_used = []

        # ELU case: local official detected via RNE — forced synthesis.
        # Combine all sources (RNE + commune + wiki + media) to produce
        # a synthesis with content_type="elu_local"
        if elu_data and media_sources:
            self._log("INFO",
                "Synthese ELU LOCAL via GeoSynthesizer...")
            combined_elu = list(media_sources)
            # Add the person's wiki_summary if available
            if wiki_summary:
                combined_elu.append({
                    "type": "encyclopedia",
                    "domain": "wikipedia.org",
                    "url": wiki_url or "",
                    "title": f"Wikipedia — {wiki_title}",
                    "text": wiki_summary[:WIKI_TEXT_BUDGET],
                })
            combined_extra = identity_block + wikidata_block + reflets_block
            # v4.8f — press budget
            budgeted_elu = _cap_press_budget(combined_elu[:6])
            synth_result = _synthesizer.synthesize(
                query=query,
                entity_type=entity_type,
                sources=budgeted_elu,
                lang=lang,
                content_type=content_type,
                extra_context=combined_extra
            )
            summary = synth_result["summary"]
            synthesis_method = synth_result["method"]
            for ms in budgeted_elu:
                sources_used.append({
                    "type": ms.get("type", "media"),
                    "domain": ms["domain"],
                    "url": ms.get("url", ""),
                    "title": ms.get("title", "")
                })
            # Title: "Firstname Lastname — Maire de <commune>"
            title = (
                f"{elu_data['prenom'].title()} "
                f"{elu_data['nom'].title()} — "
                f"Maire de {elu_data['commune']}")

        # Case 0: priority Reflets query + available sources.
        # Forces a combined synthesis (Reflets + Wiki + media)
        elif reflets_priority_sources and media_sources:
            self._log("INFO",
                "Synthese combinee Reflets PRIORITE + sources...")
            # Inject wiki as a media source if available
            combined = list(media_sources)  # already holds Reflets at the head
            if wiki_summary:
                combined.insert(len(reflets_priority_sources), {
                    "type": "encyclopedia",
                    "domain": "wikipedia.org",
                    "url": wiki_url or "",
                    "title": f"Wikipedia -- {wiki_title}",
                    "text": wiki_summary[:WIKI_TEXT_BUDGET]
                })
            # Combine identity_block + reflets_block into extra_context
            combined_extra = identity_block + wikidata_block + reflets_block
            # v4.8f — press budget
            budgeted_reflets = _cap_press_budget(combined[:6])
            synth_result = _synthesizer.synthesize(
                query=query,
                entity_type=entity_type,
                sources=budgeted_reflets,
                lang=lang,
                content_type=content_type,
                extra_context=combined_extra
            )
            summary = synth_result["summary"]
            synthesis_method = synth_result["method"]
            for ms in budgeted_reflets:
                sources_used.append({
                    "type": ms.get("type", "media"),
                    "domain": ms["domain"],
                    "url": ms.get("url", ""),
                    "title": ms.get("title", "")
                })
            if wikidata_info and wikidata_info.get("label"):
                title = wikidata_info["label"]

        # Cases 1+2: Wikipedia and/or media sources available
        # v4.3 fix: ALWAYS combine Wikipedia + media and run them
        # through the LLM. No more raw copy-paste of Wikipedia.
        elif wikipedia_found or media_sources:
            combined_sources = list(media_sources)
            # Inject Wikipedia as a source if available
            if wiki_summary:
                combined_sources.insert(0, {
                    "type": "encyclopedia",
                    "domain": "wikipedia.org",
                    "url": wiki_url or "",
                    "title": f"Wikipedia -- {wiki_title}",
                    "text": wiki_summary[:WIKI_TEXT_BUDGET],
                })
            self._log("INFO",
                f"Synthese combinee: {len(combined_sources)} sources "
                f"(wiki={'OUI' if wiki_summary else 'NON'}, "
                f"medias={len(media_sources)})")
            combined_extra = identity_block + wikidata_block + reflets_block
            # v4.8f — press budget
            budgeted_combined = _cap_press_budget(combined_sources[:6])
            synth_result = _synthesizer.synthesize(
                query=query,
                entity_type=entity_type,
                sources=budgeted_combined,
                lang=lang,
                content_type=content_type,
                extra_context=combined_extra
            )
            summary = synth_result["summary"]
            synthesis_method = synth_result["method"]
            for ms in budgeted_combined:
                sources_used.append({
                    "type": ms.get("type", "media"),
                    "domain": ms.get("domain", "?"),
                    "url": ms.get("url", ""),
                    "title": ms.get("title", "")
                })
            if wikidata_info and wikidata_info.get("label"):
                title = wikidata_info["label"]

        # Case 3: Partial Wikipedia alone (< 200 chars, no media)
        elif wiki_summary:
            # Even with a short extract, go through the LLM
            combined_extra = identity_block + wikidata_block + reflets_block
            synth_result = _synthesizer.synthesize(
                query=query,
                entity_type=entity_type,
                sources=[{
                    "type": "encyclopedia",
                    "domain": "wikipedia.org",
                    "url": wiki_url or "",
                    "title": f"Wikipedia -- {wiki_title}",
                    "text": wiki_summary,
                }],
                lang=lang,
                content_type=content_type,
                extra_context=combined_extra
            )
            summary = synth_result["summary"]
            synthesis_method = synth_result["method"]
            sources_used.append({
                "type": "encyclopedia",
                "domain": "wikipedia.org",
                "url": wiki_url or "",
                "title": f"Wikipedia -- {wiki_title}"
            })

        # Case 4: DDG Instant Answer only
        elif ddg_instant and ddg_instant.get("abstract"):
            summary = ddg_instant["abstract"]
            synthesis_method = "ddg_instant"
            title = ddg_instant.get("heading", query)

        # Case 5: No source at all — unknown entity
        else:
            self._log("WARN", f"AUCUNE SOURCE pour: '{query}'")
            self._send_json(404, {
                "status": "not_found",
                "query": query,
                "message": "ENTITE INCONNUE -- AUCUNE SOURCE TROUVEE",
                "processing_time_ms": round((time.time() - start_time) * 1000)
            })
            return

        # ── Enrichment from complementary sources ────────────────────

        # Add Wikidata to the sources
        if wikidata_info:
            sources_used.append({
                "type": "structured",
                "domain": "wikidata.org",
                "url": f"https://www.wikidata.org/wiki/{wikidata_info.get('wikidata_id', '')}",
                "title": f"Wikidata -- {wikidata_info.get('label', query)}"
            })
            # Enrich entity_type if Wikipedia did not detect it
            if entity_type == "concept" and wikidata_info.get("description"):
                entity_type = self._detect_entity_type_from_description(
                    wikidata_info["description"]
                )
            # Country detection via Wikidata P31 (instance_of)
            if entity_type == "concept" and wikidata_info.get("properties"):
                instance_of = wikidata_info["properties"].get("instance_of", [])
                # Q6256 = country, Q3624078 = sovereign state
                if any(qid in instance_of for qid in ("Q6256", "Q3624078")):
                    entity_type = "country"
                    self._log("INFO", f"Entity type force a 'country' via Wikidata P31")

        # Add DDG Instant to the sources (unless already present — v4.4)
        ddg_already = any(
            s.get("domain") == "duckduckgo.com"
            for s in sources_used)
        if (ddg_instant and ddg_instant.get("abstract")
                and not ddg_already):
            sources_used.append({
                "type": "instant",
                "domain": "duckduckgo.com",
                "url": ddg_instant.get("url", ""),
                "title": f"DuckDuckGo -- {ddg_instant.get('heading', query)}"
            })
            # Use the DDG image if there is no Wikipedia image
            if not images and ddg_instant.get("image"):
                images = [{
                    "url": ddg_instant["image"],
                    "caption": ddg_instant.get("heading", ""),
                    "source": "duckduckgo"
                }]
            # Country detection via the DDG abstract (if entity_type is still "concept")
            if entity_type == "concept" and ddg_instant.get("abstract"):
                ddg_text = ddg_instant["abstract"].lower()
                country_indicators = ["pays", "republic", "état", "etat", "population", "superficie", "capitale", "country", "sovereign"]
                if sum(1 for kw in country_indicators if kw in ddg_text) >= 2:
                    entity_type = "country"
                    self._log("INFO", f"Entity type force a 'country' via DDG keywords")

        # ── Heuristic: person detection from media sources ───────────
        if entity_type == "concept" and media_sources:
            combined_text = " ".join(
                s.get("text", "")[:500] for s in media_sources
            ).lower()
            person_kw_media = [
                "psychiatre", "medecin", "auteur", "ecrivain", "journaliste",
                "professeur", "directeur", "fondateur", "chercheur", "docteur",
                "ne en", "nee en", "specialiste", "expert", "avocat",
                "il est", "elle est", "son livre", "son ouvrage", "sa carriere",
                "ancien ministre", "philosophe", "sociologue", "historien",
                "artiste", "musicien", "acteur", "actrice", "realisateur"
            ]
            matches = sum(1 for kw in person_kw_media if kw in combined_text)
            if matches >= 1:
                entity_type = "person"
                self._log("INFO",
                    f"Entity type force a 'person' via heuristique medias "
                    f"({matches} keywords)")

        # ── STEP 3b/3c: Images + Publications (branch on entity_type) ─
        publications = None
        author_profile = None

        # Global timeout for portrait + publications scraping
        SCRAPING_TIMEOUT_TOTAL = 18  # v4.8g — raised (Wikipedia biblio)

        if entity_type == "person":
            # === PERSON-STRICT PIPELINE (v3.2 — PARALLEL) ===
            # Portrait + Publications run in parallel under the global
            # SCRAPING_TIMEOUT_TOTAL budget so they cannot delay the synthesis
            self._log("INFO",
                "ETAPE 3b -- Pipeline person-strict PARALLELE "
                "(portrait + publications, timeout %ds)"
                % SCRAPING_TIMEOUT_TOTAL)

            try:
                from media_fetcher import MediaFetcher
                fetcher = MediaFetcher()

                portrait = None
                pubs_list = None

                # Check whether Wikipedia already provided a portrait
                wiki_portrait = next(
                    (img for img in images
                     if img.get("source", "").startswith("wikipedia")),
                    None
                )

                need_portrait = wiki_portrait is None
                if wiki_portrait:
                    portrait = wiki_portrait
                    portrait["type"] = "person_portrait"
                    self._log("INFO", "Portrait Wikipedia conserve")

                # Extract press URLs for the og:image portrait lookup
                press_urls = [
                    s["url"] for s in media_sources
                    if s.get("url")
                ] if media_sources else []

                # Launch portrait + publications + author_profile IN PARALLEL
                deadline = time.time() + SCRAPING_TIMEOUT_TOTAL
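                # Shared-deadline note: each .result(timeout=...) below is
                # given only the time left on the global budget (floored at
                # 1s), so the three futures together stay within roughly
                # SCRAPING_TIMEOUT_TOTAL seconds instead of three times that.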
                with concurrent.futures.ThreadPoolExecutor(
                        max_workers=3) as exe:
                    f_portrait = (
                        exe.submit(
                            fetcher.fetch_person_portrait,
                            query,
                            press_urls=press_urls,
                            identity=person_identity)
                        if need_portrait else None
                    )
                    f_pubs = exe.submit(
                        fetcher.fetch_publications,
                        query, 10, person_identity,
                        wikipedia_found)
                    f_author = exe.submit(
                        fetcher.fetch_author_profile,
                        query, media_sources)

                    # Collect the portrait result
                    if f_portrait:
                        remaining = max(1.0, deadline - time.time())
                        try:
                            portrait = f_portrait.result(
                                timeout=remaining)
                            if portrait:
                                self._log("INFO",
                                    f"Portrait trouve: "
                                    f"{portrait['source']}")
                            else:
                                self._log("WARN",
                                    "Aucun portrait disponible "
                                    f"pour '{query}'")
                        except concurrent.futures.TimeoutError:
                            self._log("WARN",
                                "[TIMEOUT] Portrait abandonne "
                                f"apres {SCRAPING_TIMEOUT_TOTAL}s")
                        except Exception as e:
                            self._log("WARN",
                                f"Erreur portrait: {e}")

                    # Collect the publications result
                    remaining = max(1.0, deadline - time.time())
                    try:
                        pubs_list = f_pubs.result(
                            timeout=remaining)
                    except concurrent.futures.TimeoutError:
                        self._log("WARN",
                            "[TIMEOUT] Publications abandonnees "
                            f"apres {SCRAPING_TIMEOUT_TOTAL}s")
                        pubs_list = None
                    except Exception as e:
                        self._log("WARN",
                            f"Erreur publications: {e}")
                        pubs_list = None

                    # Collect the author_profile result (v4.2)
                    remaining = max(1.0, deadline - time.time())
                    author_profile = None
                    try:
                        author_profile = f_author.result(
                            timeout=remaining)
                        if author_profile:
                            self._log("INFO",
                                "[AUTHOR-PROFILE] %s"
                                % author_profile["summary"])
                    except concurrent.futures.TimeoutError:
                        self._log("WARN",
                            "[TIMEOUT] Author profile abandonne")
                    except Exception as e:
                        self._log("WARN",
                            f"Erreur author_profile: {e}")

                if pubs_list:
                    featured = fetcher._select_featured(pubs_list)
                    others = [
                        p for p in pubs_list if p is not featured
                    ]
                    publications = {
                        "featured": featured,
                        "others": others
                    }
                    self._log("INFO",
                        f"Publication featured: {featured['title']} "
                        f"({featured['year']})")
                else:
                    self._log("INFO", "Publications: AUCUN RESULTAT")

                jacket = None
                # v4.8g — look for a cover on the featured publication
                # first, then on the other publications
                cover_pub = None
                if pubs_list and featured:
                    if featured.get("cover_url"):
                        cover_pub = featured
                    else:
                        for op in (others or []):
                            if op.get("cover_url"):
                                cover_pub = op
                                break
                if cover_pub:
                    jacket = {
                        "url": cover_pub["cover_url"],
                        "caption": cover_pub["title"],
                        "link": cover_pub.get("link", ""),
                        "type": "publication_cover",
                        "source": cover_pub.get("source", "")
                    }

                # === SAFETY NET: portrait URL validation ===
                if portrait:
                    p_url = portrait.get("url", "")
                    google_reject = [
                        "googleusercontent.com", "ggpht.com",
                        "gstatic.com", "blogspot.com",
                        "=s900", "=s800", "no-rj", "c0x00ffffff",
                    ]
                    if any(pat in p_url.lower()
                           for pat in google_reject):
                        self._log("WARN",
                            "[SAFETY-NET] Portrait Google proxy "
                            f"rejete: {p_url[:80]}")
                        portrait = None
                    elif not p_url.startswith("https://"):
                        self._log("WARN",
                            "[SAFETY-NET] Portrait non-HTTPS "
                            f"rejete: {p_url[:60]}")
                        portrait = None

                # === IDENTITY CONFIDENCE FILTER (v3.8) ===
                # Reject DDG portraits when the identity is unconfirmed
                if portrait:
                    p_source = portrait.get("source", "")
                    id_confidence = (
                        person_identity.get("confidence", "low")
                        if person_identity else "low"
                    )
                    # Trusted sources: Wikipedia, Wikimedia, press, web notoriety
                    TRUSTED_SOURCES = {
                        "wikipedia", "wikipedia_fr", "wikipedia_en",
                        "wikimedia_commons", "press_og_image",
                        "duckduckgo_linkedin",
                        "duckduckgo_portrait",  # v4.9c
                        "notoriete_web_ddg",
                        "notoriete_web_serper",  # v4.9c
                    }
                    if (p_source not in TRUSTED_SOURCES
                            and id_confidence == "low"):
                        self._log("WARN",
                            f"[CONFIDENCE-FILTER] Portrait "
                            f"'{p_source}' rejete — identite "
                            f"confidence=low")
                        portrait = None

                # === WEB-NOTORIETY PORTRAIT FALLBACK (v4.5) ===
                # If the standard pipeline found no portrait, fall back
                # to the web-notoriety portrait (DDG Images)
                if not portrait and notoriete_portrait:
                    portrait = notoriete_portrait
                    self._log("INFO",
                        "[PORTRAIT-NOTORIETE] Fallback portrait "
                        f"notoriete web: {portrait.get('source')}")

                # === FINAL ASSEMBLY ===
                images = []
                if portrait:
                    images.append(portrait)
                elif jacket:
                    # v2.8 rule: jacket takes slot 0 when there is
                    # no portrait (published person)
                    self._log("INFO",
                        "[JACKET-SLOT0] Pas de portrait — "
                        "jacket promue en slot 0")
                if jacket and jacket not in images:
                    images.append(jacket)

                self._log("INFO",
                    "Images finales person: "
                    + str([(i["type"], i.get("caption", "")[:25])
                           for i in images]))

            except Exception as e:
                self._log("WARN",
                    f"Echec pipeline person-strict: {e}")

        else:
            # === NON-PERSON PIPELINE (unchanged) ===
            if not images:
                self._log("INFO",
                    "ETAPE 3b -- Recherche images complementaires "
                    "(DDG Images + Wikimedia)")
                try:
                    from media_fetcher import MediaFetcher
                    img_fetcher = MediaFetcher()
                    extra_images = img_fetcher.fetch_image(
                        query, max_results=MAX_IMAGES)
                    if extra_images:
                        images = extra_images[:MAX_IMAGES]
                        self._log("INFO",
                            f"Images complementaires: {len(images)} trouvees")
                    else:
                        self._log("INFO", "Images complementaires: AUCUNE")
                except Exception as e:
                    self._log("WARN",
                        f"Echec recherche images complementaires: {e}")
            self._log("INFO",
                f"ETAPE 3c -- SKIP publications (entity_type: {entity_type})")

        # ── STEP 4: Geo-resolution (v1.2) ────────────────────────────
        self._log("INFO", f"ETAPE 4 -- Geo-resolution pour '{query}' (type: {entity_type})")
        geo_data = None
        try:
            geo_data = _geo_resolver.resolve(query, entity_type)
            if geo_data:
                self._log("INFO",
                    f"Geo: {geo_data['lat']}, {geo_data['lon']} "
                    f"({geo_data['display_mode']})")
            else:
                self._log("INFO", "Geo: AUCUN RESULTAT")
        except Exception as e:
            self._log("WARN", f"Echec geo-resolution: {e}")

        # ── Truncation and validation ────────────────────────────────

        # Hard rule: MAX_SUMMARY_CHARS (4500) max, cut at the last sentence
        if len(summary) > MAX_SUMMARY_CHARS:
            truncated = summary[:MAX_SUMMARY_CHARS]
            last_period = max(truncated.rfind(". "), truncated.rfind(".\n"))
            if last_period > MAX_SUMMARY_CHARS * 0.8:
                summary = truncated[:last_period + 1]
            else:
                summary = truncated[:MAX_SUMMARY_CHARS - 3] + "..."
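        # Example: for a 4800-char summary whose last ". " before the
        # 4500 cutoff sits at offset 4460 (> 4500 * 0.8 = 3600), the text
        # is cut there; had the last sentence boundary fallen before
        # 3600, the fallback would be a hard cut plus "...".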

        # Hard rule: MAX_IMAGES (2) images max
        images = images[:MAX_IMAGES]

        # ── DISPLAY_TYPE: label shown in the frontend badge ──────────
        # Priority: content_type > entity_type
        # "fiction" → "FICTION", "person" → "PERSONNE", etc.
        DISPLAY_TYPE_LABELS = {
            "fiction": "FICTION",
            "person": "PERSONNE",
            "country": "PAYS",
            "organization": "ORGANISATION",
            "concept": "CONCEPT",
            "default": "ANALYSE",
        }
        # Use content_type when it is specific (not "default"),
        # otherwise fall back to entity_type (minus the "person" false positive)
        if content_type and content_type != "default":
            display_type = DISPLAY_TYPE_LABELS.get(
                content_type, content_type.upper())
        else:
            # Correct the entity_type "person" false positive
            # for lowercase multi-word queries
            effective_type = entity_type
            if entity_type == "person":
                q_words = query.strip().split()
                if (len(q_words) >= 2
                        and not all(w[0].isupper()
                                    for w in q_words if len(w) > 1)):
                    effective_type = "concept"
            display_type = DISPLAY_TYPE_LABELS.get(
                effective_type, effective_type.upper())
        self._log("INFO", f"Display type: {display_type} "
                  f"(content_type={content_type}, entity_type={entity_type})")

        # ── Final response ───────────────────────────────────────────

        result = {
            "status": "ok",
            "query": query,
            "result": {
                "title": title,
                "summary": summary,
                "images": images,
                "geo": geo_data,
                "charts": None,
                "entity_type": entity_type,
                "content_type": content_type,
                "display_type": display_type,
                "wikipedia_url": wiki_url,
                "wikipedia_found": wikipedia_found,
                "sources_used": sources_used,
                "synthesis_method": synthesis_method,
                "cached": False,
                "publications": publications,
                "reflets_contributor": reflets_is_contributor,
                "reflets_author_articles": [
                    {"title": a["title"], "url": a["url"]}
                    for a in reflets_author_articles
                ] if reflets_author_articles else [],
                "identity": person_identity,
                "author_profile": author_profile,
                "elu_data": elu_data,
                "notoriete_web": (
                    notoriete_data.get("meta") if notoriete_data
                    else None),
            },
            "processing_time_ms": None
        }

        # v4.9c — Anti-cache guard: never cache degraded results
        _skip_cache = False
        if synthesis_method == "heuristic":
            _skip_cache = True
            self._log("WARN", "Cache SKIP: synthese heuristique (LLM indisponible)")
        elif synthesis_method == "ddg_instant":
            _skip_cache = True
            self._log("WARN", "Cache SKIP: DDG Instant seulement (pas de synthese LLM)")
        elif "LLM INDISPONIBLE" in summary:
            _skip_cache = True
            self._log("WARN", "Cache SKIP: marqueur LLM INDISPONIBLE detecte")
        elif not sources_used:
            _skip_cache = True
            self._log("WARN", "Cache SKIP: 0 sources utilisees")

        # Write to the cache (must never block the response)
        if not _skip_cache:
            try:
                self._cache_set(cache_key, result)
            except Exception as e:
                self._log("WARN", f"Echec ecriture cache: {e}")
        else:
            self._log("INFO", f"Resultat NON cache pour '{query}'")

        result["processing_time_ms"] = round((time.time() - start_time) * 1000)
        src_count = len(sources_used)
        self._log("INFO",
            f"Reponse pour '{query}': {src_count} sources, "
            f"{len(summary)} chars, {result['processing_time_ms']}ms")
        self._send_json(200, result)

    # ─────────────────────────────────────────────────────────────────────
    # DATA SOURCES
    # ─────────────────────────────────────────────────────────────────────

    def _wikipedia_summary(self, query, lang="fr"):
        """v4.8 — Recuperer le resume Wikipedia d'une entite.

        REGLE FONDAMENTALE (v4.8) — pour les requetes-personne :
        Wikipedia accepte SEULEMENT si la page trouvee EST la personne.
        Search API = resolution de slug uniquement, pas source de contenu.
        Si page ≠ personne → abandon total, pipeline presse.

        Pour les requetes non-personne : comportement standard.
        """
        # Detect whether the query looks like a person name
        query_words = query.strip().split()
        is_person_query = (
            len(query_words) >= 2
            and all(w[0].isupper() for w in query_words
                    if len(w) > 1)
            and all(len(w) < 20 for w in query_words)
        )

        # Generate the slug variants
        if is_person_query:
            slugs = _build_name_variants(query)
            self._log("INFO",
                f"Wikipedia v4.8: requete-personne '{query}' "
                f"— {len(slugs)} variantes directes")
        else:
            try:
                from media_fetcher import MediaFetcher
                _mf = MediaFetcher()
                slugs = _mf._normalize_wikipedia_slug(query)
            except Exception:
                slugs = [query.replace(" ", "_")]

        langs_to_try = [lang]
        if lang == "fr":
            langs_to_try.append("en")

        # ── PHASE 1: DIRECT ATTEMPTS ────────────────────────
        for try_lang in langs_to_try:
            for slug in slugs:
                encoded = urllib.parse.quote(slug)
                url = (f"https://{try_lang}.wikipedia.org/"
                       f"api/rest_v1/page/summary/{encoded}")
                response = self._fetch_url(url)

                if (response is None
                        or (isinstance(response, dict)
                            and "not_found"
                            in response.get("type", ""))):
                    continue

                # Disambiguation page: skip
                if (isinstance(response, dict)
                        and response.get("type")
                        == "disambiguation"):
                    self._log("INFO",
                        f"Wikipedia disambiguation pour "
                        f"'{slug}' ({try_lang}) — skip")
                    continue

                extract = response.get("extract", "")
                title = response.get("title", "")

                if not extract:
                    continue

                # v4.8 — Strict check for person queries
                if is_person_query:
                    if not _page_matches_person(
                            title, extract, query):
                        self._log("INFO",
                            f"Wikipedia direct rejete: "
                            f"'{title}' ≠ '{query}'")
                        continue
                    self._log("INFO",
                        f"Wikipedia OK (direct): "
                        f"'{title}' ({try_lang})")
                else:
                    self._log("INFO",
                        f"Wikipedia trouve: "
                        f"'{slug}' ({try_lang})")

                # Enrichment + image extraction
                summary = self._enrich_wikipedia_extract(
                    extract,
                    response.get("title", query),
                    try_lang)
                if len(summary) > MAX_SUMMARY_CHARS:
                    summary = (
                        summary[:MAX_SUMMARY_CHARS - 3] + "...")

                images = []
                original = response.get("originalimage", {})
                thumbnail = response.get("thumbnail", {})
                wiki_page_url = (
                    response.get("content_urls", {})
                    .get("desktop", {}).get("page", ""))

                if original.get("source"):
                    images.append({
                        "url": original["source"],
                        "caption": response.get(
                            "description", ""),
                        "source": "wikipedia",
                        "source_url": wiki_page_url
                    })
                elif thumbnail.get("source"):
                    images.append({
                        "url": thumbnail["source"],
                        "caption": response.get(
                            "description", ""),
                        "source": "wikipedia",
                        "source_url": wiki_page_url
                    })

                return {
                    "title": response.get("title", query),
                    "summary": summary,
                    "images": images[:MAX_IMAGES],
                    "entity_type": self._detect_entity_type(
                        response),
                    "wikipedia_url": (
                        response.get("content_urls", {})
                        .get("desktop", {}).get("page")),
                    "lang": response.get("lang", try_lang),
                    "slug_used": slug
                }

        # ── PHASE 2: SEARCH API → SLUG ONLY ─────────────────
        # v4.8: the Search API is used ONLY to resolve the
        # right URL slug, never as a content source.
        # If the page found is not the person → TOTAL ABANDON.
        # srlimit=1: a single candidate per language.
        for try_lang in langs_to_try:
            search_url = (
                f"https://{try_lang}.wikipedia.org/w/api.php"
                f"?action=query&list=search"
                f"&srsearch={urllib.parse.quote(query)}"
                f"&format=json&srlimit=1"
            )
            search_resp = self._fetch_url(search_url)
            if not (search_resp
                    and isinstance(search_resp, dict)
                    and "query" in search_resp):
                continue

            hits = search_resp["query"].get("search", [])
            if not hits:
                continue

            found_title = hits[0]["title"]
            self._log("INFO",
                f"Wikipedia search API: '{query}' → "
                f"'{found_title}' ({try_lang})")

            found_slug = found_title.replace(" ", "_")
            encoded = urllib.parse.quote(found_slug)
            summary_url = (
                f"https://{try_lang}.wikipedia.org/"
                f"api/rest_v1/page/summary/{encoded}"
            )
            response = self._fetch_url(summary_url)

            # The REST API encodes missing pages as a ".../errors/not_found"
            # URI in "type", so test by substring (same check as PHASE 1)
            if not (response
                    and isinstance(response, dict)
                    and "not_found" not in response.get("type", "")
                    and response.get("extract")):
                continue

            extract = response.get("extract", "")
            title = response.get("title", "")

            # v4.8 — STRICT check for person queries
            # If the page is not the person → TOTAL ABANDON (no fallback)
            if is_person_query:
                if not _page_matches_person(
                        title, extract, query):
                    self._log("WARN",
                        f"Wikipedia search API: "
                        f"'{title}' ≠ '{query}' "
                        f"→ abandon, pipeline presse")
                    return {"error": "not_found"}
                self._log("INFO",
                    f"Wikipedia OK (search→direct): "
                    f"'{title}' ({try_lang})")
            else:
                self._log("INFO",
                    f"Wikipedia trouve via search API: "
                    f"'{found_title}' ({try_lang})")

            # Enrichment
            summary = self._enrich_wikipedia_extract(
                extract,
                response.get("title", found_title),
                try_lang)
            if len(summary) > MAX_SUMMARY_CHARS:
                summary = (
                    summary[:MAX_SUMMARY_CHARS - 3]
                    + "...")

            images = []
            original = response.get(
                "originalimage", {})
            thumbnail = response.get("thumbnail", {})
            wiki_page_url = (
                response.get("content_urls", {})
                .get("desktop", {}).get("page", ""))
            if original.get("source"):
                images.append({
                    "url": original["source"],
                    "caption": response.get(
                        "description", ""),
                    "source": "wikipedia",
                    "source_url": wiki_page_url
                })
            elif thumbnail.get("source"):
                images.append({
                    "url": thumbnail["source"],
                    "caption": response.get(
                        "description", ""),
                    "source": "wikipedia",
                    "source_url": wiki_page_url
                })
            return {
                "title": response.get(
                    "title", found_title),
                "summary": summary,
                "images": images[:MAX_IMAGES],
                "entity_type":
                    self._detect_entity_type(response),
                "wikipedia_url": wiki_page_url,
                "lang": response.get(
                    "lang", try_lang),
                "slug_used": found_slug
            }

        self._log("INFO",
            f"Wikipedia: aucune page pour '{query}' "
            f"(slugs + search API epuises) "
            f"→ pipeline presse")
        return {"error": "not_found"}

    # v4.8 — _is_relevant_wikipedia_result() REMOVED
    # Replaced by _page_matches_person() (module-level, v4.8),
    # which applies: organisation detection, compound/simple last
    # names, contemporaneity. See lines ~52-200.

    def _enrich_wikipedia_extract(self, summary, title, lang):
        """v4.4 — Enrichir un extrait Wikipedia court via MediaWiki API.

        Le REST API /page/summary retourne souvent ~100-400 chars.
        Le MediaWiki API prop=extracts retourne l'article complet en
        plaintext (~5000-20000 chars), qu'on tronque a 3000 chars max.
        Seuil de declenchement : summary < 1000 chars.
        """
        if not summary or len(summary) >= 1000:
            return summary
        try:
            extracts_url = (
                f"https://{lang}.wikipedia.org/w/api.php"
                f"?action=query&prop=extracts&explaintext=1"
                f"&titles={urllib.parse.quote(title)}"
                f"&format=json"
            )
            extracts_resp = self._fetch_url(extracts_url)
            if extracts_resp and isinstance(extracts_resp, dict):
                pages = extracts_resp.get(
                    "query", {}).get("pages", {})
                for page_data in pages.values():
                    long_extract = page_data.get("extract", "")
                    if (long_extract
                            and len(long_extract) > len(summary)):
                        # Truncate to 3000 chars, cut at a sentence
                        if len(long_extract) > 3000:
                            trunc = long_extract[:3000]
                            last_dot = trunc.rfind(". ")
                            if last_dot > 2000:
                                long_extract = trunc[:last_dot + 1]
                            else:
                                long_extract = trunc
                        self._log("INFO",
                            f"Wikipedia extract enrichi: "
                            f"{len(summary)} -> "
                            f"{len(long_extract)} chars "
                            f"(MediaWiki API)")
                        return long_extract
        except Exception as e:
            self._log("WARN",
                f"Echec enrichissement Wikipedia: {e}")
        return summary

    def _fetch_wikidata(self, query, lang="fr"):
        """Recuperer les donnees structurees Wikidata (source complementaire)"""
        try:
            from wikidata_client import WikidataClient
            client = WikidataClient()
            return client.get_structured_data(query, lang)
        except Exception as e:
            self._log("WARN", f"Echec Wikidata: {e}")
            return None

    def _fetch_ddg_instant(self, query):
        """DuckDuckGo Instant Answer API (gratuit, zero cle)
        Source complementaire systematique
        """
        try:
            encoded = urllib.parse.urlencode({
                "q": query,
                "format": "json",
                "no_html": "1",
                "skip_disambig": "1"
            })
            url = f"https://api.duckduckgo.com/?{encoded}"
            result = self._fetch_url(url)
            if not result:
                return None

            abstract = result.get("Abstract", "") or result.get("AbstractText", "")
            if not abstract:
                return None

            image = result.get("Image", "")
            if image and not image.startswith("http"):
                image = f"https://duckduckgo.com{image}"

            return {
                "abstract": abstract,
                "url": result.get("AbstractURL", ""),
                "source": result.get("AbstractSource", ""),
                "heading": result.get("Heading", query),
                "image": image
            }
        except Exception as e:
            self._log("WARN", f"Echec DDG Instant Answer: {e}")
            return None

    # ─────────────────────────────────────────────────────────────────────
    # SYNTHESIS — delegated to the synthesizer.py module (GeoSynthesizer)
    # Methods removed in v1.1: _synthesize_from_media, _heuristic_synthesis,
    # _ollama_available, _call_ollama
    # Now handled by: _synthesizer.synthesize()
    # ─────────────────────────────────────────────────────────────────────

    # ─────────────────────────────────────────────────────────────────────
    # TYPE DETECTION
    # ─────────────────────────────────────────────────────────────────────

    def _detect_entity_type(self, data):
        """Detection du type d'entite depuis les donnees Wikipedia"""
        description = (data.get("description") or "").lower()
        return self._detect_entity_type_from_description(description)

    def _detect_entity_type_from_description(self, description):
        """Detection du type d'entite depuis une description textuelle
        Utilisable avec Wikipedia ou Wikidata
        """
        description = description.lower()

        person_kw = [
            "homme politique", "femme politique", "president", "politician",
            "premier ministre", "roi", "reine", "general", "dirigeant",
            "chef d'etat", "chef d'état",
            "homme d'etat", "homme d'état", "femme d'état", "femme d'etat",
            "journaliste", "ecrivain", "scientifique", "militaire",
            "ne en", "nee en", "born in", "ne le", "nee le",
            "ne le", "nee le"
        ]
        for kw in person_kw:
            if kw in description:
                return "person"

        country_kw = [
            "pays", "republic", "royaume", "country", "etat souverain",
            "nation", "federation", "emirat"
        ]
        for kw in country_kw:
            if kw in description:
                return "country"

        org_kw = [
            "entreprise", "corporation", "company", "organisation",
            "institution", "alliance", "agence"
        ]
        for kw in org_kw:
            if kw in description:
                return "organization"

        return "concept"

    # ─────────────────────────────────────────────────────────────────────
    # CONTENT_TYPE DETECTION
    # ─────────────────────────────────────────────────────────────────────

    def _detect_content_type(self, query, wiki_summary, media_sources, entity_type):
        """Detecter le type de contenu reel pour adapter le prompt de synthese.
        Priorite : signaux dans les sources > entity_type devine.
        Retourne : fiction, person, country, organization, ou default.
        """
        # Combine source text for analysis
        combined = query.lower()
        if wiki_summary:
            combined += " " + wiki_summary[:500].lower()
        if media_sources:
            combined += " " + " ".join(
                s.get("text", "")[:300] for s in media_sources[:3]
            ).lower()

        query_lower = query.lower()

        # v4.5 — Types confirmed by Wikipedia/Wikidata are reliable
        # and must NOT be overridden by fiction signals
        # (e.g. Étienne Klein = physicist who writes books → person, not fiction),
        # except for lowercase multi-word queries (false positives)
        if entity_type == "person":
            q_words = query.strip().split()
            if (len(q_words) >= 2
                    and not all(w[0].isupper() for w in q_words if len(w) > 1)):
                # Lowercase multi-word = probably not a name;
                # keep detecting instead of forcing "person"
                pass
            else:
                return "person"
        if entity_type == "country":
            return "country"
        if entity_type == "organization":
            return "organization"

        # Fiction / cultural work (AFTER the confirmed types)
        fiction_signals = [
            "roman", "livre", "auteur de fiction", "personnage",
            "fiction", "fantasy", "heros", "saga", "tome",
            "prophetie", "prophétie", "chapitre", "recit", "récit",
            "novella", "science-fiction", "dystopie", "conte",
            "imperia", "gidzar", "darath"
        ]
        fiction_matches = sum(
            1 for s in fiction_signals
            if s in combined or s in query_lower
        )
        if fiction_matches >= 2:
            return "fiction"
        # A single strong signal in the query title
        if any(s in query_lower for s in
               ["prophetie", "prophétie", "saga", "tome", "roman"]):
            return "fiction"

        # Extra country detection
        country_signals = [
            "capitale", "population", "superficie",
            "president", "président", "gouvernement", "frontiere", "frontière"
        ]
        if sum(1 for s in country_signals if s in combined) >= 2:
            return "country"

        # Extra person detection
        person_signals = [
            "ne le", "né le", "ne a", "né à",
            "journaliste", "psychiatre", "auteur", "ecrivain", "écrivain",
            "expert", "entrepreneur", "il est", "elle est"
        ]
        if sum(1 for s in person_signals if s in combined) >= 2:
            return "person"

        return "default"

    # ─────────────────────────────────────────────────────────────────────
    # UTILITIES
    # ─────────────────────────────────────────────────────────────────────

    def _fetch_url(self, url):
        """Requete HTTP GET vers une API externe"""
        try:
            req = urllib.request.Request(url, headers={
                "User-Agent": "CyberStrat/1.0 (console-intelligence-geopolitique)"
            })
            with urllib.request.urlopen(req, timeout=5) as resp:
                return json.loads(resp.read().decode("utf-8"))
        except Exception as e:
            self._log("ERROR", f"Echec requete: {url} -- {e}")
            return None

    def _cache_get(self, key):
        """Lire depuis le cache fichier"""
        filepath = os.path.join(CACHE_DIR, hashlib.md5(key.encode()).hexdigest() + ".json")
        if not os.path.exists(filepath):
            return None
        # Check freshness: evict and miss when older than CACHE_TTL
        if time.time() - os.path.getmtime(filepath) > CACHE_TTL:
            try:
                os.remove(filepath)
            except OSError:
                pass  # already evicted by a concurrent request
            return None
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                data = json.load(f)
                data["result"]["cached"] = True
                return data
        except (json.JSONDecodeError, KeyError):
            return None

    def _cache_set(self, key, data):
        """Ecrire dans le cache fichier"""
        os.makedirs(CACHE_DIR, exist_ok=True)
        filepath = os.path.join(CACHE_DIR, hashlib.md5(key.encode()).hexdigest() + ".json")
        with open(filepath, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def _send_json(self, code, data):
        """Envoyer une reponse JSON"""
        self.send_response(code)
        self._send_cors_headers()
        self.send_header("Content-Type", "application/json; charset=utf-8")
        self.end_headers()
        self.wfile.write(json.dumps(data, ensure_ascii=False).encode("utf-8"))

    def _send_cors_headers(self):
        """Headers CORS"""
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
        self.send_header("Access-Control-Allow-Headers", "Content-Type")

    def _log(self, level, message):
        """Log console prefixe"""
        print(f"[CYBER-STRAT][{level}] {message}")

    def log_message(self, format, *args):
        """Override pour prefixer les logs HTTP"""
        print(f"[CYBER-STRAT][HTTP] {args[0]}")


def main():
    print(f"""
    ╔═══════════════════════════════════════════════════════╗
    ║   CYBER STRAT v4.8 — Serveur de Developpement        ║
    ║   Port : {PORT}                                         ║
    ║   URL  : http://{HOST}:{PORT}                            ║
    ║   Pipeline : Wiki + Medias + Wikidata + DDG + Geo    ║
    ║   Synthese : Groq > Ollama > heuristique            ║
    ║   Geo      : Nominatim + Leaflet                     ║
    ╚═══════════════════════════════════════════════════════╝
    """)

    server = http.server.HTTPServer((HOST, PORT), CyberStratHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("\n[CYBER-STRAT] Arret du serveur.")
        server.server_close()


if __name__ == "__main__":
    main()
