# [CYBER-STRAT] Ollama configuration: CPU-only VPS (8GB RAM, no GPU)
# Optimized for speed: small models first, short generation output

OLLAMA_URL = "http://localhost:11434"
OLLAMA_MODEL = "llama3.2:3b"

OLLAMA_MODELS_AVAILABLE = [
    "llama3.2:3b",
    "gemma2:2b",
    "mistral:latest",
    "nomic-embed-text:latest",
]
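
# Hedged sketch, not project code: verify the list above against what the
# server actually reports as installed, via Ollama's standard GET /api/tags
# endpoint. The helper name `installed_models` is illustrative.
import requests

def installed_models(url: str = OLLAMA_URL) -> list[str]:
    """Return the subset of OLLAMA_MODELS_AVAILABLE installed on the server."""
    resp = requests.get(f"{url}/api/tags", timeout=5)
    resp.raise_for_status()
    installed = {m["name"] for m in resp.json().get("models", [])}
    return [m for m in OLLAMA_MODELS_AVAILABLE if m in installed]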

# Fallback order: smallest/fastest first (CPU-only).
# Primary llama3.2:3b (2.0GB) falls back to gemma2:2b (1.6GB);
# mistral (4.4GB) is excluded from the chain as too slow on CPU.
OLLAMA_FALLBACK_CHAIN = ["gemma2:2b"]
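
# Hedged sketch of consuming the chain: try the primary model, then each
# fallback in order, through Ollama's POST /api/generate endpoint. The
# function name `generate_with_fallback` is illustrative, not project code.
def generate_with_fallback(prompt: str) -> str:
    """Try OLLAMA_MODEL first, then each model in OLLAMA_FALLBACK_CHAIN."""
    for model in [OLLAMA_MODEL, *OLLAMA_FALLBACK_CHAIN]:
        try:
            resp = requests.post(
                f"{OLLAMA_URL}/api/generate",
                json={"model": model, "prompt": prompt, "stream": False},
                timeout=120,
            )
            resp.raise_for_status()
            return resp.json()["response"]
        except requests.RequestException:
            continue  # this model failed or is missing; try the next one
    raise RuntimeError("all models in the fallback chain failed")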

MAX_SUMMARY_CHARS = 4500   # hard cap on summary length (characters)
MIN_SUMMARY_CHARS = 800    # minimum acceptable summary length (characters)
OLLAMA_TEMPERATURE = 0.3   # low temperature for more deterministic output
OLLAMA_NUM_PREDICT = 500   # token cap per generation; keeps CPU latency bounded
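
# Hedged sketch of how these limits could be applied. Ollama's /api/generate
# accepts "temperature" and "num_predict" inside its "options" object; the
# length-clamping policy below is an assumption, not this project's logic.
def summarize(text: str) -> str | None:
    """Generate a summary, enforcing the character bounds defined above."""
    resp = requests.post(
        f"{OLLAMA_URL}/api/generate",
        json={
            "model": OLLAMA_MODEL,
            "prompt": f"Summarize the following text:\n\n{text}",
            "stream": False,
            "options": {
                "temperature": OLLAMA_TEMPERATURE,
                "num_predict": OLLAMA_NUM_PREDICT,
            },
        },
        timeout=120,
    )
    resp.raise_for_status()
    summary = resp.json()["response"].strip()
    if len(summary) < MIN_SUMMARY_CHARS:
        return None  # below the minimum: treat as a failed summary
    return summary[:MAX_SUMMARY_CHARS]  # truncate to the hard cap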
