# Local LLM configuration
# Copy to .env and fill in your values

# Model to run
MODEL_NAME=llama3

# Port to expose
PORT=11434