# Source: home-lab/.aider.conf.yml
# Author: Geir Okkenhaug Jerstad (commit fb45c41a93, "before")
# Date: 2025-06-24 09:19:33 +02:00
# File stats: 99 lines, no EOL, 2.3 KiB, YAML
---
##########################################################
# Aider Configuration for Home Lab Project
# Place in your home dir, or at the root of your git repo.
##########################################################

#############
# Main model:
## Specify the model to use for the main chat (using Ollama with qwen2.5-coder)
model: ollama/qwen2.5-coder:7b

########################
# API Keys and settings:
## Set environment variables for Ollama
set-env:
  - OLLAMA_API_BASE=http://grey-area:11434

#################
# Model settings:
## Specify a file with aider model settings for unknown models
model-settings-file: .aider.model.settings.yml

## Model aliases for convenience (invoke with e.g. `aider --model fast`)
alias:
  - "code:ollama/qwen2.5-coder:7b"
  - "chat:ollama/llama3.1:8b"
  - "reason:ollama/deepseek-r1:latest"
  - "task:ollama/taskmaster-qwen:latest"
  - "research:ollama/research-deepseek:latest"
  - "fast:ollama/qwen3:4b"

## Specify what edit format the LLM should use
edit-format: diff

## Specify the model to use for commit messages and chat history summarization
weak-model: ollama/qwen3:4b

## Verify the SSL cert when connecting to models
## (disabled — Ollama endpoint is plain HTTP on the local network)
verify-ssl: false

## Timeout in seconds for API calls (increased for slower CPU inference)
timeout: 300

## Disable model warnings for faster startup
## NOTE(review): was declared twice in the original file (also at the very
## end); duplicate keys are invalid YAML 1.2 and most parsers silently keep
## the last occurrence — consolidated into this single entry.
show-model-warnings: false

###################
# Repomap settings:
## Suggested number of tokens to use for repo map (reduced for performance)
map-tokens: 1024

## Control how often the repo map is refreshed
map-refresh: manual

######################
# File handling:
## Auto-load convention files for this project
read:
  - CONVENTIONS.md

################
# History Files:
## Specify the chat input history file
input-history-file: .aider.input.history

## Specify the chat history file
chat-history-file: .aider.chat.history.md

#################
# Cache settings:
## Enable caching of prompts for better performance
cache-prompts: true

## Keep cache warm to reduce latency
cache-keepalive-pings: 2

###################
# Performance settings:
## Disable model checking for faster startup
check-model-accepts-settings: false

## Reduce chat history to save tokens
max-chat-history-tokens: 4096

###################
# UI/UX settings:
## Use dark mode
dark-mode: true