# Start the stack in the background.
# NOTE(review): sudo is used here but not on the exec below — keep both
# consistent with whether your user is in the "docker" group.
sudo docker compose up -d

# Get a shell into ollama container
docker exec -it ollama-poc-ollama-1 bash

# List models already downloaded
ollama list

# Pull or update models.
# qwen2.5-coder:7b is required: it is the model referenced by the codex
# config and the codex invocation further below.
ollama pull llama3.1:8b
ollama pull codellama:7b
ollama pull qwen2.5-coder:7b

# Smoke-test the API from the host ("stream": false returns a single
# JSON object instead of a stream of chunks).
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.1:8b",
  "prompt": "Which programming languages do you know?",
  "stream": false
}'
- [Install codex cli from here](https://github.com/openai/codex-cli)
- Set the configuration file ~/.codex/config.json to:
{
"model": "qwen2.5-coder:7b",
"provider": "ollama",
"providers": {
"ollama": {
"name": "Ollama",
"baseURL": "http://localhost:11434/v1",
"envKey": "OLLAMA_API_KEY"
}
},
"history": {
"maxSize": 1000,
"saveHistory": true,
"sensitivePatterns": []
}
}
- Run codex cli
# Launch codex against the local Ollama provider defined in ~/.codex/config.json
codex --model qwen2.5-coder:7b --provider ollama