# dotfiles/.codex/config.toml
# Exported 2026-02-27 11:41:16 +01:00 — 138 lines, 3.5 KiB, TOML
# doc: https://developers.openai.com/codex/config-advanced
# https://developers.openai.com/codex/config-reference
# Ask before escalating beyond what the sandbox allows.
approval_policy = "on-request"
# Authenticate with an API key rather than a ChatGPT account.
preferred_auth_method = "apikey"
# model = "oca/gpt-5-codex"
# profile = "gpt-5-codex"
# model = "oca/gpt-5.1-codex"
# profile = "gpt-5-1-codex"
# model = "oca/gpt-5.1-codex-mini"
# profile = "gpt-5-1-codex-mini"
# Active selection. The profile below already pins this same model, so the
# top-level `model` key is effectively redundant — keep the pair in sync.
model = "oca/gpt-5.3-codex"
profile = "gpt-5-3-codex"
# NOTE(review): newer Codex versions expose web search as `[tools] web_search`;
# confirm this top-level `web_search_request` key is still recognized.
web_search_request = true
# NOTE(review): `trust_level` is normally set per-project under
# [projects."<path>"]; verify a root-level key has any effect here.
trust_level = "trusted"
# Commands may write inside the workspace without prompting.
sandbox_mode = "workspace-write"
[features]
# Opt in to the experimental multi-agent feature set.
multi_agent = true

[tui]
# Always render the TUI in the terminal's alternate screen buffer.
alternate_screen = "always"

[sandbox_workspace_write]
# Permit outbound network access from inside the workspace-write sandbox.
network_access = true
# Extra paths writable from the sandbox, beyond the workspace itself.
writable_roots = ["/Users/jetpac/.codex/skills"]

[analytics]
# Do not send usage analytics.
enabled = false
# Oracle Code Assist endpoint (litellm gateway), Responses wire protocol.
[model_providers.oca-responses]
base_url = "https://code-internal.aiservice.us-chicago-1.oci.oraclecloud.com/20250206/app/litellm"
http_headers = { "client" = "codex-cli", "client-version" = "0" }
# NOTE(review): `model` is not a documented model-provider field (providers take
# name/base_url/env_key/wire_api/http_headers); this key is likely ignored — verify.
model = "oca/gpt5"
name = "Oracle Code Assist Responses"
wire_api = "responses"
# Oracle Code Assist endpoint (litellm gateway), Chat Completions wire protocol.
# Fix: this provider previously declared wire_api = "responses", which made it
# byte-identical to oca-responses apart from its display name — defeating the
# point of a separate "Chat" provider for the grok/gpt-4.1 profiles below.
# If those models do work over the responses API on this gateway, revert this
# and consider deleting the duplicate provider instead.
[model_providers.oca-chat]
base_url = "https://code-internal.aiservice.us-chicago-1.oci.oraclecloud.com/20250206/app/litellm"
http_headers = { "client" = "codex-cli", "client-version" = "0" }
# NOTE(review): `model` is not a documented model-provider field; likely ignored — verify.
model = "oca/gpt5"
name = "Oracle Code Assist Chat"
wire_api = "chat"
# --- Profiles routed through the oca-chat provider ---

[profiles.grok-4]
model = "oca/grok4"
model_provider = "oca-chat"
review_model = "oca/grok4"

[profiles.grok-4-fast-reasoning]
model = "oca/grok4-fast-reasoning"
model_provider = "oca-chat"
review_model = "oca/grok4-fast-reasoning"

[profiles.grok-code-fast-1]
model = "oca/grok-code-fast-1"
model_provider = "oca-chat"
review_model = "oca/grok-code-fast-1"

[profiles.gpt-4-1]
model = "oca/gpt-4.1"
model_provider = "oca-chat"
review_model = "oca/gpt-4.1"
# --- GPT-5 family profiles ---

[profiles.gpt-5]
model = "oca/gpt5"
model_provider = "oca-responses"
review_model = "oca/gpt5"

# NOTE(review): gpt-5-1 routes via oca-chat while its siblings use
# oca-responses — confirm this asymmetry is intentional.
[profiles.gpt-5-1]
model = "oca/gpt-5.1"
model_provider = "oca-chat"
review_model = "oca/gpt-5.1"

[profiles.gpt-5-2]
model = "oca/gpt-5.2"
model_provider = "oca-responses"
review_model = "oca/gpt-5.2"

[profiles.gpt-5-codex]
model = "oca/gpt-5-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5-codex"
personality = "pragmatic"
# --- GPT-5.1 Codex profiles (same model, varying reasoning effort) ---

[profiles.gpt-5-1-codex-high]
model = "oca/gpt-5.1-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5.1-codex"
personality = "pragmatic"
model_reasoning_effort = "high"

[profiles.gpt-5-1-codex]
model = "oca/gpt-5.1-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5.1-codex"
personality = "pragmatic"
model_reasoning_effort = "medium"

[profiles.gpt-5-1-codex-mini]
model = "oca/gpt-5.1-codex-mini"
model_provider = "oca-responses"
review_model = "oca/gpt-5.1-codex-mini"
personality = "pragmatic"
# --- GPT-5.2 / GPT-5.3 Codex profiles ---

[profiles.gpt-5-2-codex-high]
model = "oca/gpt-5.2-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5.2-codex"
personality = "pragmatic"
model_reasoning_effort = "high"

[profiles.gpt-5-2-codex]
model = "oca/gpt-5.2-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5.2-codex"
personality = "pragmatic"
model_reasoning_effort = "medium"

[profiles.gpt-5-2-codex-mini]
model = "oca/gpt-5.2-codex-mini"
model_provider = "oca-responses"
review_model = "oca/gpt-5.2-codex-mini"
personality = "pragmatic"

# Default profile (selected by the top-level `profile` key).
[profiles.gpt-5-3-codex]
model = "oca/gpt-5.3-codex"
model_provider = "oca-responses"
review_model = "oca/gpt-5.3-codex"
personality = "pragmatic"
model_reasoning_effort = "medium"
# Local Playwright MCP server for browser automation.
[mcp_servers.playwright]
name = "playwright"
command = "/Users/jetpac/.codex/bin/playwright-mcp"
args = []
# NOTE(review): some Codex versions take `startup_timeout_ms` rather than
# `startup_timeout_sec`; confirm against the installed CLI's config reference.
startup_timeout_sec = 30
# PLAYWRIGHT_BROWSERS_PATH=0 tells Playwright to use browsers installed
# inside the package directory instead of the shared cache.
env = { PLAYWRIGHT_BROWSERS_PATH = "0" }
# Adjust args if you need --browser=firefox, --headed, or custom launch flags.