# Codex CLI configuration.
# docs: https://developers.openai.com/codex/config-advanced
#       https://developers.openai.com/codex/config-reference

approval_policy = "on-request"
preferred_auth_method = "apikey"

# Alternate model/profile pairs — swap with the active pair below.
# model = "oca/gpt-5-codex"        # profile = "gpt-5-codex"
# model = "oca/gpt-5.1-codex"      # profile = "gpt-5-1-codex"
# model = "oca/gpt-5.1-codex-mini" # profile = "gpt-5-1-codex-mini"
model = "gpt-5.4"
profile = "gpt-5-4"

web_search_request = true
# NOTE(review): per-project trust is normally declared under
# [projects."<path>"] (see end of file) — confirm a root-level
# trust_level is honored by the config reference.
trust_level = "trusted"
sandbox_mode = "workspace-write"

[features]
multi_agent = true

[agents]
max_threads = 6
max_depth = 2

[tui]
alternate_screen = "always"

[sandbox_workspace_write]
network_access = true
writable_roots = ["/Users/jetpac/.codex/skills"]

[analytics]
enabled = false

# Two provider entries point at the same Oracle Code Assist LiteLLM
# endpoint; both declare wire_api = "responses".
[model_providers.oca-responses]
base_url = "https://code-internal.aiservice.us-chicago-1.oci.oraclecloud.com/20250206/app/litellm"
http_headers = { client = "codex-cli", client-version = "0" }
model = "gpt5"
name = "Oracle Code Assist Responses"
wire_api = "responses"

[model_providers.oca-chat]
base_url = "https://code-internal.aiservice.us-chicago-1.oci.oraclecloud.com/20250206/app/litellm"
http_headers = { client = "codex-cli", client-version = "0" }
model = "gpt5"
name = "Oracle Code Assist Chat"
wire_api = "responses"

[profiles.grok-4]
model = "grok4"
model_provider = "oca-chat"
review_model = "grok4"

[profiles.grok-4-fast-reasoning]
model = "grok4-fast-reasoning"
model_provider = "oca-chat"
review_model = "grok4-fast-reasoning"

[profiles.grok-code-fast-1]
model = "grok-code-fast-1"
model_provider = "oca-chat"
review_model = "grok-code-fast-1"

[profiles.gpt-4-1]
model = "gpt-4.1"
model_provider = "oca-chat"
review_model = "gpt-4.1"

[profiles.gpt-5]
model = "gpt5"
model_provider = "oca-responses"
review_model = "gpt5"

[profiles.gpt-5-1]
model = "gpt-5.1"
model_provider = "oca-chat"
review_model = "gpt-5.1"

[profiles.gpt-5-2]
model = "gpt-5.2"
model_provider = "oca-responses"
review_model = "gpt-5.2"

[profiles.gpt-5-codex]
model = "gpt-5-codex"
model_provider = "oca-responses"
review_model = "gpt-5-codex"
personality = "pragmatic"

[profiles.gpt-5-1-codex-high]
model = "gpt-5.1-codex"
model_provider = "oca-responses"
review_model = "gpt-5.1-codex"
personality = "pragmatic"
model_reasoning_effort = "high"

[profiles.gpt-5-1-codex]
model = "gpt-5.1-codex"
model_provider = "oca-responses"
review_model = "gpt-5.1-codex"
personality = "pragmatic"
model_reasoning_effort = "medium"

[profiles.gpt-5-1-codex-mini]
model = "gpt-5.1-codex-mini"
model_provider = "oca-responses"
review_model = "gpt-5.1-codex-mini"
personality = "pragmatic"

[profiles.gpt-5-2-codex-high]
model = "gpt-5.2-codex"
model_provider = "oca-responses"
review_model = "gpt-5.2-codex"
personality = "pragmatic"
model_reasoning_effort = "high"

[profiles.gpt-5-2-codex]
model = "gpt-5.2-codex"
model_provider = "oca-responses"
review_model = "gpt-5.2-codex"
personality = "pragmatic"
model_reasoning_effort = "medium"

[profiles.gpt-5-2-codex-mini]
model = "gpt-5.2-codex-mini"
model_provider = "oca-responses"
review_model = "gpt-5.2-codex-mini"
personality = "pragmatic"

[profiles.gpt-5-3-codex]
model = "gpt-5.3-codex"
model_provider = "oca-responses"
review_model = "gpt-5.3-codex"
personality = "pragmatic"
model_reasoning_effort = "high"

[profiles.gpt-5-4]
model = "gpt-5.4"
model_provider = "oca-responses"
review_model = "gpt-5.4"
personality = "pragmatic"
model_reasoning_effort = "high"

[mcp_servers.playwright]
command = "/Users/jetpac/.codex/bin/playwright-mcp"
startup_timeout_sec = 30.0
# Adjust args if you need --browser=firefox, --headed, or custom launch flags.
# NOTE(review): env vars for an MCP server normally go in an `env` table
# (env = { PLAYWRIGHT_BROWSERS_PATH = "0" }); confirm a bare key here is read.
PLAYWRIGHT_BROWSERS_PATH = "0"

[mcp_servers.oci-kb]
# NOTE(review): no active `command` — both candidates below are commented out,
# so this server cannot launch. The args match a `uvx` invocation; uncomment
# the intended command to enable it.
# command = "/Users/jetpac/.local/bin/ocikb-mcp-server"
# command = "uvx"
args = [
    "--index",
    "https://artifactory.oci.oraclecorp.com/api/pypi/global-release-pypi/simple/",
    "--from",
    "oci-kb-mcp@latest",
    "ocikb-mcp-server",
]
startup_timeout_sec = 30.0
[mcp_servers.devops_mcp]
command = "/Users/jetpac/bin/devops-mcp-wrapper.sh"
# NOTE(review): presumably an allowlist of environment variables forwarded to
# the server process — the standard key for setting values is an `env` table;
# confirm `env_vars` is supported by the config reference.
env_vars = ["OP_TOKEN", "OPERATOR_ACCESS_TOKEN"]
startup_timeout_sec = 180.0

[projects."/Users/jetpac/bin"]
trust_level = "trusted"