# Codex CLI configuration.

model = "gpt-5.3-codex"
model_reasoning_effort = "medium"
personality = "pragmatic"
tool_output_token_limit = 25000

# Leave room for native compaction near the 272–273k context window.
# Formula: 273000 - (tool_output_token_limit + 15000)
# With tool_output_token_limit=25000 ⇒ 273000 - (25000 + 15000) = 233000
model_auto_compact_token_limit = 233000

suppress_unstable_features_warning = true

# Feature flags for experimental/unstable capabilities.
[features]
ghost_commit = false
unified_exec = true
apply_patch_freeform = true
web_request = true
skills = true
shell_snapshot = true

# Trusted project roots. Keys are absolute paths (quoted: they contain
# characters outside the bare-key set), listed parent-before-child.
[projects."/home/sudacode/.codex/skills"]
trust_level = "trusted"

[projects."/home/sudacode/.config/mpv"]
trust_level = "trusted"

[projects."/home/sudacode/.config/mpv/script-opts"]
trust_level = "trusted"

[projects."/home/sudacode/.config/opencode/commands"]
trust_level = "trusted"

[projects."/home/sudacode/projects"]
trust_level = "trusted"

[projects."/home/sudacode/projects/japanese/GameSentenceMiner"]
trust_level = "trusted"

[projects."/home/sudacode/projects/japanese/SubMiner"]
trust_level = "trusted"

[projects."/home/sudacode/projects/japanese/SubMiner/texthooker-ui"]
trust_level = "trusted"