Class: Ace::LLM::Providers::CLI::CodexClient

Inherits:
Organisms::BaseClient
  • Object
show all
Includes:
CliArgsSupport
Defined in:
lib/ace/llm/providers/cli/codex_client.rb

Overview

Client for interacting with the Codex CLI (OpenAI). Provides access to Codex models through subprocess execution.

Constant Summary collapse

API_BASE_URL =

Not used for CLI interaction but required by BaseClient

"https://api.openai.com"
DEFAULT_GENERATION_CONFIG =
{}.freeze
DEFAULT_MODEL =

Default model (can be overridden by config)

"gpt-5.4"

Class Method Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(model: nil, **options) ⇒ CodexClient

Returns a new instance of CodexClient.



36
37
38
39
40
41
42
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 36

# Builds a CLI-backed client. No API key is needed (see #needs_credentials?).
#
# @param model [String, nil] model id; falls back to DEFAULT_MODEL when nil
# @param options [Hash] extra options; :generation_config is extracted here
def initialize(model: nil, **options)
  # Deliberately does not call super — BaseClient#initialize requires an API key.
  @options = options
  @model = model || DEFAULT_MODEL
  # NOTE(review): a fresh {} is used rather than the frozen DEFAULT_GENERATION_CONFIG,
  # presumably so callers may mutate it — confirm before unifying.
  @generation_config = options[:generation_config] || {}
  @skill_name_reader = Molecules::SkillNameReader.new
end

Class Method Details

.provider_nameObject

Provider registration - auto-registers as "codex"



29
30
31
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 29

# Registry key for this provider. BaseClient auto-registers subclasses under
# the value returned here, so this client is looked up as "codex".
#
# @return [String] the provider registration name
def self.provider_name
  "codex"
end

Instance Method Details

#build_interactive_invocation(messages, **options) ⇒ Object



89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 89

# Assembles everything needed to launch Codex interactively, without running it.
#
# @param messages [Array<Hash>] conversation messages
# @param options [Hash] may carry :subprocess_env and :working_dir
# @return [Hash] :command, :env, :working_dir, and the final :prompt
def build_interactive_invocation(messages, **options)
  validate_codex_availability!

  raw_prompt = format_messages_as_prompt(messages)
  proc_env = options[:subprocess_env]
  dir = Atoms::ExecutionContext.resolve_working_dir(
    working_dir: options[:working_dir],
    subprocess_env: proc_env
  )
  # Skill slash-commands are rewritten relative to the resolved directory.
  final_prompt = rewrite_skill_commands(raw_prompt, working_dir: dir)

  environment = proc_env ? proc_env.to_h.dup : {}
  # Point HOME at the overlay only when the startup policy provides one.
  if (overlay = Atoms::InteractiveStartupPolicy.codex_overlay_home(
    working_dir: dir,
    subprocess_env: proc_env
  ))
    environment["HOME"] = overlay
  end

  {
    command: build_codex_interactive_command(final_prompt, options, working_dir: dir),
    env: environment,
    working_dir: dir,
    prompt: final_prompt
  }
end

#generate(messages, **options) ⇒ Hash

Generate a response from the LLM

Parameters:

  • messages (Array<Hash>)

    Conversation messages

  • options (Hash)

    Generation options

Returns:

  • (Hash)

    Response with text and metadata



53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 53

# Runs a one-shot Codex generation through the CLI subprocess.
#
# @param messages [Array<Hash>] conversation messages
# @param options [Hash] generation options (:subprocess_env, :working_dir, ...)
# @return [Hash] parsed response with text and metadata
def generate(messages, **options)
  validate_codex_availability!

  raw_prompt = format_messages_as_prompt(messages)
  proc_env = options[:subprocess_env]
  dir = Atoms::ExecutionContext.resolve_working_dir(
    working_dir: options[:working_dir],
    subprocess_env: proc_env
  )
  # Skill slash-commands are rewritten relative to the resolved directory.
  final_prompt = rewrite_skill_commands(raw_prompt, working_dir: dir)

  command = build_codex_command(final_prompt, options, working_dir: dir)
  out, err, exit_status = execute_codex_command(command, final_prompt, options)
  parse_codex_response(out, err, exit_status, final_prompt, options)
rescue => e
  # Any StandardError from the subprocess pipeline is funneled to one handler.
  handle_codex_error(e)
end

#interactive_supported?Boolean

Returns:

  • (Boolean)


85
86
87
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 85

# Whether this client can hand control to an interactive terminal session.
#
# @return [Boolean] always true for the Codex CLI
def interactive_supported?
  true
end

#list_modelsObject

List available Codex models



74
75
76
77
78
79
80
81
82
83
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 74

# Lists the Codex models this CLI client knows about.
# This is a static catalog; the authoritative model list comes from YAML config.
#
# @return [Array<Hash>] one hash per model (:id, :name, :description, :context_size)
def list_models
  [
    ["gpt-5.3-codex", "GPT-5.3 Codex", "Code-specialized Codex model"],
    ["gpt-5.3-codex-spark", "GPT-5.3 Codex Spark", "Faster Codex model"],
    ["gpt-5.4", "GPT-5.4", "Advanced Codex model"],
    ["gpt-5.4-mini", "GPT-5.4 Mini", "Smaller, faster Codex model"]
  ].map do |id, name, description|
    # Every catalog entry shares the same context window.
    {id: id, name: name, description: description, context_size: 128_000}
  end
end

#needs_credentials?Boolean

Override to indicate this client doesn’t need API credentials

Returns:

  • (Boolean)


45
46
47
# File 'lib/ace/llm/providers/cli/codex_client.rb', line 45

# BaseClient normally requires an API key; the Codex CLI manages its own
# authentication, so this override disables the credential check.
#
# @return [Boolean] always false
def needs_credentials?
  false
end