Module: Kernai::Kernel
- Defined in:
- lib/kernai/kernel.rb
Constant Summary collapse
# Prompt fragment injected into the system instructions when structured
# workflows are enabled: tells the model how to emit a <block type="plan">
# and what execution semantics to expect. (NOTE(review): line breaks
# reconstructed from a collapsed doc rendering — verify against the
# original lib/kernai/kernel.rb.)
WORKFLOW_DOCUMENTATION = <<~DOC
  Structured workflow plans

  Emit <block type="plan"> containing JSON:

  {
    "goal": "string",
    "strategy": "parallel | sequential | mixed",
    "tasks": [
      {
        "id": "string (required, unique)",
        "input": "string (required, the sub-agent prompt)",
        "parallel": true | false,
        "depends_on": ["other_task_id", "..."]
      }
    ]
  }

  Rules:
  - Each task runs as an isolated sub-agent inheriting provider, model and skills.
  - Tasks with parallel=true run concurrently; others run sequentially.
  - A task waits for every id listed in depends_on to finish first.
  - Sub-agents cannot themselves create nested structured plans.
  - Invalid plans are ignored (fail-safe).

  Results are injected back as:
  <block type="result" name="tasks">{"task_id": "result", ...}</block>

  Use /tasks to inspect current state.
DOC
Class Method Summary collapse
-
.run(agent, input, provider: nil, history: [], recorder: nil, context: nil, on_task_complete: nil, &callback) ⇒ Object
The main agent loop.
Class Method Details
.run(agent, input, provider: nil, history: [], recorder: nil, context: nil, on_task_complete: nil, &callback) ⇒ Object
The main agent loop. Cohesive by design: every branch of the dispatch — workflow vs command vs protocol vs final vs informational-only vs chatbot fallback — belongs here so the execution contract is visible in one place. Splitting would force the reader to chase state across multiple methods without making the logic simpler. rubocop:disable Metrics/AbcSize, Metrics/MethodLength, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/BlockLength
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
# File 'lib/kernai/kernel.rb', line 54

# The main agent loop. Cohesive by design: every branch of the dispatch —
# workflow vs command vs protocol vs final vs informational-only vs chatbot
# fallback — belongs here so the execution contract is visible in one place.
#
# @param agent [Object] the agent definition (instructions, model, max_steps, generation)
# @param input [Object] user input; any Media objects in it are registered up front
# @param provider [Object, nil] LLM provider; resolved via resolve_provider when nil
# @param history [Array<Hash>] prior turns as {role:, content:} hashes
# @param recorder [Object, nil] trace recorder; falls back to Kernai.config.recorder
# @param context [Context, nil] run context; a fresh Context is created when nil
# @param on_task_complete [Proc, nil] forwarded to execute_workflow for per-task hooks
# @yield [Event] streaming/lifecycle events for the caller
# @return [Object] the final result content
# @raise [ProviderError] when no provider can be resolved
# @raise [MaxStepsReachedError] when max_steps elapse without a result
#
# NOTE(review): the `messages` local was stripped by the doc extraction; its
# name is reconstructed here — confirm against lib/kernai/kernel.rb.
# rubocop:disable Metrics/AbcSize, Metrics/MethodLength, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/BlockLength
def run(agent, input, provider: nil, history: [], recorder: nil, context: nil, on_task_complete: nil, &callback)
  provider = resolve_provider(agent, provider)
  raise ProviderError, 'No provider configured' unless provider

  rec = recorder || Kernai.config.recorder
  ctx = context || Context.new

  # Any Media the caller passes directly in `input` is registered in
  # the store up front so sub-agents and skills can look it up by id
  # throughout the run.
  Array(input).grep(Media).each { |m| ctx.media_store.put(m) }

  messages = [
    Message.new(role: :system, content: agent.resolve_instructions(workflow_enabled: ctx.root?)),
    *history.map { |m| Message.new(role: m[:role], content: m[:content]) },
    Message.new(role: :user, content: input)
  ]

  result = nil

  agent.max_steps.times do |step|
    # Hot reload: update system message each step
    messages[0] = Message.new(
      role: :system,
      content: agent.resolve_instructions(workflow_enabled: ctx.root?)
    )

    stream_parser = StreamParser.new
    blocks = []
    setup_stream_callbacks(stream_parser, blocks, callback)

    Kernai.logger.debug(event: 'llm.request', step: step + 1, model: agent.model)
    record(rec, ctx, step: step, event: :messages_sent, data: messages.map(&:to_h))

    llm_response = provider.call(
      messages: messages.map(&:to_h),
      model: agent.model,
      generation: agent.generation
    ) { |chunk| stream_parser.push(chunk) }
    stream_parser.flush

    Kernai.logger.debug(event: 'llm.response', step: step + 1)
    record(rec, ctx, step: step, event: :llm_response, data: llm_response.to_h)

    messages << Message.new(role: :assistant, content: llm_response.content)

    final_block = blocks.find { |b| b.type == :final }
    command_blocks = blocks.select { |b| b.type == :command }
    plan_blocks = blocks.select { |b| b.type == :plan }
    protocol_blocks = blocks.select { |b| Protocol.registered?(b.type) }
    record(rec, ctx, step: step, event: :blocks_parsed, data: blocks.map(&:to_h))

    workflow_plan, consumed_plan_block = detect_workflow_plan(plan_blocks, ctx, rec, step)
    emit_informational_blocks(blocks, consumed_plan_block, step, rec, ctx, callback)

    if workflow_plan
      messages << execute_workflow(
        workflow_plan,
        agent: agent, provider: provider, ctx: ctx, rec: rec,
        step: step, callback: callback, on_task_complete: on_task_complete
      )
    elsif command_blocks.any? || protocol_blocks.any?
      # Actionable blocks take priority over `final`: execute them in
      # the order the LLM emitted them so command/protocol interleaving
      # stays deterministic, then continue the loop so the model can
      # react to their results.
      actionable = (command_blocks + protocol_blocks).sort_by { |b| blocks.index(b) }
      actionable.each do |block|
        result_msg = if command_blocks.include?(block)
                       execute_command(block, agent, ctx, rec, step, callback)
                     else
                       execute_protocol(block, agent, ctx, rec, step, callback)
                     end
        messages << result_msg
      end
    elsif final_block
      result = final_block.content
      Kernai.logger.info(event: 'agent.complete', steps: step + 1)
      record(rec, ctx, step: step, event: :result, data: result)
      callback&.call(Event.new(:final, result))
      break
    elsif informational_only?(blocks)
      # The agent emitted <plan> and/or <json> but nothing actionable
      # and no <final>. By the declared semantics of those blocks
      # (reasoning/side-channel, not terminal) the turn is NOT over:
      # small models often split "think" and "act" across turns. We
      # inject a corrective feedback and let the loop run another
      # step so the agent can follow up with an actual action.
      messages << handle_informational_only(blocks, rec, ctx, step, callback)
    else
      # Truly no blocks — the agent replied with plain prose. Treat
      # that as a chatbot-style final answer and return it as-is.
      result = llm_response.content
      record(rec, ctx, step: step, event: :result, data: result)
      break
    end
  end

  raise MaxStepsReachedError, "Agent reached maximum steps (#{agent.max_steps})" if result.nil?

  result
end
# rubocop:enable Metrics/AbcSize, Metrics/MethodLength, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/BlockLength