Class: Whoosh::AI::LLM

Inherits:
Object
  • Object
show all
Defined in:
lib/whoosh/ai/llm.rb

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(provider: "auto", model: nil, cache_enabled: true, cache_size: DEFAULT_CACHE_MAX) ⇒ LLM

Returns a new instance of LLM.



42
43
44
45
46
47
48
# File 'lib/whoosh/ai/llm.rb', line 42

# Build an LLM facade.
#
# @param provider [String] backend selector ("auto" picks the first available)
# @param model [String, nil] default model identifier; nil defers model choice
#   to each call site (which may fall back to DEFAULT_MODEL)
# @param cache_enabled [Boolean] whether chat responses are memoized
# @param cache_size [Integer] maximum entries retained by the LRU cache
def initialize(provider: "auto", model: nil, cache_enabled: true, cache_size: DEFAULT_CACHE_MAX)
  @provider      = provider
  @model         = model
  @cache_enabled = cache_enabled
  # Only allocate the LRU cache when caching is actually requested.
  @cache = nil
  @cache = LRUCache.new(cache_size) if cache_enabled
  # Lazily-initialized backend handle; populated by ensure_ruby_llm!.
  @ruby_llm = nil
end

Instance Attribute Details

#modelObject (readonly)

Returns the value of attribute model.



40
41
42
# File 'lib/whoosh/ai/llm.rb', line 40

def model
  @model
end

#providerObject (readonly)

Returns the value of attribute provider.



40
41
42
# File 'lib/whoosh/ai/llm.rb', line 40

def provider
  @provider
end

Instance Method Details

#available?Boolean

Check if LLM is available

Returns:

  • (Boolean)


110
111
112
# File 'lib/whoosh/ai/llm.rb', line 110

# Whether an LLM backend is usable (delegates to ruby_llm detection).
#
# @return [Boolean]
def available? = ruby_llm_available?

#chat(message, model: nil, system: nil, max_tokens: nil, temperature: nil, cache: nil) ⇒ Object

Chat with an LLM — returns response text



51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# File 'lib/whoosh/ai/llm.rb', line 51

# Chat with an LLM — returns the response text.
#
# @param message [String] the user message
# @param model [String, nil] per-call override of the instance default model
# @param system [String, nil] optional system instructions
# @param max_tokens [Integer, nil] response length cap (passed to the provider)
# @param temperature [Float, nil] sampling temperature (passed to the provider)
# @param cache [Boolean, nil] per-call cache override; nil falls back to the
#   instance-level cache_enabled setting
# @return [Object] the provider's response text (possibly served from cache)
def chat(message, model: nil, system: nil, max_tokens: nil, temperature: nil, cache: nil)
  use_cache = cache.nil? ? @cache_enabled : cache

  # Key on every parameter that can change the response — not just the
  # message. Keying only on model+message made calls that differed in
  # system prompt (or sampling settings) collide and return stale answers.
  if use_cache
    cache_key = "chat:#{model || @model}:#{system}:#{max_tokens}:#{temperature}:#{message}"
  end

  # Serve from cache when enabled and present
  if use_cache && @cache && (cached = @cache[cache_key])
    return cached
  end

  result = call_llm(
    messages: [{ role: "user", content: message }],
    model: model || @model,
    system: system,
    max_tokens: max_tokens,
    temperature: temperature
  )

  @cache[cache_key] = result if use_cache && @cache

  result
end

#extract(text, schema:, model: nil, prompt: nil) ⇒ Object

Extract structured data — returns validated hash



74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# File 'lib/whoosh/ai/llm.rb', line 74

# Extract structured data from free-form text — returns a validated hash.
#
# @param text [String] the input text to extract from
# @param schema [Object] schema object responding to #validate (project type)
# @param model [String, nil] per-call override of the instance default model
# @param prompt [String, nil] custom system prompt; defaults to a strict
#   JSON-extraction instruction built from the schema description
# @return [Object] the schema-validated data
# @raise [Errors::ValidationError] when the LLM output fails schema validation
def extract(text, schema:, model: nil, prompt: nil)
  # Describe the schema up front (unconditionally, matching call order)
  schema_desc = describe_schema(schema)
  system_prompt = prompt || "Extract structured data from the text. Return ONLY valid JSON matching this schema:\n#{schema_desc}"

  # Extraction must never reuse a cached chat answer
  raw = chat(text, model: model, system: system_prompt, cache: false)

  # Pull the JSON payload out of the (possibly chatty) reply, then decode
  parsed = Serialization::Json.decode(extract_json(raw))

  result = schema.validate(parsed)
  raise Errors::ValidationError, result.errors unless result.success?

  result.data
end

#stream(message, model: nil, system: nil, &block) ⇒ Object

Stream an LLM response — yields each chunk as ruby_llm produces it. The block receives a RubyLLM::Chunk (a Message subclass with #content). Returns the final response message after the stream completes.



96
97
98
99
100
101
102
103
104
105
106
107
# File 'lib/whoosh/ai/llm.rb', line 96

# Stream an LLM response — yields each chunk to the block as ruby_llm
# produces it. The block receives a RubyLLM::Chunk (a Message subclass
# with #content). Returns the final response message once the stream
# completes.
#
# @param message [String] the user message
# @param model [String, nil] per-call override of the instance default model
# @param system [String, nil] optional system instructions
# @raise [Errors::DependencyError] when no LLM backend is installed
def stream(message, model: nil, system: nil, &block)
  ensure_ruby_llm!
  if @ruby_llm.nil?
    raise Errors::DependencyError, "No LLM provider available. Add 'ruby_llm' to your Gemfile."
  end

  # Named `session` so the local does not shadow the #chat instance method.
  session = RubyLLM.chat(model: model || @model || DEFAULT_MODEL)
  session.with_instructions(system) if system
  session.ask(message) { |chunk| block&.call(chunk) }
end