Class: Whoosh::AI::LLM

Inherits:
  Object
Defined in:
lib/whoosh/ai/llm.rb

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(provider: "auto", model: nil, cache_enabled: true) ⇒ LLM

Returns a new instance of LLM.



8
9
10
11
12
13
14
15
# File 'lib/whoosh/ai/llm.rb', line 8

# Build a new LLM client.
#
# @param provider [String] provider name; "auto" defers provider selection
# @param model [String, nil] default model identifier (nil lets the provider choose)
# @param cache_enabled [Boolean] whether chat responses are memoized in-process
def initialize(provider: "auto", model: nil, cache_enabled: true)
  @provider = provider
  @model = model
  @cache_enabled = cache_enabled
  # In-memory response cache; stays nil when caching is disabled.
  @cache = nil
  @cache = {} if cache_enabled
  # Guards cache mutation across threads.
  @mutex = Mutex.new
  # Lazily-initialized ruby_llm handle.
  @ruby_llm = nil
end

Instance Attribute Details

#modelObject (readonly)

Returns the value of attribute model.



6
7
8
# File 'lib/whoosh/ai/llm.rb', line 6

# Returns the value of attribute model.
#
# @return [String, nil] the default model identifier
def model = @model

#providerObject (readonly)

Returns the value of attribute provider.



6
7
8
# File 'lib/whoosh/ai/llm.rb', line 6

# Returns the value of attribute provider.
#
# @return [String] the configured provider name
def provider = @provider

Instance Method Details

#available?Boolean

Check if LLM is available

Returns:

  • (Boolean)


79
80
81
# File 'lib/whoosh/ai/llm.rb', line 79

# Check if LLM is available.
#
# Delegates entirely to the ruby_llm availability probe.
#
# @return [Boolean]
def available? = ruby_llm_available?

#chat(message, model: nil, system: nil, max_tokens: nil, temperature: nil, cache: nil) ⇒ Object

Chat with an LLM — returns response text



18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
# File 'lib/whoosh/ai/llm.rb', line 18

# Chat with an LLM — returns response text.
#
# @param message [String] user message sent to the model
# @param model [String, nil] per-call model override; falls back to @model
# @param system [String, nil] optional system prompt
# @param max_tokens [Integer, nil] optional completion-length cap
# @param temperature [Float, nil] optional sampling temperature
# @param cache [Boolean, nil] per-call cache override; nil defers to @cache_enabled
# @return [Object] whatever call_llm returns (the response text)
def chat(message, model: nil, system: nil, max_tokens: nil, temperature: nil, cache: nil)
  use_cache = cache.nil? ? @cache_enabled : cache
  if use_cache
    # Key on every parameter that affects the response. The previous key
    # used only model + message, so calls that differed in system prompt,
    # max_tokens, or temperature wrongly shared one cache entry.
    cache_key = "chat:#{model || @model}:#{system}:#{max_tokens}:#{temperature}:#{message}"
  end

  # Check cache — read under the mutex for consistency with the
  # synchronized write below.
  if use_cache && @cache
    cached = @mutex.synchronize { @cache[cache_key] }
    return cached if cached
  end

  result = call_llm(
    messages: [{ role: "user", content: message }],
    model: model || @model,
    system: system,
    max_tokens: max_tokens,
    temperature: temperature
  )

  # Cache result
  if use_cache && @cache
    @mutex.synchronize { @cache[cache_key] = result }
  end

  result
end

#extract(text, schema:, model: nil, prompt: nil) ⇒ Object

Extract structured data — returns validated hash



44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# File 'lib/whoosh/ai/llm.rb', line 44

# Extract structured data — returns validated hash.
#
# @param text [String] source text to extract from
# @param schema [Object] schema with #validate returning a result object
# @param model [String, nil] per-call model override
# @param prompt [String, nil] custom system prompt (replaces the default)
# @return [Object] the validated data from the schema result
# @raise [Errors::ValidationError] when the parsed JSON fails schema validation
def extract(text, schema:, model: nil, prompt: nil)
  schema_text = describe_schema(schema)
  system_prompt = prompt || "Extract structured data from the text. Return ONLY valid JSON matching this schema:\n#{schema_text}"

  # Never serve extraction from the chat cache — always hit the model.
  raw = chat(text, model: model, system: system_prompt, cache: false)

  # Pull the JSON payload out of the LLM response, then decode it.
  parsed = Serialization::Json.decode(extract_json(raw))

  # Validate against schema; surface validation errors to the caller.
  outcome = schema.validate(parsed)
  raise Errors::ValidationError.new(outcome.errors) unless outcome.success?

  outcome.data
end

#stream(message, model: nil, system: nil, &block) ⇒ Object

Stream LLM response — yields chunks



64
65
66
67
68
69
70
71
72
73
74
75
76
# File 'lib/whoosh/ai/llm.rb', line 64

# Stream LLM response — yields chunks.
#
# Streaming is not yet wired up to ruby_llm: the current implementation
# performs one non-streaming chat call and yields the whole response as
# a single chunk. The interface is stable so callers can adopt it now.
#
# @param message [String] user message
# @param model [String, nil] per-call model override
# @param system [String, nil] optional system prompt
# @yieldparam chunk [Object] response text (currently the full response at once)
# @return [Object, nil] the full response, or nil when ruby_llm is unavailable
def stream(message, model: nil, system: nil, &block)
  ensure_ruby_llm!

  # Preserve the original contract: no ruby_llm handle means a nil return.
  return unless @ruby_llm

  # TODO: delegate to ruby_llm's real streaming API once integrated.
  # For now, fall back to a single non-streaming call.
  # (Removed the dead `messages` local the old body built and never used.)
  result = chat(message, model: model, system: system, cache: false)
  yield result if block_given?
  result
end