Class: CompletionKit::OllamaClient
- Inherits: LlmClient (show all)
- Defined in:
- app/services/completion_kit/ollama_client.rb
Instance Method Summary
collapse
Methods inherited from LlmClient
for_model, for_provider, #initialize
Instance Method Details
#available_models ⇒ Object
32
33
34
35
36
37
38
39
40
41
42
43
44
45
|
# File 'app/services/completion_kit/ollama_client.rb', line 32
# Fetches the models exposed by the Ollama server.
#
# Queries the OpenAI-compatible `/v1/models` endpoint and normalizes the
# payload into `{ id:, name: }` hashes sorted by id. Any network, HTTP,
# or parsing failure is treated as "no models available".
#
# @return [Array<Hash>] e.g. [{ id: "llama3", name: "llama3" }]; empty
#   when unconfigured or on any error
def available_models
  return [] unless configured?

  response = build_connection(api_endpoint).get("/v1/models") do |req|
    # BUG FIX: `req.["Authorization"]` is invalid Ruby — Faraday request
    # headers are assigned through req.headers.
    req.headers["Authorization"] = "Bearer #{api_key}" if api_key.present?
  end
  return [] unless response.success?

  # compact drops entries missing an "id" so sort can't raise on nil.
  ids = JSON.parse(response.body).fetch("data", []).map { |entry| entry["id"] }.compact.sort
  ids.map { |id| { id: id, name: id } }
rescue StandardError
  []
end
|
#configuration_errors ⇒ Object
51
52
53
54
55
|
# File 'app/services/completion_kit/ollama_client.rb', line 51
# Lists human-readable configuration problems for this client.
#
# @return [Array<String>] empty when the client is fully configured
def configuration_errors
  return [] if api_endpoint.present?

  ["Ollama API endpoint is not configured"]
end
|
#configured? ⇒ Boolean
47
48
49
|
# File 'app/services/completion_kit/ollama_client.rb', line 47
# Whether enough configuration exists to talk to the Ollama server.
#
# @return [Boolean] true when an API endpoint is set
def configured?
  # ActiveSupport defines present? as !blank?, so this is equivalent.
  !api_endpoint.blank?
end
|
#generate_completion(prompt, options = {}) ⇒ Object
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
|
# File 'app/services/completion_kit/ollama_client.rb', line 3
# Generates a text completion for +prompt+ via the OpenAI-compatible
# `/v1/completions` endpoint.
#
# @param prompt [String] the text to complete
# @param options [Hash] supported keys:
#   - :model [String] model id to use
#   - :max_tokens [Integer] defaults to 1000
#   - :temperature [Float] defaults to 0.7
# @return [String] the stripped completion text, or an "Error: ..."
#   string on any failure (never raises)
def generate_completion(prompt, options = {})
  return "Error: API endpoint not configured" unless configured?

  model = options[:model]
  max_tokens = options[:max_tokens] || 1000
  temperature = options[:temperature] || 0.7

  response = build_connection(api_endpoint).post do |req|
    req.url "/v1/completions"
    # BUG FIX: `req.["Content-Type"]` / `req.["Authorization"]` are invalid
    # Ruby — Faraday request headers are assigned through req.headers.
    req.headers["Content-Type"] = "application/json"
    req.headers["Authorization"] = "Bearer #{api_key}" if api_key.present?
    req.body = {
      model: model,
      prompt: prompt,
      max_tokens: max_tokens,
      temperature: temperature
    }.to_json
  end

  if response.success?
    data = JSON.parse(response.body)
    # dig avoids NoMethodError when a 2xx payload lacks choices/text,
    # which previously surfaced as a cryptic rescued-exception message.
    text = data.dig("choices", 0, "text")
    text ? text.strip : "Error: unexpected response format"
  else
    "Error: #{response.status} - #{response.body}"
  end
rescue StandardError => e
  "Error: #{e.message}"
end
|