Module: PWN::AI::Ollama

Defined in:
lib/pwn/ai/ollama.rb

Overview

This plugin is used for interacting with Ollama's REST API using the 'rest' browser type of PWN::Plugins::TransparentBrowser. Ollama exposes an OpenAI-compatible endpoint modeled after the following API specification: https://api.openai.com/v1

Class Method Summary collapse

Class Method Details

.authorsObject

Author(s)

0day Inc. <support@0dayinc.com>



232
233
234
235
236
# File 'lib/pwn/ai/ollama.rb', line 232

# Return the attribution banner for this module.
#
# @return [String] author(s) and contact information
public_class_method def self.authors
  %{AUTHOR(S):
    0day Inc. <support@0dayinc.com>
  }
end

.chat(opts = {}) ⇒ Object

Supported Method Parameters

response = PWN::AI::Ollama.chat(

request: 'required - message to Ollama',
model: 'optional - model to use for text generation (defaults to PWN::Env[:ai][:ollama][:model])',
temp: 'optional - creative response float (defaults to PWN::Env[:ai][:ollama][:temp])',
system_role_content: 'optional - context to set up the model behavior for conversation (Default: PWN::Env[:ai][:ollama][:system_role_content])',
response_history: 'optional - pass response back in to have a conversation',
speak_answer: 'optional - speak answer using PWN::Plugins::Voice.text_to_speech (Default: nil)',
timeout: 'optional - timeout in seconds (defaults to 300)',
spinner: 'optional - display spinner (defaults to false)'

)



147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
# File 'lib/pwn/ai/ollama.rb', line 147

# Send a chat-completion request to the local Ollama instance via its
# OpenAI-compatible /v1/chat/completions endpoint and return the parsed
# JSON response.  The returned hash's :choices key is rewritten to hold
# the full running transcript so it can be passed straight back in as
# :response_history to continue the conversation.
#
# @param opts [Hash] options:
#   :request [String] required - message to send to Ollama
#   :model [String] optional - model name (defaults to PWN::Env[:ai][:ollama][:model])
#   :temp [Float] optional - sampling temperature (defaults to PWN::Env[:ai][:ollama][:temp], then 1)
#   :system_role_content [String] optional - system prompt (defaults to PWN::Env[:ai][:ollama][:system_role_content])
#   :response_history [Hash] optional - a prior return value of this method, to continue a conversation
#   :speak_answer [Boolean] optional - speak the answer via PWN::Plugins::Voice (Default: nil)
#   :timeout [Integer] optional - REST timeout in seconds (default handled by ollama_rest_call)
#   :spinner [Boolean] optional - display spinner (default handled by ollama_rest_call)
#
# @return [Hash] parsed API response with :choices replaced by the conversation transcript
# @raise [RuntimeError] when :request is nil or no model is configured
public_class_method def self.chat(opts = {})
  engine = PWN::Env[:ai][:ollama]

  request = opts[:request]
  raise 'ERROR: request is required' if request.nil?

  # Truncate overly long prompts.  The 3.36 divisor approximates an
  # average characters-per-token ratio so the prompt stays within the
  # model's context window — TODO confirm against the model tokenizer.
  max_prompt_length = engine[:max_prompt_length] || 1_000_000
  request_trunc_idx = ((max_prompt_length - 1) / 3.36).floor
  request = request[0..request_trunc_idx]

  model = opts[:model] || engine[:model]
  raise 'ERROR: Model is required.  Call #get_models method for details' if model.nil?

  # BUG FIX: the previous `opts[:temp].to_f ||= engine[:temp].to_f`
  # never fell back to the engine default because nil.to_f is 0.0
  # (truthy).  Resolve the fallback BEFORE coercing to Float.
  temp = (opts[:temp] || engine[:temp]).to_f
  temp = 1 if temp.zero?

  rest_call = 'ollama/v1/chat/completions'

  response_history = opts[:response_history]

  system_role_content = opts[:system_role_content] || engine[:system_role_content]
  # When continuing a conversation, carry the system role forward from
  # the prior response so the model's behavior stays consistent.
  system_role_content = response_history[:choices].first[:content] if response_history

  system_role = {
    role: 'system',
    content: system_role_content
  }

  user_role = {
    role: 'user',
    content: request
  }

  response_history ||= { choices: [system_role] }

  http_body = {
    model: model,
    messages: [system_role],
    temperature: temp,
    stream: false
  }

  # Replay every prior message beyond the system role so the model
  # retains the conversational context.
  if response_history[:choices].length > 1
    response_history[:choices][1..-1].each do |message|
      http_body[:messages].push(message)
    end
  end

  http_body[:messages].push(user_role)

  response = ollama_rest_call(
    http_method: :post,
    rest_call: rest_call,
    http_body: http_body,
    timeout: opts[:timeout],
    spinner: opts[:spinner]
  )

  json_resp = JSON.parse(response, symbolize_names: true)
  assistant_resp = json_resp[:choices].first[:message]
  # Rewrite :choices as the full transcript (system + history + user +
  # assistant) so callers can feed json_resp back in as :response_history.
  json_resp[:choices] = http_body[:messages]
  json_resp[:choices].push(assistant_resp)

  if opts[:speak_answer]
    answer = assistant_resp[:content]
    # Voice plugin consumes a file, so stage the answer in a temp file
    # and clean it up afterwards.
    text_path = "/tmp/#{SecureRandom.hex}.pwn_voice"
    File.write(text_path, answer)
    PWN::Plugins::Voice.text_to_speech(text_path: text_path)
    File.unlink(text_path)
  end

  json_resp
rescue StandardError => e
  raise e
end

.get_modelsObject

Supported Method Parameters

response = PWN::AI::Ollama.get_models



127
128
129
130
131
132
133
# File 'lib/pwn/ai/ollama.rb', line 127

# Retrieve the list of models available from the local Ollama instance.
#
# @return [Array<Hash>] the :models array parsed from the /api/tags endpoint
public_class_method def self.get_models
  raw_tags = ollama_rest_call(rest_call: 'ollama/api/tags')
  parsed = JSON.parse(raw_tags, symbolize_names: true)
  parsed[:models]
rescue StandardError => e
  raise e
end

.helpObject

Display Usage for this Module



240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
# File 'lib/pwn/ai/ollama.rb', line 240

# Display usage for this module's public methods on STDOUT.
public_class_method def self.help
  usage = "USAGE:
    models = #{self}.get_models

    response = #{self}.chat(
      request: 'required - message to Ollama',
      model: 'optional - model to use for text generation (defaults to PWN::Env[:ai][:ollama][:model])',
      temp: 'optional - creative response float (defaults to PWN::Env[:ai][:ollama][:temp])',
      system_role_content: 'optional - context to set up the model behavior for conversation (Default: PWN::Env[:ai][:ollama][:system_role_content])',
      response_history: 'optional - pass response back in to have a conversation',
      speak_answer: 'optional speak answer using PWN::Plugins::Voice.text_to_speech (Default: nil)',
      timeout: 'optional - timeout in seconds (defaults to 300)',
      spinner: 'optional - display spinner (defaults to false)'
    )

    #{self}.authors
  "
  puts usage
end