Class: Kiribi::Gemma4E2B::AudioEncoder
- Defined in:
- lib/kiribi/gemma4_e2b/audio_encoder.rb
Constant Summary collapse
- FILES =
%w[audio_encoder.onnx audio_encoder.onnx_data].freeze
Instance Method Summary collapse
- #encode(pcm_samples) ⇒ Object
-
#initialize(dest_dir) ⇒ AudioEncoder
constructor
A new instance of AudioEncoder.
Methods inherited from Base
Constructor Details
#initialize(dest_dir) ⇒ AudioEncoder
Returns a new instance of AudioEncoder.
10 11 12 13 14 15 16 |
# File 'lib/kiribi/gemma4_e2b/audio_encoder.rb', line 10

# Loads the ONNX audio encoder from +dest_dir+ after verifying that
# every file listed in FILES has been downloaded.
#
# @param dest_dir [String] directory holding the downloaded model files
# @raise [Kiribi::ModelNotDownloaded] if any required file is absent
def initialize(dest_dir)
  # Collect the missing files first; FILES order is preserved, so the
  # reported file matches what a one-at-a-time check would raise on.
  missing = FILES.reject { |f| File.exist?(File.join(dest_dir, f)) }
  unless missing.empty?
    raise Kiribi::ModelNotDownloaded,
          %(gemma4-e2b/audio: #{missing.first} missing. Run: Kiribi.download("gemma4-e2b/audio"))
  end
  @model = OnnxRuntime::Model.new(File.join(dest_dir, "audio_encoder.onnx"))
end
Instance Method Details
#encode(pcm_samples) ⇒ Object
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
# File 'lib/kiribi/gemma4_e2b/audio_encoder.rb', line 18 def encode(pcm_samples) pcm = pcm_samples.is_a?(String) ? pcm_samples.unpack("e*") : pcm_samples frame_length = 320 hop_length = 160 fft_length = 512 num_mels = 128 mel_floor = 0.001 window = Array.new(frame_length) { 0.5 - 0.5 * Math.cos(2.0 * Math::PI * it / frame_length) } mel_filters = build_mel_filterbank(fft_length / 2 + 1, num_mels, 0.0, 8000.0, 16_000) pad_left = frame_length / 2 padded = Array.new(pad_left, 0.0) + pcm mask_raw = Array.new(pad_left, false) + Array.new(pcm.length, true) frame_size = frame_length + 1 num_frames = (padded.length - frame_size) / hop_length + 1 input_features = [] input_features_mask = [] num_frames.times do |fi| start = fi * hop_length windowed = frame_length.times.map { padded[start + it] * window[it] } mag = rfft_magnitude(windowed, fft_length) mel = num_mels.times.map do |m| sum = 0.0 mag.each_with_index { |v, i| sum += v * mel_filters[i][m] } Math.log(sum + mel_floor) end end_idx = fi * hop_length + frame_size - 1 valid = end_idx < mask_raw.length && mask_raw[end_idx] input_features << (valid ? mel : Array.new(num_mels, 0.0)) input_features_mask << valid end padded_frames = ((input_features.length + 127) / 128) * 128 while input_features.length < padded_frames input_features << Array.new(num_mels, 0.0) input_features_mask << false end @model.predict({ "input_features" => [input_features], "input_features_mask" => [input_features_mask] })["audio_features"] end |