Class: GRX::Tensor

Inherits:
Object
Defined in:
lib/grx/tensor.rb

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Constructor Details

#initialize(storage, shape, strides: nil, offset: 0, requires_grad: false) ⇒ Tensor

Returns a new instance of Tensor.



# File 'lib/grx/tensor.rb', line 8

def initialize(storage, shape, strides: nil, offset: 0, requires_grad: false)
  @storage       = storage
  @shape         = shape
  @offset        = offset
  @strides       = strides || _calc_strides(shape)
  @requires_grad = requires_grad
  @grad          = nil
  @backward_fn   = nil
  @_grafo_hijos  = []
end

Instance Attribute Details

#backward_fn ⇒ Object

Returns the value of attribute backward_fn.



# File 'lib/grx/tensor.rb', line 6

def backward_fn
  @backward_fn
end

#grad ⇒ Object

Returns the value of attribute grad.



# File 'lib/grx/tensor.rb', line 6

def grad
  @grad
end

#offset ⇒ Object (readonly)

Returns the value of attribute offset.



# File 'lib/grx/tensor.rb', line 5

def offset
  @offset
end

#requires_grad ⇒ Object

Returns the value of attribute requires_grad.



# File 'lib/grx/tensor.rb', line 6

def requires_grad
  @requires_grad
end

#shape ⇒ Object (readonly)

Returns the value of attribute shape.



# File 'lib/grx/tensor.rb', line 5

def shape
  @shape
end

#storage ⇒ Object (readonly)

Returns the value of attribute storage.



# File 'lib/grx/tensor.rb', line 5

def storage
  @storage
end

#strides ⇒ Object (readonly)

Returns the value of attribute strides.



# File 'lib/grx/tensor.rb', line 5

def strides
  @strides
end

Class Method Details

._alloc_raw(n) ⇒ Object



# File 'lib/grx/tensor.rb', line 564

def self._alloc_raw(n)
  if CAPI::LOADED
    ptr = CAPI.grx_alloc(n)
    raise StorageError, "grx_alloc OOM" if ptr.null?
    s = Storage.allocate
    s.instance_variable_set(:@size, n)
    s.instance_variable_set(:@ptr,  ptr)
    ObjectSpace.define_finalizer(s, Storage.make_finalizer(ptr))
    s
  else
    Storage.new(Array.new(n, 0.0))
  end
end

.create(array_valores, shape, requires_grad: false) ⇒ Object


FACTORIES




# File 'lib/grx/tensor.rb', line 23

def self.create(array_valores, shape, requires_grad: false)
  new(Storage.new(array_valores), shape, requires_grad: requires_grad)
end
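
A minimal construction sketch (values are illustrative; the require path is an assumption based on the file layout shown above):

require "grx"

t = GRX::Tensor.create([1.0, 2.0, 3.0, 4.0], [2, 2])
t.shape      # => [2, 2]
t.get(1, 0)  # => 3.0 (row-major layout)
t.to_a       # => [1.0, 2.0, 3.0, 4.0]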

.he_normal(shape, requires_grad: false) ⇒ Object

He normal initialization (for layers with ReLU)



# File 'lib/grx/tensor.rb', line 53

def self.he_normal(shape, requires_grad: false)
  # fan_in = number of inputs = last dim (or the only dim for 1D shapes)
  fan_in = shape.size >= 2 ? shape[-1] : shape[0]
  n = shape.reduce(1, :*)
  s = _alloc_raw(n)
  CAPI.grx_init_he_normal(s.ptr, n, fan_in) if CAPI::LOADED
  new(s, shape, requires_grad: requires_grad)
end
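
A usage sketch for a ReLU layer's weights. Note that the pure-Ruby fallback leaves the buffer zero-filled, since ._alloc_raw only draws random values through CAPI.grx_init_he_normal when the C API is loaded:

w = GRX::Tensor.he_normal([256, 128], requires_grad: true)
w.shape  # => [256, 128]
# With CAPI::LOADED, entries come from grx_init_he_normal;
# without it they remain 0.0 (see ._alloc_raw).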

.ones(shape, requires_grad: false) ⇒ Object



# File 'lib/grx/tensor.rb', line 31

def self.ones(shape, requires_grad: false)
  create(Array.new(shape.reduce(1,:*), 1.0), shape, requires_grad: requires_grad)
end

.ones_like(t, requires_grad: false) ⇒ Object



# File 'lib/grx/tensor.rb', line 39

def self.ones_like(t, requires_grad: false)
  ones(t.shape, requires_grad: requires_grad)
end

.xavier_uniform(shape, requires_grad: false) ⇒ Object

Xavier uniform initialization (for linear layers with tanh/sigmoid)



# File 'lib/grx/tensor.rb', line 44

def self.xavier_uniform(shape, requires_grad: false)
  fan_in, fan_out = shape[-2] || 1, shape[-1] || 1
  n = shape.reduce(1, :*)
  s = _alloc_raw(n)
  CAPI.grx_init_xavier_uniform(s.ptr, n, fan_in, fan_out) if CAPI::LOADED
  new(s, shape, requires_grad: requires_grad)
end

.zeros(shape, requires_grad: false) ⇒ Object



# File 'lib/grx/tensor.rb', line 27

def self.zeros(shape, requires_grad: false)
  create(Array.new(shape.reduce(1,:*), 0.0), shape, requires_grad: requires_grad)
end

.zeros_like(t, requires_grad: false) ⇒ Object



# File 'lib/grx/tensor.rb', line 35

def self.zeros_like(t, requires_grad: false)
  zeros(t.shape, requires_grad: requires_grad)
end

Instance Method Details

#*(other) ⇒ Object



# File 'lib/grx/tensor.rb', line 108

def *(other)
  case other
  when Tensor
    raise ShapeError, "Incompatible shapes: #{@shape} vs #{other.shape}" if @shape != other.shape
    r = Tensor.new(_binop(:grx_mul, other), @shape)
    if requires_grad || other.requires_grad
      r.requires_grad = true
      a, b = self, other
      r._grafo_hijos.push(a, b)
      r.backward_fn = ->(g) {
        a.agregar_gradiente(g * b) if a.requires_grad
        b.agregar_gradiente(g * a) if b.requires_grad
      }
    end
    r
  when Numeric
    scale(other.to_f)
  else
    raise TypeError, "Cannot multiply Tensor by #{other.class}"
  end
end
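
A short autograd sketch of the product rule wired above (values illustrative):

a = GRX::Tensor.create([1.0, 2.0], [2], requires_grad: true)
b = GRX::Tensor.create([3.0, 4.0], [2], requires_grad: true)
c = a * b     # element-wise; c records a and b in _grafo_hijos
c.backward    # seeds the root gradient with ones (see #backward)
a.grad.to_a   # => [3.0, 4.0]  (d(a*b)/da = b)
b.grad.to_a   # => [1.0, 2.0]  (d(a*b)/db = a)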

#+(other) ⇒ Object


ARITHMETIC OPERATIONS (with autograd)




# File 'lib/grx/tensor.rb', line 66

def +(other)
  case other
  when Tensor
    raise ShapeError, "Incompatible shapes: #{@shape} vs #{other.shape}" if @shape != other.shape
    r = Tensor.new(_binop(:grx_add, other), @shape)
    if requires_grad || other.requires_grad
      r.requires_grad = true
      r._grafo_hijos.push(self, other)
      r.backward_fn = ->(g) {
        agregar_gradiente(g)       if requires_grad
        other.agregar_gradiente(g) if other.requires_grad
      }
    end
    r
  when Numeric
    add_scalar(other.to_f)
  else
    raise TypeError, "Cannot add Tensor and #{other.class}"
  end
end

#-(other) ⇒ Object



# File 'lib/grx/tensor.rb', line 87

def -(other)
  case other
  when Tensor
    raise ShapeError, "Incompatible shapes: #{@shape} vs #{other.shape}" if @shape != other.shape
    r = Tensor.new(_binop(:grx_sub, other), @shape)
    if requires_grad || other.requires_grad
      r.requires_grad = true
      r._grafo_hijos.push(self, other)
      r.backward_fn = ->(g) {
        agregar_gradiente(g)              if requires_grad
        other.agregar_gradiente(g.negate) if other.requires_grad
      }
    end
    r
  when Numeric
    add_scalar(-other.to_f)
  else
    raise TypeError, "Cannot subtract #{other.class} from Tensor"
  end
end

#-@ ⇒ Object



# File 'lib/grx/tensor.rb', line 153

def -@
  negate
end

#/(other) ⇒ Object



# File 'lib/grx/tensor.rb', line 130

def /(other)
  case other
  when Tensor
    raise ShapeError, "Incompatible shapes: #{@shape} vs #{other.shape}" if @shape != other.shape
    r = Tensor.new(_binop(:grx_div, other), @shape)
    if requires_grad || other.requires_grad
      r.requires_grad = true
      a, b = self, other
      r._grafo_hijos.push(a, b)
      r.backward_fn = ->(g) {
        # d(a/b)/da = 1/b,  d(a/b)/db = -a/b^2
        a.agregar_gradiente(g / b)                    if a.requires_grad
        b.agregar_gradiente((g * a).negate / (b * b)) if b.requires_grad
      }
    end
    r
  when Numeric
    scale(1.0 / other.to_f)
  else
    raise TypeError, "Cannot divide Tensor by #{other.class}"
  end
end

#_grafo_hijos ⇒ Object



# File 'lib/grx/tensor.rb', line 445

def _grafo_hijos
  @_grafo_hijos
end

#_matmul_no_grad(other) ⇒ Object

Matmul without autograd, for internal use inside backward_fn

Raises:

  • (DimensionError)
  • (ShapeError)

# File 'lib/grx/tensor.rb', line 486

def _matmul_no_grad(other)
  raise DimensionError, "matmul requires 2D tensors" unless @shape.size == 2 && other.shape.size == 2
  m, k = @shape; k2, n = other.shape
  raise ShapeError, "Incompatible dimensions" if k != k2
  if CAPI::LOADED
    out = _alloc_storage(m * n)
    CAPI.grx_matmul(@storage.ptr, other.storage.ptr, out.ptr, m, k, n)
    Tensor.new(out, [m, n])
  else
    # Pure-Ruby fallback: naive triple loop over row-major buffers
    result = Array.new(m * n, 0.0)
    m.times do |i|
      k.times do |kk|
        aik = @storage.read(i * k + kk)
        n.times { |j| result[i * n + j] += aik * other.storage.read(kk * n + j) }
      end
    end
    Tensor.new(Storage.new(result), [m, n])
  end
end

#_transpose_view ⇒ Object

Transpose without autograd, for internal use in backward only

Raises:

  • (DimensionError)

# File 'lib/grx/tensor.rb', line 478

def _transpose_view
  raise DimensionError, "transpose only supports 2D" if @shape.size != 2
  Tensor.new(@storage, [@shape[1], @shape[0]],
             strides: [@strides[1], @strides[0]],
             offset: @offset, requires_grad: false)
end

#abs ⇒ Object


ELEMENT-WISE MATH (with autograd)




# File 'lib/grx/tensor.rb', line 177

def abs
  r = _unary_c(:grx_abs) { |v| v.abs }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) {
      # d|x|/dx = sign(x)
      sign = Tensor.create(src.to_a.map { |v| v >= 0 ? 1.0 : -1.0 }, src.shape)
      src.agregar_gradiente(g * sign)
    }
  end
  r
end

#add_scalar(s) ⇒ Object



# File 'lib/grx/tensor.rb', line 165

def add_scalar(s)
  _unary_c(:grx_add_scalar, s) { |v| v + s }
end

#agregar_gradiente(g) ⇒ Object


AUTOGRAD




# File 'lib/grx/tensor.rb', line 403

def agregar_gradiente(g)
  @grad = @grad.nil? ? g : @grad + g
end

#backward(grad_inicial = nil) ⇒ Object



# File 'lib/grx/tensor.rb', line 407

def backward(grad_inicial = nil)
  if grad_inicial.nil? && @grad.nil?
    agregar_gradiente(Tensor.ones(@shape))
  elsif !grad_inicial.nil?
    agregar_gradiente(grad_inicial)
  end

  # Topological order via iterative post-order DFS (avoids stack overflow on deep graphs)
  orden     = []
  visitados = {}
  stack     = [[self, false]]

  until stack.empty?
    nodo, procesado = stack.pop
    if procesado
      orden << nodo unless visitados[nodo.object_id]
      visitados[nodo.object_id] = true
    else
      next if visitados[nodo.object_id]
      stack.push([nodo, true])
      nodo._grafo_hijos.each { |h| stack.push([h, false]) unless visitados[h.object_id] }
    end
  end

  # orden is already in post-order, so reversing gives root first, leaves last
  orden.reverse_each do |nodo|
    next unless nodo.grad && nodo.backward_fn
    nodo.backward_fn.call(nodo.grad)
    nodo.backward_fn = nil
  end
end
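
A worked sketch of the traversal on a small diamond graph, where the leaf a reaches the root through two paths and its contributions accumulate:

a = GRX::Tensor.create([2.0], [1], requires_grad: true)
c = (a * a) + a  # a feeds c both through the product and directly
c.backward       # root-first: c's backward_fn runs before (a * a)'s
a.grad.item      # => 5.0, i.e. 2a + 1 evaluated at a = 2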

#clip(lo, hi) ⇒ Object



# File 'lib/grx/tensor.rb', line 246

def clip(lo, hi)
  out = _alloc_storage(numel)
  if CAPI::LOADED
    CAPI.grx_clip(@storage.ptr, lo.to_f, hi.to_f, out.ptr, numel)
  else
    data = to_a.map { |v| v < lo ? lo : (v > hi ? hi : v) }
    return Tensor.create(data, @shape)
  end
  Tensor.new(out, @shape)
end
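
A quick sketch; note that #clip returns a fresh tensor and wires no backward_fn:

t = GRX::Tensor.create([-2.0, 0.5, 3.0], [3])
t.clip(-1.0, 1.0).to_a  # => [-1.0, 0.5, 1.0]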

#dot(other) ⇒ Object


LINEAR ALGEBRA


Raises:

  • (ShapeError)

# File 'lib/grx/tensor.rb', line 297

def dot(other)
  raise ShapeError, "dot requires matching shapes" if @shape != other.shape
  if CAPI::LOADED
    CAPI.grx_dot(@storage.ptr, other.storage.ptr, numel)
  else
    to_a.zip(other.to_a).sum { |a, b| a * b }
  end
end
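
#dot reduces straight to a Float, so it does not participate in autograd. A sketch:

a = GRX::Tensor.create([1.0, 2.0, 3.0], [3])
b = GRX::Tensor.create([4.0, 5.0, 6.0], [3])
a.dot(b)  # => 32.0 (plain Float)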

#exp ⇒ Object



# File 'lib/grx/tensor.rb', line 224

def exp
  r = _unary_c(:grx_exp) { |v| Math.exp(v) }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    res = r; src = self
    r.backward_fn = ->(g) { src.agregar_gradiente(g * res) }
  end
  r
end

#flatten ⇒ Object



# File 'lib/grx/tensor.rb', line 502

def flatten
  reshape([numel])
end

#get(*coords) ⇒ Object


GEOMETRY (zero-copy)




# File 'lib/grx/tensor.rb', line 453

def get(*coords)
  @storage.read(_calc_flat_index(coords))
end

#item ⇒ Object



# File 'lib/grx/tensor.rb', line 544

def item
  raise "item() is only for 1-element tensors" if numel != 1
  to_a[0]
end

#leaky_relu(alpha = 0.01) ⇒ Object



# File 'lib/grx/tensor.rb', line 351

def leaky_relu(alpha = 0.01)
  r = _unary_c(:grx_leaky_relu, alpha.to_f) { |v| v > 0 ? v : alpha * v }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) {
      mask = Tensor.create(src.to_a.map { |v| v > 0 ? 1.0 : alpha }, src.shape)
      src.agregar_gradiente(g * mask)
    }
  end
  r
end

#log ⇒ Object



# File 'lib/grx/tensor.rb', line 214

def log
  r = _unary_c(:grx_log) { |v| Math.log(v) }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) { src.agregar_gradiente(g / src) }
  end
  r
end
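
A gradient sketch for the 1/x rule above:

x = GRX::Tensor.create([1.0, 4.0], [2], requires_grad: true)
y = x.log
y.backward
x.grad.to_a  # => [1.0, 0.25]  (d log(x)/dx = 1/x)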

#matmul(other) ⇒ Object

Raises:

  • (DimensionError)
  • (ShapeError)

# File 'lib/grx/tensor.rb', line 306

def matmul(other)
  raise DimensionError, "matmul requires 2D tensors" unless @shape.size == 2 && other.shape.size == 2
  m, k = @shape; k2, n = other.shape
  raise ShapeError, "Incompatible dimensions: #{@shape} × #{other.shape}" if k != k2
  if CAPI::LOADED
    out = _alloc_storage(m * n)
    CAPI.grx_matmul(@storage.ptr, other.storage.ptr, out.ptr, m, k, n)
    r = Tensor.new(out, [m, n])
  else
    # Pure-Ruby fallback; build r here so the autograd wiring below applies to both paths
    result = Array.new(m * n, 0.0)
    m.times do |i|
      k.times do |kk|
        aik = @storage.read(i * k + kk)
        n.times { |j| result[i * n + j] += aik * other.storage.read(kk * n + j) }
      end
    end
    r = Tensor.new(Storage.new(result), [m, n])
  end
  if requires_grad || other.requires_grad
    r.requires_grad = true
    a, b = self, other
    r._grafo_hijos.push(a, b)
    r.backward_fn = ->(g) {
      # dL/dA = dL/dC × B^T,  dL/dB = A^T × dL/dC
      # Use _matmul_no_grad and _transpose_view so no graph nodes are created
      a.agregar_gradiente(g._matmul_no_grad(b._transpose_view)) if a.requires_grad
      b.agregar_gradiente(a._transpose_view._matmul_no_grad(g)) if b.requires_grad
    }
  end
  r
end
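
A shape-focused sketch of the two gradient products in the backward_fn above:

a = GRX::Tensor.create([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [2, 3], requires_grad: true)
w = GRX::Tensor.create(Array.new(6, 0.5), [3, 2], requires_grad: true)
y = a.matmul(w)  # [2, 3] × [3, 2] → [2, 2]
y.backward
a.grad.shape     # => [2, 3]  (G × B^T)
w.grad.shape     # => [3, 2]  (A^T × G)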

#max ⇒ Object



# File 'lib/grx/tensor.rb', line 277

def max
  if CAPI::LOADED
    CAPI.grx_max(@storage.ptr, numel)
  else
    to_a.max
  end
end

#mean ⇒ Object



# File 'lib/grx/tensor.rb', line 269

def mean
  if CAPI::LOADED
    CAPI.grx_mean(@storage.ptr, numel)
  else
    to_a.sum.to_f / numel
  end
end

#min ⇒ Object



# File 'lib/grx/tensor.rb', line 285

def min
  if CAPI::LOADED
    CAPI.grx_min(@storage.ptr, numel)
  else
    to_a.min
  end
end

#negate ⇒ Object



# File 'lib/grx/tensor.rb', line 169

def negate
  _unary_c(:grx_negate) { |v| -v }
end

#numel ⇒ Object


UTILITIES




# File 'lib/grx/tensor.rb', line 510

def numel
  @shape.reduce(1, :*)
end

#pow(e) ⇒ Object



# File 'lib/grx/tensor.rb', line 234

def pow(e)
  r = _unary_c(:grx_pow, e.to_f) { |v| v ** e }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) {
      src.agregar_gradiente(g * src.pow(e - 1).scale(e.to_f))
    }
  end
  r
end

#relu ⇒ Object


ACTIVATIONS (with autograd)




# File 'lib/grx/tensor.rb', line 338

def relu
  r = _unary_c(:grx_relu) { |v| v > 0 ? v : 0.0 }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) {
      mask = Tensor.create(src.to_a.map { |v| v > 0 ? 1.0 : 0.0 }, src.shape)
      src.agregar_gradiente(g * mask)
    }
  end
  r
end
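
A sketch of the mask-based gradient above:

x = GRX::Tensor.create([-1.0, 0.5, 2.0], [3], requires_grad: true)
y = x.relu   # values [0.0, 0.5, 2.0]
y.backward
x.grad.to_a  # => [0.0, 1.0, 1.0]  (1.0 wherever the input was positive)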

#reshape(nueva_forma) ⇒ Object

Raises:

  • (ArgumentError)


# File 'lib/grx/tensor.rb', line 457

def reshape(nueva_forma)
  raise ArgumentError, "Incompatible reshape" if numel != nueva_forma.reduce(1,:*)
  Tensor.new(@storage, nueva_forma, offset: @offset, requires_grad: @requires_grad)
end
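
Reshape shares storage, so no data is copied; only shape and strides change. A sketch:

t = GRX::Tensor.create([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [2, 3])
m = t.reshape([3, 2])  # same storage, freshly computed contiguous strides
m.get(2, 1)            # => 6.0
t.flatten.shape        # => [6]  (#flatten is just reshape([numel]))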

#scale(s) ⇒ Object


SCALAR OPERATIONS




# File 'lib/grx/tensor.rb', line 161

def scale(s)
  _unary_c(:grx_scale, s) { |v| v * s }
end

#sigmoid ⇒ Object



# File 'lib/grx/tensor.rb', line 377

def sigmoid
  r = _unary_c(:grx_sigmoid) { |v| 1.0 / (1.0 + Math.exp(-v)) }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    res = r; src = self
    r.backward_fn = ->(g) {
      # d(sigmoid)/dx = sigmoid * (1 - sigmoid)
      src.agregar_gradiente(g * res * (Tensor.ones_like(res) - res))
    }
  end
  r
end
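
A one-element sketch checking the derivative identity in the comment above:

x = GRX::Tensor.create([0.0], [1], requires_grad: true)
y = x.sigmoid
y.item       # => 0.5
y.backward
x.grad.item  # => 0.25  (0.5 * (1 - 0.5))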

#softmax ⇒ Object



# File 'lib/grx/tensor.rb', line 390

def softmax
  r = _unary_c(:grx_softmax) do
    vals = to_a; max_v = vals.max
    exps = vals.map { |v| Math.exp(v - max_v) }; s = exps.sum
    exps.map { |e| e / s }
  end
  r
end
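
Note that, unlike the activations above, #softmax wires no backward_fn. A sketch:

x = GRX::Tensor.create([1.0, 2.0, 3.0], [3])
p = x.softmax
p.to_a.sum  # => 1.0 (up to floating-point rounding)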

#sqrt ⇒ Object



# File 'lib/grx/tensor.rb', line 191

def sqrt
  r = _unary_c(:grx_sqrt) { |v| Math.sqrt(v) }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    res = r; src = self
    r.backward_fn = ->(g) {
      # d(sqrt(x))/dx = 1 / (2*sqrt(x))
      src.agregar_gradiente(g / (res.scale(2.0)))
    }
  end
  r
end

#square ⇒ Object



# File 'lib/grx/tensor.rb', line 204

def square
  r = _unary_c(:grx_square) { |v| v * v }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    src = self
    r.backward_fn = ->(g) { src.agregar_gradiente(g * src.scale(2.0)) }
  end
  r
end

#sum ⇒ Object


REDUCTIONS (return Float or a scalar Tensor)




# File 'lib/grx/tensor.rb', line 261

def sum
  if CAPI::LOADED
    CAPI.grx_sum(@storage.ptr, numel)
  else
    to_a.sum
  end
end

#tanh ⇒ Object



# File 'lib/grx/tensor.rb', line 364

def tanh
  r = _unary_c(:grx_tanh_act) { |v| Math.tanh(v) }
  if requires_grad
    r.requires_grad = true; r._grafo_hijos << self
    res = r; src = self
    r.backward_fn = ->(g) {
      # d(tanh)/dx = 1 - tanh(x)^2
      src.agregar_gradiente(g * (Tensor.ones_like(res) - res.square))
    }
  end
  r
end

#to_a ⇒ Object



# File 'lib/grx/tensor.rb', line 514

def to_a
  # If the strides are contiguous (plain tensor, reshape), read the buffer directly.
  # Otherwise (transpose, views with custom strides), walk the elements via strides.
  if _contiguous?
    @storage.to_ruby_array
  else
    _collect_elements(@shape, @strides, @offset)
  end
end

#to_s ⇒ Object Also known as: inspect



# File 'lib/grx/tensor.rb', line 549

def to_s
  "#<GRX::Tensor shape=#{@shape} data=#{to_a}>"
end

#transpose ⇒ Object

Raises:

  • (DimensionError)

# File 'lib/grx/tensor.rb', line 462

def transpose
  raise DimensionError, "transpose only supports 2D" if @shape.size != 2
  t = Tensor.new(@storage, [@shape[1], @shape[0]],
             strides: [@strides[1], @strides[0]],
             offset: @offset, requires_grad: @requires_grad)
  if @requires_grad
    t._grafo_hijos << self
    src = self
    t.backward_fn = ->(g) {
      src.agregar_gradiente(g._transpose_view)
    }
  end
  t
end
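
A sketch of the zero-copy view, assuming the row-major strides produced by _calc_strides in the constructor:

t  = GRX::Tensor.create([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [2, 3])
tt = t.transpose
tt.shape      # => [3, 2]
tt.strides    # => [1, 3]  (swapped; same storage, nothing copied)
tt.get(0, 1)  # => 4.0     (element t[1, 0])
tt.to_a       # => [1.0, 4.0, 2.0, 5.0, 3.0, 6.0]  (strided walk, see #to_a)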

#zero_grad! ⇒ Object



# File 'lib/grx/tensor.rb', line 439

def zero_grad!
  @grad = nil
  @_grafo_hijos = []
  @backward_fn = nil
end
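
Because #agregar_gradiente accumulates, gradients must be reset between passes. A sketch:

x = GRX::Tensor.create([1.0, 2.0], [2], requires_grad: true)
x.square.backward
x.grad.to_a   # => [2.0, 4.0]

x.zero_grad!  # without this, the next pass would accumulate to [4.0, 8.0]
x.square.backward
x.grad.to_a   # => [2.0, 4.0]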