Class: LlamaCpp::LlamaModelParams

Inherits:
  Object
Defined in:
ext/llama_cpp/llama_cpp.c,
ext/llama_cpp/llama_cpp.c

Overview

“struct llama_model_params” wrapper class

Instance Method Summary collapse

Instance Method Details

#check_tensorsBoolean

Returns:

  • (Boolean)


534
535
536
537
# File 'ext/llama_cpp/llama_cpp.c', line 534

// Reader for check_tensors: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_check_tensors(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->check_tensors) {
    return Qtrue;
  }
  return Qfalse;
}

#check_tensors=(check_tensors) ⇒ Boolean

Parameters:

  • check_tensors (Boolean)

Returns:

  • (Boolean)


539
540
541
542
543
# File 'ext/llama_cpp/llama_cpp.c', line 539

// Writer for check_tensors: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_check_tensors(VALUE self, VALUE check_tensors) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->check_tensors = RTEST(check_tensors) != 0;
  return check_tensors; // conventional attr-writer return: the assigned value
}

#main_gpuInteger

Returns:

  • (Integer)


464
465
466
467
# File 'ext/llama_cpp/llama_cpp.c', line 464

// Reader for main_gpu: exposes the stored GPU index as a Ruby Integer.
static VALUE llama_model_params_get_main_gpu(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->main_gpu);
}

#main_gpu=(main_gpu) ⇒ Integer

Parameters:

  • main_gpu (Integer)

Returns:

  • (Integer)


469
470
471
472
473
# File 'ext/llama_cpp/llama_cpp.c', line 469

// Writer for main_gpu: converts the Ruby Integer to a C int and stores it.
static VALUE llama_model_params_set_main_gpu(VALUE self, VALUE main_gpu) {
  struct llama_model_params* params = get_llama_model_params(self);
  const int gpu_index = NUM2INT(main_gpu);
  params->main_gpu = gpu_index;
  return main_gpu;
}

#n_gpu_layersInteger

Returns:

  • (Integer)


442
443
444
445
# File 'ext/llama_cpp/llama_cpp.c', line 442

// Reader for n_gpu_layers: exposes the layer-offload count as a Ruby Integer.
static VALUE llama_model_params_get_n_gpu_layers(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->n_gpu_layers);
}

#n_gpu_layers=(n_gpu_layers) ⇒ Integer

Parameters:

  • n_gpu_layers (Integer)

Returns:

  • (Integer)


447
448
449
450
451
# File 'ext/llama_cpp/llama_cpp.c', line 447

// Writer for n_gpu_layers: converts the Ruby Integer to a C int and stores it.
static VALUE llama_model_params_set_n_gpu_layers(VALUE self, VALUE n_gpu_layers) {
  struct llama_model_params* params = get_llama_model_params(self);
  const int layer_count = NUM2INT(n_gpu_layers);
  params->n_gpu_layers = layer_count;
  return n_gpu_layers;
}

#no_allocBoolean

Returns:

  • (Boolean)


567
568
569
570
# File 'ext/llama_cpp/llama_cpp.c', line 567

// Reader for no_alloc: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_no_alloc(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->no_alloc) {
    return Qtrue;
  }
  return Qfalse;
}

#no_alloc=(no_alloc) ⇒ Boolean

Parameters:

  • no_alloc (Boolean)

Returns:

  • (Boolean)


572
573
574
575
576
# File 'ext/llama_cpp/llama_cpp.c', line 572

// Writer for no_alloc: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_no_alloc(VALUE self, VALUE no_alloc) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->no_alloc = RTEST(no_alloc) != 0;
  return no_alloc; // conventional attr-writer return: the assigned value
}

#no_hostBoolean

Returns:

  • (Boolean)


556
557
558
559
# File 'ext/llama_cpp/llama_cpp.c', line 556

// Reader for no_host: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_no_host(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->no_host) {
    return Qtrue;
  }
  return Qfalse;
}

#no_host=(no_host) ⇒ Boolean

Parameters:

  • no_host (Boolean)

Returns:

  • (Boolean)


561
562
563
564
565
# File 'ext/llama_cpp/llama_cpp.c', line 561

// Writer for no_host: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_no_host(VALUE self, VALUE no_host) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->no_host = RTEST(no_host) != 0;
  return no_host; // conventional attr-writer return: the assigned value
}

#split_modeInteger

Returns:

  • (Integer)


453
454
455
456
# File 'ext/llama_cpp/llama_cpp.c', line 453

// Reader for split_mode: exposes the enum value as a Ruby Integer.
static VALUE llama_model_params_get_split_mode(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->split_mode);
}

#split_mode=(split_mode) ⇒ Integer

Parameters:

  • split_mode (Integer)

Returns:

  • (Integer)


458
459
460
461
462
# File 'ext/llama_cpp/llama_cpp.c', line 458

// Writer for split_mode: converts the Ruby Integer to the llama_split_mode enum.
// NOTE(review): the integer is not range-checked against the enum's members here.
static VALUE llama_model_params_set_split_mode(VALUE self, VALUE split_mode) {
  struct llama_model_params* params = get_llama_model_params(self);
  const enum llama_split_mode mode = (enum llama_split_mode)NUM2INT(split_mode);
  params->split_mode = mode;
  return split_mode;
}

#tensor_splitArray<Float>

Returns:

  • (Array<Float>)


475
476
477
478
479
480
481
482
483
484
485
486
487
488
# File 'ext/llama_cpp/llama_cpp.c', line 475

// Reader for tensor_split: returns the per-device split proportions as an
// Array of Floats. Returns an empty Array when there are no devices or when
// tensor_split was never configured (NULL pointer).
static VALUE llama_model_params_get_tensor_split(VALUE self) {
  // Hoist the device count: llama_max_devices() is loop-invariant, and the
  // original called it once per iteration in the loop condition.
  const size_t n_devices = llama_max_devices();
  if (n_devices < 1) {
    return rb_ary_new();
  }
  struct llama_model_params* data = get_llama_model_params(self);
  // tensor_split may legitimately be NULL when no split has been set.
  if (data->tensor_split == NULL) {
    return rb_ary_new();
  }
  // rb_ary_new2 / rb_ary_store take long indices; cast explicitly.
  VALUE ret = rb_ary_new2((long)n_devices);
  for (size_t i = 0; i < n_devices; i++) {
    rb_ary_store(ret, (long)i, DBL2NUM(data->tensor_split[i]));
  }
  return ret;
}

#use_direct_ioBoolean

Returns:

  • (Boolean)


512
513
514
515
# File 'ext/llama_cpp/llama_cpp.c', line 512

// Reader for use_direct_io: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_use_direct_io(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_direct_io) {
    return Qtrue;
  }
  return Qfalse;
}

#use_direct_io=(use_direct_io) ⇒ Boolean

Parameters:

  • use_direct_io (Boolean)

Returns:

  • (Boolean)


517
518
519
520
521
# File 'ext/llama_cpp/llama_cpp.c', line 517

// Writer for use_direct_io: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_use_direct_io(VALUE self, VALUE use_direct_io) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->use_direct_io = RTEST(use_direct_io) != 0;
  return use_direct_io; // conventional attr-writer return: the assigned value
}

#use_extra_buftsBoolean

Returns:

  • (Boolean)


545
546
547
548
# File 'ext/llama_cpp/llama_cpp.c', line 545

// Reader for use_extra_bufts: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_use_extra_bufts(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_extra_bufts) {
    return Qtrue;
  }
  return Qfalse;
}

#use_extra_bufts=(use_extra_bufts) ⇒ Boolean

Parameters:

  • use_extra_bufts (Boolean)

Returns:

  • (Boolean)


550
551
552
553
554
# File 'ext/llama_cpp/llama_cpp.c', line 550

// Writer for use_extra_bufts: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_use_extra_bufts(VALUE self, VALUE use_extra_bufts) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->use_extra_bufts = RTEST(use_extra_bufts) != 0;
  return use_extra_bufts; // conventional attr-writer return: the assigned value
}

#use_mlockBoolean

Returns:

  • (Boolean)


523
524
525
526
# File 'ext/llama_cpp/llama_cpp.c', line 523

// Reader for use_mlock: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_use_mlock(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_mlock) {
    return Qtrue;
  }
  return Qfalse;
}

#use_mlock=(use_mlock) ⇒ Boolean

Parameters:

  • use_mlock (Boolean)

Returns:

  • (Boolean)


528
529
530
531
532
# File 'ext/llama_cpp/llama_cpp.c', line 528

// Writer for use_mlock: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_use_mlock(VALUE self, VALUE use_mlock) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->use_mlock = RTEST(use_mlock) != 0;
  return use_mlock; // conventional attr-writer return: the assigned value
}

#use_mmapBoolean

Returns:

  • (Boolean)


501
502
503
504
# File 'ext/llama_cpp/llama_cpp.c', line 501

// Reader for use_mmap: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_use_mmap(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_mmap) {
    return Qtrue;
  }
  return Qfalse;
}

#use_mmap=(use_mmap) ⇒ Boolean

Parameters:

  • use_mmap (Boolean)

Returns:

  • (Boolean)


506
507
508
509
510
# File 'ext/llama_cpp/llama_cpp.c', line 506

// Writer for use_mmap: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_use_mmap(VALUE self, VALUE use_mmap) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->use_mmap = RTEST(use_mmap) != 0;
  return use_mmap; // conventional attr-writer return: the assigned value
}

#vocab_onlyBoolean

Returns:

  • (Boolean)


490
491
492
493
# File 'ext/llama_cpp/llama_cpp.c', line 490

// Reader for vocab_only: wraps the C bool field as a Ruby boolean.
static VALUE llama_model_params_get_vocab_only(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->vocab_only) {
    return Qtrue;
  }
  return Qfalse;
}

#vocab_only=(vocab_only) ⇒ Boolean

Parameters:

  • vocab_only (Boolean)

Returns:

  • (Boolean)


495
496
497
498
499
# File 'ext/llama_cpp/llama_cpp.c', line 495

// Writer for vocab_only: any truthy Ruby value (per RTEST) enables the flag.
static VALUE llama_model_params_set_vocab_only(VALUE self, VALUE vocab_only) {
  struct llama_model_params* params = get_llama_model_params(self);
  params->vocab_only = RTEST(vocab_only) != 0;
  return vocab_only; // conventional attr-writer return: the assigned value
}