Class: LlamaCpp::LlamaModelParams

Inherits: Object
Defined in: ext/llama_cpp/llama_cpp.c

Overview

“struct llama_model_params” wrapper class

Instance Method Summary collapse

Instance Method Details

#check_tensorsBoolean

Returns:

  • (Boolean)


521
522
523
524
# File 'ext/llama_cpp/llama_cpp.c', line 521

/* Reads the check_tensors flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_check_tensors(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->check_tensors) {
    return Qtrue;
  }
  return Qfalse;
}

#check_tensors=(check_tensors) ⇒ Boolean

Parameters:

  • check_tensors (Boolean)

Returns:

  • (Boolean)


526
527
528
529
530
# File 'ext/llama_cpp/llama_cpp.c', line 526

/* Writes the check_tensors flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_check_tensors(VALUE self, VALUE check_tensors) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(check_tensors) ? true : false;
  params->check_tensors = flag;
  return check_tensors;
}

#main_gpuInteger

Returns:

  • (Integer)


462
463
464
465
# File 'ext/llama_cpp/llama_cpp.c', line 462

/* Returns the main_gpu field of the wrapped llama_model_params as a Ruby Integer. */
static VALUE llama_model_params_get_main_gpu(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->main_gpu);
}

#main_gpu=(main_gpu) ⇒ Integer

Parameters:

  • main_gpu (Integer)

Returns:

  • (Integer)


467
468
469
470
471
# File 'ext/llama_cpp/llama_cpp.c', line 467

/* Stores a Ruby Integer into the main_gpu field; returns the argument. */
static VALUE llama_model_params_set_main_gpu(VALUE self, VALUE main_gpu) {
  struct llama_model_params* params = get_llama_model_params(self);
  const int gpu_index = NUM2INT(main_gpu);
  params->main_gpu = gpu_index;
  return main_gpu;
}

#n_gpu_layersInteger

Returns:

  • (Integer)


440
441
442
443
# File 'ext/llama_cpp/llama_cpp.c', line 440

/* Returns the n_gpu_layers field of the wrapped llama_model_params as a Ruby Integer. */
static VALUE llama_model_params_get_n_gpu_layers(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->n_gpu_layers);
}

#n_gpu_layers=(n_gpu_layers) ⇒ Integer

Parameters:

  • n_gpu_layers (Integer)

Returns:

  • (Integer)


445
446
447
448
449
# File 'ext/llama_cpp/llama_cpp.c', line 445

/* Stores a Ruby Integer into the n_gpu_layers field; returns the argument. */
static VALUE llama_model_params_set_n_gpu_layers(VALUE self, VALUE n_gpu_layers) {
  struct llama_model_params* params = get_llama_model_params(self);
  const int layer_count = NUM2INT(n_gpu_layers);
  params->n_gpu_layers = layer_count;
  return n_gpu_layers;
}

#no_hostBoolean

Returns:

  • (Boolean)


543
544
545
546
# File 'ext/llama_cpp/llama_cpp.c', line 543

/* Reads the no_host flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_no_host(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->no_host) {
    return Qtrue;
  }
  return Qfalse;
}

#no_host=(no_host) ⇒ Boolean

Parameters:

  • no_host (Boolean)

Returns:

  • (Boolean)


548
549
550
551
552
# File 'ext/llama_cpp/llama_cpp.c', line 548

/* Writes the no_host flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_no_host(VALUE self, VALUE no_host) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(no_host) ? true : false;
  params->no_host = flag;
  return no_host;
}

#split_modeInteger

Returns:

  • (Integer)


451
452
453
454
# File 'ext/llama_cpp/llama_cpp.c', line 451

/* Returns the split_mode field of the wrapped llama_model_params as a Ruby Integer. */
static VALUE llama_model_params_get_split_mode(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  return INT2NUM(params->split_mode);
}

#split_mode=(split_mode) ⇒ Integer

Parameters:

  • split_mode (Integer)

Returns:

  • (Integer)


456
457
458
459
460
# File 'ext/llama_cpp/llama_cpp.c', line 456

/* Stores a Ruby Integer into the split_mode field (cast to the enum); returns the argument. */
static VALUE llama_model_params_set_split_mode(VALUE self, VALUE split_mode) {
  struct llama_model_params* params = get_llama_model_params(self);
  const enum llama_split_mode mode = (enum llama_split_mode)NUM2INT(split_mode);
  params->split_mode = mode;
  return split_mode;
}

#tensor_splitArray<Float>

Returns:

  • (Array<Float>)


473
474
475
476
477
478
479
480
481
482
483
484
485
486
# File 'ext/llama_cpp/llama_cpp.c', line 473

/*
 * Returns the tensor_split proportions as a Ruby Array of Floats.
 * Yields an empty Array when no devices are available or when the
 * tensor_split pointer has not been set.
 */
static VALUE llama_model_params_get_tensor_split(VALUE self) {
  /* Hoist the invariant device count: the original called llama_max_devices()
   * in the guard, for the capacity, and on every loop iteration. */
  const size_t n_devices = llama_max_devices();
  if (n_devices < 1) {
    return rb_ary_new();
  }
  struct llama_model_params* data = get_llama_model_params(self);
  if (data->tensor_split == NULL) {
    return rb_ary_new();
  }
  VALUE ret = rb_ary_new2(n_devices);
  for (size_t i = 0; i < n_devices; i++) {
    rb_ary_store(ret, i, DBL2NUM(data->tensor_split[i]));
  }
  return ret;
}

#use_extra_buftsBoolean

Returns:

  • (Boolean)


532
533
534
535
# File 'ext/llama_cpp/llama_cpp.c', line 532

/* Reads the use_extra_bufts flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_use_extra_bufts(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_extra_bufts) {
    return Qtrue;
  }
  return Qfalse;
}

#use_extra_bufts=(use_extra_bufts) ⇒ Boolean

Parameters:

  • use_extra_bufts (Boolean)

Returns:

  • (Boolean)


537
538
539
540
541
# File 'ext/llama_cpp/llama_cpp.c', line 537

/* Writes the use_extra_bufts flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_use_extra_bufts(VALUE self, VALUE use_extra_bufts) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(use_extra_bufts) ? true : false;
  params->use_extra_bufts = flag;
  return use_extra_bufts;
}

#use_mlockBoolean

Returns:

  • (Boolean)


510
511
512
513
# File 'ext/llama_cpp/llama_cpp.c', line 510

/* Reads the use_mlock flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_use_mlock(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_mlock) {
    return Qtrue;
  }
  return Qfalse;
}

#use_mlock=(use_mlock) ⇒ Boolean

Parameters:

  • use_mlock (Boolean)

Returns:

  • (Boolean)


515
516
517
518
519
# File 'ext/llama_cpp/llama_cpp.c', line 515

/* Writes the use_mlock flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_use_mlock(VALUE self, VALUE use_mlock) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(use_mlock) ? true : false;
  params->use_mlock = flag;
  return use_mlock;
}

#use_mmapBoolean

Returns:

  • (Boolean)


499
500
501
502
# File 'ext/llama_cpp/llama_cpp.c', line 499

/* Reads the use_mmap flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_use_mmap(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->use_mmap) {
    return Qtrue;
  }
  return Qfalse;
}

#use_mmap=(use_mmap) ⇒ Boolean

Parameters:

  • use_mmap (Boolean)

Returns:

  • (Boolean)


504
505
506
507
508
# File 'ext/llama_cpp/llama_cpp.c', line 504

/* Writes the use_mmap flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_use_mmap(VALUE self, VALUE use_mmap) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(use_mmap) ? true : false;
  params->use_mmap = flag;
  return use_mmap;
}

#vocab_onlyBoolean

Returns:

  • (Boolean)


488
489
490
491
# File 'ext/llama_cpp/llama_cpp.c', line 488

/* Reads the vocab_only flag from the wrapped llama_model_params as a Ruby boolean. */
static VALUE llama_model_params_get_vocab_only(VALUE self) {
  const struct llama_model_params* params = get_llama_model_params(self);
  if (params->vocab_only) {
    return Qtrue;
  }
  return Qfalse;
}

#vocab_only=(vocab_only) ⇒ Boolean

Parameters:

  • vocab_only (Boolean)

Returns:

  • (Boolean)


493
494
495
496
497
# File 'ext/llama_cpp/llama_cpp.c', line 493

/* Writes the vocab_only flag from a Ruby truthiness test; returns the argument. */
static VALUE llama_model_params_set_vocab_only(VALUE self, VALUE vocab_only) {
  struct llama_model_params* params = get_llama_model_params(self);
  const bool flag = RTEST(vocab_only) ? true : false;
  params->vocab_only = flag;
  return vocab_only;
}