Class: LlamaCpp::LlamaModelParams

Inherits:
Object

Defined in:
ext/llama_cpp/llama_cpp.c

Overview

Wrapper class for llama.cpp's “struct llama_model_params”.

Instance Method Summary

  • #check_tensors ⇒ Boolean
  • #check_tensors=(check_tensors) ⇒ Boolean
  • #main_gpu ⇒ Integer
  • #main_gpu=(main_gpu) ⇒ Integer
  • #n_gpu_layers ⇒ Integer
  • #n_gpu_layers=(n_gpu_layers) ⇒ Integer
  • #split_mode ⇒ Integer
  • #split_mode=(split_mode) ⇒ Integer
  • #tensor_split ⇒ Array<Float>
  • #use_mlock ⇒ Boolean
  • #use_mlock=(use_mlock) ⇒ Boolean
  • #use_mmap ⇒ Boolean
  • #use_mmap=(use_mmap) ⇒ Boolean
  • #vocab_only ⇒ Boolean
  • #vocab_only=(vocab_only) ⇒ Boolean

Instance Method Details

#check_tensors ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 522

static VALUE llama_model_params_get_check_tensors(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->check_tensors ? Qtrue : Qfalse;
}

#check_tensors=(check_tensors) ⇒ Boolean

Parameters:

  • check_tensors (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 527

static VALUE llama_model_params_set_check_tensors(VALUE self, VALUE check_tensors) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->check_tensors = RTEST(check_tensors) ? true : false;
  return check_tensors;
}
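
As a usage sketch, tensor data validation can be enabled before a model is loaded. The constructor shown (LlamaCpp::LlamaModelParams.new) is an assumption about how a default-initialized instance is obtained and is not confirmed by this page:

  params = LlamaCpp::LlamaModelParams.new  # assumed constructor
  params.check_tensors = true              # validate tensor data while loading
  params.check_tensors                     # => true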

#main_gpu ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 463

static VALUE llama_model_params_get_main_gpu(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->main_gpu);
}

#main_gpu=(main_gpu) ⇒ Integer

Parameters:

  • main_gpu (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 468

static VALUE llama_model_params_set_main_gpu(VALUE self, VALUE main_gpu) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->main_gpu = NUM2INT(main_gpu);
  return main_gpu;
}

#n_gpu_layers ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 441

static VALUE llama_model_params_get_n_gpu_layers(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->n_gpu_layers);
}

#n_gpu_layers=(n_gpu_layers) ⇒ Integer

Parameters:

  • n_gpu_layers (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 446

static VALUE llama_model_params_set_n_gpu_layers(VALUE self, VALUE n_gpu_layers) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->n_gpu_layers = NUM2INT(n_gpu_layers);
  return n_gpu_layers;
}
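
A common use of this field is offloading transformer layers to the GPU backend. A minimal sketch, with the same assumed constructor as above:

  params = LlamaCpp::LlamaModelParams.new
  params.n_gpu_layers = 32  # offload up to 32 layers to the GPU; 0 keeps everything on the CPU
  params.n_gpu_layers       # => 32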

#split_mode ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 452

static VALUE llama_model_params_get_split_mode(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->split_mode);
}

#split_mode=(split_mode) ⇒ Integer

Parameters:

  • split_mode (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 457

static VALUE llama_model_params_set_split_mode(VALUE self, VALUE split_mode) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->split_mode = (enum llama_split_mode)NUM2INT(split_mode);
  return split_mode;
}

#tensor_split ⇒ Array<Float>

Returns:

  • (Array<Float>)


# File 'ext/llama_cpp/llama_cpp.c', line 474

static VALUE llama_model_params_get_tensor_split(VALUE self) {
  if (llama_max_devices() < 1) {
    return rb_ary_new();
  }
  struct llama_model_params* data = get_llama_model_params(self);
  if (data->tensor_split == NULL) {
    return rb_ary_new();
  }
  VALUE ret = rb_ary_new2(llama_max_devices());
  for (size_t i = 0; i < llama_max_devices(); i++) {
    rb_ary_store(ret, i, DBL2NUM(data->tensor_split[i]));
  }
  return ret;
}
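
The three multi-GPU fields work together: #split_mode selects the splitting strategy, #main_gpu picks the primary device, and #tensor_split reports the per-device proportions. The integer values mirror llama.cpp's enum llama_split_mode (0 = none, 1 = layer, 2 = row); whether this gem exposes named constants for them is not shown here, so plain integers are used in this sketch:

  params = LlamaCpp::LlamaModelParams.new  # assumed constructor, as above
  params.split_mode = 1       # split layers across GPUs (LLAMA_SPLIT_MODE_LAYER in C)
  params.main_gpu   = 0       # device used for small tensors and intermediate results
  params.tensor_split         # => [] while the underlying pointer is NULL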

#use_mlock ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 511

static VALUE llama_model_params_get_use_mlock(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mlock ? Qtrue : Qfalse;
}

#use_mlock=(use_mlock) ⇒ Boolean

Parameters:

  • use_mlock (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 516

static VALUE llama_model_params_set_use_mlock(VALUE self, VALUE use_mlock) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mlock = RTEST(use_mlock) ? true : false;
  return use_mlock;
}

#use_mmap ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 500

static VALUE llama_model_params_get_use_mmap(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mmap ? Qtrue : Qfalse;
}

#use_mmap=(use_mmap) ⇒ Boolean

Parameters:

  • use_mmap (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 505

static VALUE llama_model_params_set_use_mmap(VALUE self, VALUE use_mmap) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mmap = RTEST(use_mmap) ? true : false;
  return use_mmap;
}
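
A sketch combining the two memory options (assumed constructor as above): #use_mmap maps the model file instead of reading it eagerly, and #use_mlock pins the mapped pages in RAM so they cannot be swapped out, which may require a raised RLIMIT_MEMLOCK limit:

  params = LlamaCpp::LlamaModelParams.new
  params.use_mmap  = true   # map the model file; pages are loaded on demand
  params.use_mlock = true   # lock pages in memory to avoid swapping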

#vocab_only ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 489

static VALUE llama_model_params_get_vocab_only(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->vocab_only ? Qtrue : Qfalse;
}

#vocab_only=(vocab_only) ⇒ Boolean

Parameters:

  • vocab_only (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 494

static VALUE llama_model_params_set_vocab_only(VALUE self, VALUE vocab_only) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->vocab_only = RTEST(vocab_only) ? true : false;
  return vocab_only;
}
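
A sketch for loading only the vocabulary, e.g. to tokenize text without allocating weight tensors. The loader call in the comment (LlamaCpp.llama_model_load_from_file) mirrors the C API name and is an assumption about this gem's module functions:

  params = LlamaCpp::LlamaModelParams.new  # assumed constructor, as above
  params.vocab_only = true  # skip tensor weights; load vocabulary and metadata only
  # model = LlamaCpp.llama_model_load_from_file('model.gguf', params)  # assumed binding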