Class: LlamaCpp::LlamaModelParams

Inherits:
  Object

Defined in:
  ext/llama_cpp/llama_cpp.c

Overview

Wrapper class for llama.cpp's struct llama_model_params, the parameters used when loading a model.
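
For orientation, here is a minimal usage sketch in Ruby. The factory and loader names are assumptions that mirror the llama.cpp C API (llama_model_default_params and llama_model_load_from_file); check your gem version for the exact module functions.

require 'llama_cpp'

# Assumed factory: returns a LlamaModelParams with llama.cpp's defaults.
params = LlamaCpp.llama_model_default_params
params.n_gpu_layers = 32   # offload 32 layers to the GPU
params.use_mmap = true     # memory-map the weights instead of reading them
# Assumed loader, mirroring llama_model_load_from_file().
model = LlamaCpp.llama_model_load_from_file('model.gguf', params)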

Instance Method Summary

  • #check_tensors ⇒ Boolean
  • #check_tensors=(check_tensors) ⇒ Boolean
  • #main_gpu ⇒ Integer
  • #main_gpu=(main_gpu) ⇒ Integer
  • #n_gpu_layers ⇒ Integer
  • #n_gpu_layers=(n_gpu_layers) ⇒ Integer
  • #split_mode ⇒ Integer
  • #split_mode=(split_mode) ⇒ Integer
  • #tensor_split ⇒ Array<Float>
  • #use_mlock ⇒ Boolean
  • #use_mlock=(use_mlock) ⇒ Boolean
  • #use_mmap ⇒ Boolean
  • #use_mmap=(use_mmap) ⇒ Boolean
  • #vocab_only ⇒ Boolean
  • #vocab_only=(vocab_only) ⇒ Boolean

Instance Method Details

#check_tensors ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 478

static VALUE llama_model_params_get_check_tensors(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->check_tensors ? Qtrue : Qfalse;
}

#check_tensors=(check_tensors) ⇒ Boolean

Parameters:

  • check_tensors (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 483

static VALUE llama_model_params_set_check_tensors(VALUE self, VALUE check_tensors) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->check_tensors = RTEST(check_tensors) ? true : false;
  return check_tensors;
}
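
A small sketch (assuming params is a LlamaModelParams instance as in the Overview). Note that the setter stores the result of RTEST, so any truthy Ruby value is coerced to C true:

params.check_tensors = true  # validate tensor data while the model loads
params.check_tensors         # => true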

#main_gpu ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 419

static VALUE llama_model_params_get_main_gpu(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->main_gpu);
}

#main_gpu=(main_gpu) ⇒ Integer

Parameters:

  • main_gpu (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 424

static VALUE llama_model_params_set_main_gpu(VALUE self, VALUE main_gpu) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->main_gpu = NUM2INT(main_gpu);
  return main_gpu;
}
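
Example (params as above). In llama.cpp, main_gpu selects the device used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE:

params.main_gpu = 0  # use device 0
params.main_gpu      # => 0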

#n_gpu_layers ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 397

static VALUE llama_model_params_get_n_gpu_layers(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->n_gpu_layers);
}

#n_gpu_layers=(n_gpu_layers) ⇒ Integer

Parameters:

  • n_gpu_layers (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 402

static VALUE llama_model_params_set_n_gpu_layers(VALUE self, VALUE n_gpu_layers) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->n_gpu_layers = NUM2INT(n_gpu_layers);
  return n_gpu_layers;
}
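
Example (params as above); a value at or above the model's layer count offloads every layer:

params.n_gpu_layers = 99  # offloads all layers on most models
params.n_gpu_layers       # => 99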

#split_mode ⇒ Integer

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 408

static VALUE llama_model_params_get_split_mode(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->split_mode);
}

#split_mode=(split_mode) ⇒ Integer

Parameters:

  • split_mode (Integer)

Returns:

  • (Integer)


# File 'ext/llama_cpp/llama_cpp.c', line 413

static VALUE llama_model_params_set_split_mode(VALUE self, VALUE split_mode) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->split_mode = (enum llama_split_mode)NUM2INT(split_mode);
  return split_mode;
}
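
A sketch assuming the gem exposes the llama_split_mode enum values as constants; in llama.h the raw values are NONE = 0, LAYER = 1, ROW = 2:

params.split_mode = LlamaCpp::LLAMA_SPLIT_MODE_LAYER  # assumed constant (= 1)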

#tensor_split ⇒ Array<Float>

Returns:

  • (Array<Float>)


# File 'ext/llama_cpp/llama_cpp.c', line 430

static VALUE llama_model_params_get_tensor_split(VALUE self) {
  if (llama_max_devices() < 1) {
    return rb_ary_new();
  }
  struct llama_model_params* data = get_llama_model_params(self);
  if (data->tensor_split == NULL) {
    return rb_ary_new();
  }
  VALUE ret = rb_ary_new2(llama_max_devices());
  for (size_t i = 0; i < llama_max_devices(); i++) {
    rb_ary_store(ret, i, DBL2NUM(data->tensor_split[i]));
  }
  return ret;
}
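
As the code above shows, the getter returns one Float per device slot (llama_max_devices entries) and an empty array while tensor_split is still NULL; no Ruby setter is documented here:

params.tensor_split  # => [] until a split is configured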

#use_mlock ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 467

static VALUE llama_model_params_get_use_mlock(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mlock ? Qtrue : Qfalse;
}

#use_mlock=(use_mlock) ⇒ Boolean

Parameters:

  • use_mlock (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 472

static VALUE llama_model_params_set_use_mlock(VALUE self, VALUE use_mlock) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mlock = RTEST(use_mlock) ? true : false;
  return use_mlock;
}
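
Example (params as above):

params.use_mlock = true  # ask the OS to keep the model resident in RAM
params.use_mlock         # => true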

#use_mmap ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 456

static VALUE llama_model_params_get_use_mmap(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mmap ? Qtrue : Qfalse;
}

#use_mmap=(use_mmap) ⇒ Boolean

Parameters:

  • use_mmap (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 461

static VALUE llama_model_params_set_use_mmap(VALUE self, VALUE use_mmap) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mmap = RTEST(use_mmap) ? true : false;
  return use_mmap;
}
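
Example (params as above):

params.use_mmap = false  # read the file up front instead of memory-mapping it
params.use_mmap          # => false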

#vocab_only ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 445

static VALUE llama_model_params_get_vocab_only(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->vocab_only ? Qtrue : Qfalse;
}

#vocab_only=(vocab_only) ⇒ Boolean

Parameters:

  • vocab_only (Boolean)

Returns:

  • (Boolean)


# File 'ext/llama_cpp/llama_cpp.c', line 450

static VALUE llama_model_params_set_vocab_only(VALUE self, VALUE vocab_only) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->vocab_only = RTEST(vocab_only) ? true : false;
  return vocab_only;
}
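
Example (params as above); useful when only tokenization is needed:

params.vocab_only = true  # load the vocabulary but no weights
params.vocab_only         # => true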