Class: LlamaCpp::LlamaModelParams
- Inherits: Object
- Defined in: ext/llama_cpp/llama_cpp.c
Overview
A wrapper class for llama.cpp's “struct llama_model_params”, which holds the parameters used when loading a model.
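A minimal usage sketch, assuming the class allocates with llama.cpp's default model parameters and that the gem exposes a loader mirroring the C API's llama_model_load_from_file (both names are assumptions, not verified against this version of the gem):

require 'llama_cpp'

params = LlamaCpp::LlamaModelParams.new   # assumed to start from llama_model_default_params
params.n_gpu_layers = 32                  # offload 32 layers to the GPU
params.use_mmap = true                    # memory-map the model file when possible
model = LlamaCpp.llama_model_load_from_file('model.gguf', params)  # loader name assumed from the C API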
Instance Method Summary
- #check_tensors ⇒ Boolean
- #check_tensors=(check_tensors) ⇒ Boolean
- #main_gpu ⇒ Integer
- #main_gpu=(main_gpu) ⇒ Integer
- #n_gpu_layers ⇒ Integer
- #n_gpu_layers=(n_gpu_layers) ⇒ Integer
- #split_mode ⇒ Integer
- #split_mode=(split_mode) ⇒ Integer
- #tensor_split ⇒ Array<Float>
- #use_mlock ⇒ Boolean
- #use_mlock=(use_mlock) ⇒ Boolean
- #use_mmap ⇒ Boolean
- #use_mmap=(use_mmap) ⇒ Boolean
- #vocab_only ⇒ Boolean
- #vocab_only=(vocab_only) ⇒ Boolean
Instance Method Details
#check_tensors ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 522

static VALUE llama_model_params_get_check_tensors(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->check_tensors ? Qtrue : Qfalse;
}
#check_tensors=(check_tensors) ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 527

static VALUE llama_model_params_set_check_tensors(VALUE self, VALUE check_tensors) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->check_tensors = RTEST(check_tensors) ? true : false;
  return check_tensors;
}
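For example, enabling tensor data validation during model loading (a sketch; the constructor is assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.check_tensors = true  # validate model tensor data when loading
params.check_tensors         # => true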
#main_gpu ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 463

static VALUE llama_model_params_get_main_gpu(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->main_gpu);
}
#main_gpu=(main_gpu) ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 468

static VALUE llama_model_params_set_main_gpu(VALUE self, VALUE main_gpu) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->main_gpu = NUM2INT(main_gpu);
  return main_gpu;
}
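A brief sketch: in llama.cpp, main_gpu selects which device is used when the work is not split across GPUs (constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.main_gpu = 0  # use device 0 when a single GPU handles the model
params.main_gpu      # => 0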
#n_gpu_layers ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 441

static VALUE llama_model_params_get_n_gpu_layers(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->n_gpu_layers);
}
#n_gpu_layers=(n_gpu_layers) ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 446

static VALUE llama_model_params_set_n_gpu_layers(VALUE self, VALUE n_gpu_layers) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->n_gpu_layers = NUM2INT(n_gpu_layers);
  return n_gpu_layers;
}
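For example, offloading part of the model to VRAM (a sketch; constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.n_gpu_layers = 35  # number of layers to keep in VRAM; 0 runs everything on the CPU
params.n_gpu_layers       # => 35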
#split_mode ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 452

static VALUE llama_model_params_get_split_mode(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return INT2NUM(data->split_mode);
}
#split_mode=(split_mode) ⇒ Integer
# File 'ext/llama_cpp/llama_cpp.c', line 457

static VALUE llama_model_params_set_split_mode(VALUE self, VALUE split_mode) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->split_mode = (enum llama_split_mode)NUM2INT(split_mode);
  return split_mode;
}
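A sketch of selecting a split mode. The setter casts the Integer to llama.cpp's enum llama_split_mode; the LlamaCpp::LLAMA_SPLIT_MODE_LAYER constant name is an assumption based on the C enum and may differ in this gem:

params = LlamaCpp::LlamaModelParams.new
params.split_mode = LlamaCpp::LLAMA_SPLIT_MODE_LAYER  # assumed constant; split layers across GPUs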
#tensor_split ⇒ Array<Float>
# File 'ext/llama_cpp/llama_cpp.c', line 474

static VALUE llama_model_params_get_tensor_split(VALUE self) {
  if (llama_max_devices() < 1) {
    return rb_ary_new();
  }
  struct llama_model_params* data = get_llama_model_params(self);
  if (data->tensor_split == NULL) {
    return rb_ary_new();
  }
  VALUE ret = rb_ary_new2(llama_max_devices());
  for (size_t i = 0; i < llama_max_devices(); i++) {
    rb_ary_store(ret, i, DBL2NUM(data->tensor_split[i]));
  }
  return ret;
}
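As the implementation shows, the getter returns an empty Array when no split is configured or no devices are available, and otherwise one Float per device (a sketch; constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.tensor_split  # => [] until a split is configured; otherwise one proportion per device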
#use_mlock ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 511

static VALUE llama_model_params_get_use_mlock(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mlock ? Qtrue : Qfalse;
}
#use_mlock=(use_mlock) ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 516

static VALUE llama_model_params_set_use_mlock(VALUE self, VALUE use_mlock) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mlock = RTEST(use_mlock) ? true : false;
  return use_mlock;
}
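For example (a sketch; constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.use_mlock = true  # ask the OS to keep the model resident in RAM via mlock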
#use_mmap ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 500

static VALUE llama_model_params_get_use_mmap(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->use_mmap ? Qtrue : Qfalse;
}
#use_mmap=(use_mmap) ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 505

static VALUE llama_model_params_set_use_mmap(VALUE self, VALUE use_mmap) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->use_mmap = RTEST(use_mmap) ? true : false;
  return use_mmap;
}
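For example (a sketch; constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.use_mmap = false  # read the file into memory instead of memory-mapping it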
#vocab_only ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 489

static VALUE llama_model_params_get_vocab_only(VALUE self) {
  struct llama_model_params* data = get_llama_model_params(self);
  return data->vocab_only ? Qtrue : Qfalse;
}
#vocab_only=(vocab_only) ⇒ Boolean
# File 'ext/llama_cpp/llama_cpp.c', line 494

static VALUE llama_model_params_set_vocab_only(VALUE self, VALUE vocab_only) {
  struct llama_model_params* data = get_llama_model_params(self);
  data->vocab_only = RTEST(vocab_only) ? true : false;
  return vocab_only;
}
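For example, loading only the vocabulary is useful when a program needs tokenization but not inference (a sketch; constructor assumed as in the overview):

params = LlamaCpp::LlamaModelParams.new
params.vocab_only = true  # load the vocabulary only, skipping the model weights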