Module: RubyLLM::Providers::Vllm::Chat
- Included in: RubyLLM::Providers::Vllm
- Defined in: lib/legion/llm/patches/ruby_llm_vllm.rb
Instance Method Summary
- #format_messages(messages) ⇒ Object
- #format_role(role) ⇒ Object
- #render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, thinking: nil, tool_prefs: nil) ⇒ Object
Instance Method Details
#format_messages(messages) ⇒ Object
```ruby
# File 'lib/legion/llm/patches/ruby_llm_vllm.rb', line 11

def format_messages(messages)
  messages.map do |msg|
    {
      role: format_role(msg.role),
      content: OpenAI::Media.format_content(msg.content),
      tool_calls: format_tool_calls(msg.tool_calls),
      tool_call_id: msg.tool_call_id
    }.compact.merge(OpenAI::Chat.format_thinking(msg)) # drop nil keys, then merge in thinking fields
  end
end
```
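For context, a minimal usage sketch (the message object and its constructor here are illustrative; any object responding to `role`, `content`, `tool_calls`, and `tool_call_id` works):

```ruby
# Hypothetical message; assumes RubyLLM::Message accepts these keywords.
msg = RubyLLM::Message.new(role: :user, content: "Hello")

format_messages([msg])
# => [{ role: "user", content: "Hello" }]
# tool_calls and tool_call_id are nil for this message, so #compact drops them.
```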
#format_role(role) ⇒ Object
```ruby
# File 'lib/legion/llm/patches/ruby_llm_vllm.rb', line 7

def format_role(role)
  role.to_s
end
```
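Because vLLM serves an OpenAI-compatible chat endpoint, roles are passed through verbatim rather than remapped:

```ruby
format_role(:assistant) # => "assistant"
format_role(:system)    # => "system"
```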
#render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, thinking: nil, tool_prefs: nil) ⇒ Object
```ruby
# File 'lib/legion/llm/patches/ruby_llm_vllm.rb', line 22

def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, thinking: nil, tool_prefs: nil)
  payload = super
  # nil means "not specified": fall back to the configured default;
  # otherwise coerce the flag to a strict boolean.
  enable = if thinking.nil?
             vllm_thinking_default
           else
             thinking ? true : false
           end
  payload[:chat_template_kwargs] = { enable_thinking: enable }
  payload
end
```
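A sketch of the effect on the request payload (the model name is hypothetical, and every key other than `:chat_template_kwargs` comes from the superclass via `super`):

```ruby
payload = render_payload(messages, tools: [], temperature: 0.7,
                         model: "qwen3-8b", stream: false, thinking: true)
payload[:chat_template_kwargs] # => { enable_thinking: true }

# With thinking: nil, enable_thinking falls back to vllm_thinking_default.
```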