Class: Chat

Inherits:
ApplicationRecord
Includes:
ChatManager::TitleGeneratable
Defined in:
lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb

Class Method Summary

.find_or_switch_for_session(session, current_user) ⇒ Object

Instance Method Summary

#add_assistant_response(prompt_execution, jwt_token, tool_ids: [], generation_settings: {}) ⇒ Object
Adds an assistant response by sending the prompt to the LLM.

#add_user_message(message, llm_uuid, model, branch_from_execution_id = nil) ⇒ Object
Adds a user message to the chat.

#finalize_streamed_response(prompt_execution, content, jwt_token) ⇒ Object
Persists the streamed assistant response.

#ordered_by_descending_prompt_executions ⇒ Object
Returns the prompt executions for the chat's user messages, newest first.

#ordered_messages ⇒ Object
Returns all messages in chronological order.

#stream_assistant_response(prompt_execution, jwt_token, generation_settings: {}, &block) ⇒ Object
Streams the assistant response from the LLM.
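
The methods below operate on a messages association and a user owner. A minimal skeleton consistent with that usage, offered as an inference rather than the template's verbatim contents:

class Chat < ApplicationRecord
  include ChatManager::TitleGeneratable

  # Inferred from usage in the methods documented below; the real template may differ.
  belongs_to :user
  has_many :messages
end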

Class Method Details

.find_or_switch_for_session(session, current_user) ⇒ Object



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 11

def find_or_switch_for_session(session, current_user)
  chat = find_by_session_chat_id(session, current_user)
  return chat if chat.present?

  chat = create!(user: current_user)
  session[:chat_id] = chat.id
  chat
end
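
A minimal usage sketch from a controller action; the controller name and the current_user helper are illustrative, not part of this class:

# Hypothetical controller; ChatsController and current_user are assumptions.
class ChatsController < ApplicationController
  def show
    # Reuses the chat referenced by session[:chat_id], or creates one for the user.
    @chat = Chat.find_or_switch_for_session(session, current_user)
    @messages = @chat.ordered_messages
  end
end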

Instance Method Details

#add_assistant_response(prompt_execution, jwt_token, tool_ids: [], generation_settings: {}) ⇒ Object

Adds an assistant response by sending the prompt to the LLM.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 57

def add_assistant_response(prompt_execution, jwt_token, tool_ids: [], generation_settings: {})
  response_content = send_to_llm(prompt_execution, jwt_token, tool_ids: tool_ids, generation_settings: generation_settings)
  prompt_execution.update!(
    llm_platform: resolve_llm_type(prompt_execution.llm_uuid, jwt_token),
    response: response_content
  )
  new_message = messages.create!(
    role: "assistant",
    prompt_navigator_prompt_execution: prompt_execution
  )

  new_message
end
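
A sketch of the non-streaming round trip, pairing this method with #add_user_message (documented below); chat and jwt_token are assumed to be in scope, and the UUID, model name, and generation_settings key are placeholders:

# Placeholder identifiers; supply real values for your LLM platform.
prompt_execution, _user_message = chat.add_user_message(
  "Summarize this document", "llm-uuid-123", "gpt-4o"
)
assistant_message = chat.add_assistant_response(
  prompt_execution,
  jwt_token,
  generation_settings: { temperature: 0.7 } # assumed settings key
)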

#add_user_message(message, llm_uuid, model, branch_from_execution_id = nil) ⇒ Object

Adds a user message to the chat.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 34

def add_user_message(message, llm_uuid, model, branch_from_execution_id = nil)
  # Link to the branched-from execution when given; otherwise chain onto the
  # most recent user prompt execution in this chat.
  previous_id = if branch_from_execution_id.present?
    PromptNavigator::PromptExecution.find_by(execution_id: branch_from_execution_id)&.id
  else
    messages.where(role: "user").order(:created_at).last&.prompt_navigator_prompt_execution_id
  end
  prompt_execution = PromptNavigator::PromptExecution.create!(
    prompt: message,
    llm_uuid: llm_uuid,
    model: model,
    configuration: "",
    previous_id: previous_id
  )

  new_message = messages.create!(
    role: "user",
    prompt_navigator_prompt_execution: prompt_execution
  )

  [ prompt_execution, new_message ]
end
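
A sketch of branching from an earlier point in the conversation; every identifier value here is a placeholder:

# Passing branch_from_execution_id links the new message to that execution
# instead of chaining onto the most recent user message.
prompt_execution, message = chat.add_user_message(
  "Try a different approach",
  "llm-uuid-123",  # placeholder LLM UUID
  "gpt-4o",        # placeholder model name
  "exec-abc-123"   # placeholder execution_id to branch from
)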

#finalize_streamed_response(prompt_execution, content, jwt_token) ⇒ Object

Persists the streamed assistant response. Skips persistence if the content is blank.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 87

def finalize_streamed_response(prompt_execution, content, jwt_token)
  return nil if content.blank?

  prompt_execution.update!(
    llm_platform: resolve_llm_type(prompt_execution.llm_uuid, jwt_token),
    response: content
  )
  messages.create!(
    role: "assistant",
    prompt_navigator_prompt_execution: prompt_execution
  )
end
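
A small sketch of the blank guard, assuming chat, prompt_execution, and jwt_token are in scope:

# A blank stream persists nothing and returns nil.
chat.finalize_streamed_response(prompt_execution, "", jwt_token) # => nil

# With content, the execution's response is updated and the newly created
# assistant Message is returned. streamed_content is a placeholder for the
# assembled stream output.
chat.finalize_streamed_response(prompt_execution, streamed_content, jwt_token)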

#ordered_by_descending_prompt_executions ⇒ Object

Returns the prompt executions for the chat's user messages, newest first.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 107

def ordered_by_descending_prompt_executions
  messages
    .where(role: "user")
    .includes(:prompt_navigator_prompt_execution)
    .order(created_at: :desc)
    .to_a
    .select { |msg| msg.prompt_navigator_prompt_execution } # keep only messages with an execution
    .map(&:prompt_navigator_prompt_execution)
end
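
A small consumer sketch, e.g. for a history panel; the output format is illustrative, and truncate comes from ActiveSupport:

chat.ordered_by_descending_prompt_executions.each do |execution|
  # Each entry is a PromptNavigator::PromptExecution, newest first.
  puts "#{execution.prompt} => #{execution.response&.truncate(80)}"
end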

#ordered_messages ⇒ Object

Returns all messages in chronological order.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 101

def ordered_messages
  messages
    .includes(:prompt_navigator_prompt_execution)
    .order(:created_at)
end
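
A minimal rendering sketch; per the other methods on this page, user messages carry their text in the execution's prompt and assistant messages in its response:

chat.ordered_messages.each do |message|
  # The association is eager-loaded, so this does not trigger extra queries.
  execution = message.prompt_navigator_prompt_execution
  content = message.role == "user" ? execution&.prompt : execution&.response
  puts "#{message.role}: #{content}"
end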

#stream_assistant_response(prompt_execution, jwt_token, generation_settings: {}, &block) ⇒ Object

Streams the assistant response from the LLM. Yields each parsed SSE event and returns the assembled content. The caller is responsible for persistence.



# File 'lib/generators/llm_meta_client/scaffold/templates/app/models/chat.rb', line 73

def stream_assistant_response(prompt_execution, jwt_token, generation_settings: {}, &block)
  summarized_context, prompt = build_streaming_context(prompt_execution, jwt_token)
  LlmMetaClient::ServerQuery.new.stream(
    jwt_token,
    prompt_execution.llm_uuid,
    prompt_execution.model,
    summarized_context,
    prompt,
    generation_settings: generation_settings,
    &block
  )
end
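
A sketch of the full streaming flow; the shape of the yielded events is an assumption about LlmMetaClient::ServerQuery#stream, and the generation_settings key is a placeholder:

content = chat.stream_assistant_response(
  prompt_execution,
  jwt_token,
  generation_settings: { temperature: 0.2 } # assumed settings key
) do |event|
  # Each parsed SSE event arrives here as the response streams in.
  Rails.logger.debug(event.inspect)
end

# Nothing is persisted by the call above; complete the round trip explicitly.
chat.finalize_streamed_response(prompt_execution, content, jwt_token)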