Class: OpenAI::Resources::Responses

Inherits:
Object
Defined in:
lib/openai/resources/responses.rb,
lib/openai/resources/responses/input_items.rb

Defined Under Namespace

Classes: InputItems

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(client:) ⇒ Responses

This method is part of a private API. You should avoid using this method if possible, as it may be removed or changed in the future.

Returns a new instance of Responses.

Parameters:

  • client (OpenAI::Client)

# File 'lib/openai/resources/responses.rb', line 363

def initialize(client:)
  @client = client
  @input_items = OpenAI::Resources::Responses::InputItems.new(client: client)
end
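
In normal use this resource is not constructed directly; it is read off a configured client. A minimal sketch, assuming an API key in the environment:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
responses = client.responses # => OpenAI::Resources::Responses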

Instance Attribute Details

#input_items ⇒ OpenAI::Resources::Responses::InputItems (readonly)



# File 'lib/openai/resources/responses.rb', line 7

def input_items
  @input_items
end
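
As a usage sketch, this attribute exposes the input-items sub-resource; the response ID below is a placeholder:

# "resp_123" is a hypothetical response ID.
page = client.responses.input_items.list("resp_123")
page.each { |item| puts item.type }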

Instance Method Details

#cancel(response_id, request_options: {}) ⇒ OpenAI::Models::Responses::Response

Cancels a model response with the given ID. Only responses created with the `background` parameter set to `true` can be cancelled. [Learn more](platform.openai.com/docs/guides/background).

Parameters:

  • response_id (String)

    The ID of the response to cancel.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (OpenAI::Models::Responses::Response)

See Also:



# File 'lib/openai/resources/responses.rb', line 351

def cancel(response_id, params = {})
  @client.request(
    method: :post,
    path: ["responses/%1$s/cancel", response_id],
    model: OpenAI::Responses::Response,
    options: params[:request_options]
  )
end
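
A usage sketch; the ID is a placeholder for a response created with `background: true`:

# Cancel a hypothetical in-flight background response.
response = client.responses.cancel("resp_123")
puts response.status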

#create(input: , model: , background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) ⇒ OpenAI::Models::Responses::Response

See #stream_raw for the streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseCreateParams for more details.

Creates a model response. Provide [text](platform.openai.com/docs/guides/text) or [image](platform.openai.com/docs/guides/images) inputs to generate [text](platform.openai.com/docs/guides/text) or [JSON](platform.openai.com/docs/guides/structured-outputs) outputs. Have the model call your own [custom code](platform.openai.com/docs/guides/function-calling) or use built-in [tools](platform.openai.com/docs/guides/tools) like [web search](platform.openai.com/docs/guides/tools-web-search) or [file search](platform.openai.com/docs/guides/tools-file-search) to use your own data as input for the model’s response.

Parameters:

Returns:

  • (OpenAI::Models::Responses::Response)

See Also:



# File 'lib/openai/resources/responses.rb', line 71

def create(params)
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  if parsed[:stream]
    message = "Please use `#stream_raw` for the streaming use case."
    raise ArgumentError.new(message)
  end

  model = nil
  tool_models = {}
  case parsed
  in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
    parsed.update(
      text: {
        format: {
          type: :json_schema,
          strict: true,
          name: model.name.split("::").last,
          schema: model.to_json_schema
        }
      }
    )
  in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
    parsed.fetch(:text).update(
      format: {
        type: :json_schema,
        strict: true,
        name: model.name.split("::").last,
        schema: model.to_json_schema
      }
    )
  in {text: {format: {type: :json_schema, schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
    parsed.dig(:text, :format).store(:schema, model.to_json_schema)
  in {tools: Array => tools}
    mapped = tools.map do |tool|
      case tool
      in OpenAI::StructuredOutput::JsonSchemaConverter
        name = tool.name.split("::").last
        tool_models.store(name, tool)
        {
          type: :function,
          strict: true,
          name: name,
          parameters: tool.to_json_schema
        }
      in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
        # Responses API function tools are flat hashes, so update the tool in place.
        name = tool[:name] ||= params.name.split("::").last
        tool_models.store(name, params)
        tool.update(parameters: params.to_json_schema)
      else
        # Pass unrecognized tools through unchanged.
        tool
      end
    end
    tools.replace(mapped)
  else
  end

  # Re-parse structured output from the raw response: JSON text produced
  # under a json_schema format is coerced back into `model`, and function
  # call arguments are coerced into their registered tool models.
  unwrap = ->(raw) do
    if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
      raw[:output]
        &.flat_map do |output|
          next [] unless output[:type] == "message"
          output[:content].to_a
        end
        &.each do |content|
          next unless content[:type] == "output_text"
          parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
          coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
          content.store(:parsed, coerced)
        end
    end
    raw[:output]&.each do |output|
      next unless output[:type] == "function_call"
      next if (model = tool_models[output.fetch(:name)]).nil?
      parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
      coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
      output.store(:parsed, coerced)
    end

    raw
  end
  @client.request(
    method: :post,
    path: "responses",
    body: parsed,
    unwrap: unwrap,
    model: OpenAI::Responses::Response,
    options: options
  )
end
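
A usage sketch of the structured-output path handled above. The `CalendarEvent` model is hypothetical; passing it as `text:` makes `create` send a strict `json_schema` text format, and the `unwrap` hook attaches the coerced value to each `output_text` content part as `parsed`:

# Hypothetical structured-output model for illustration.
class CalendarEvent < OpenAI::BaseModel
  required :name, String
  required :date, String
end

response = client.responses.create(
  model: "gpt-4o",
  input: "Alice and Bob are going to a science fair on Friday.",
  text: CalendarEvent
)
response.output.each do |output|
  next unless output.respond_to?(:content)
  output.content.each do |content|
    puts content.parsed if content.respond_to?(:parsed)
  end
end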

#delete(response_id, request_options: {}) ⇒ nil

Deletes a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to delete.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (nil)

See Also:



# File 'lib/openai/resources/responses.rb', line 329

def delete(response_id, params = {})
  @client.request(
    method: :delete,
    path: ["responses/%1$s", response_id],
    model: NilClass,
    options: params[:request_options]
  )
end
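
A usage sketch; the ID is a placeholder:

# Returns nil on success.
client.responses.delete("resp_123")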

#retrieve(response_id, include: nil, starting_after: nil, request_options: {}) ⇒ OpenAI::Models::Responses::Response

See #retrieve_streaming for the streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseRetrieveParams for more details.

Retrieves a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to retrieve.

  • include (Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>)

    Additional fields to include in the response. See the `include`

  • starting_after (Integer)

    The sequence number of the event after which to start streaming.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (OpenAI::Models::Responses::Response)

See Also:



# File 'lib/openai/resources/responses.rb', line 265

def retrieve(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  if parsed[:stream]
    message = "Please use `#retrieve_streaming` for the streaming use case."
    raise ArgumentError.new(message)
  end
  @client.request(
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    model: OpenAI::Responses::Response,
    options: options
  )
end
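
A usage sketch; the ID is a placeholder:

response = client.responses.retrieve("resp_123")
puts response.status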

#retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {}) ⇒ OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>

See #retrieve for the non-streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseRetrieveParams for more details.

Retrieves a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to retrieve.

  • include (Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>)

    Additional fields to include in the response. See the `include`

  • starting_after (Integer)

    The sequence number of the event after which to start streaming.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

See Also:



# File 'lib/openai/resources/responses.rb', line 300

def retrieve_streaming(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#retrieve` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  parsed.store(:stream, true)
  @client.request(
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    headers: {"accept" => "text/event-stream"},
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  )
end
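
A usage sketch for resuming the event stream of a background response; the ID and cursor are placeholders:

stream = client.responses.retrieve_streaming("resp_123", starting_after: 41)
stream.each do |event|
  puts event.type
end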

#stream ⇒ Object

Raises:

  • (NotImplementedError)


# File 'lib/openai/resources/responses.rb', line 161

def stream
  raise NotImplementedError.new("higher level helpers are coming soon!")
end

#stream_raw(input: , model: , background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) ⇒ OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, 
OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>

See #create for the non-streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseCreateParams for more details.

Creates a model response. Provide [text](platform.openai.com/docs/guides/text) or [image](platform.openai.com/docs/guides/images) inputs to generate [text](platform.openai.com/docs/guides/text) or [JSON](platform.openai.com/docs/guides/structured-outputs) outputs. Have the model call your own [custom code](platform.openai.com/docs/guides/function-calling) or use built-in [tools](platform.openai.com/docs/guides/tools) like [web search](platform.openai.com/docs/guides/tools-web-search) or [file search](platform.openai.com/docs/guides/tools-file-search) to use your own data as input for the model’s response.

Parameters:

Returns:

See Also:



# File 'lib/openai/resources/responses.rb', line 227

def stream_raw(params)
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#create` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  parsed.store(:stream, true)
  @client.request(
    method: :post,
    path: "responses",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  )
end
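
A usage sketch of the streaming call; it prints text deltas as they arrive:

stream = client.responses.stream_raw(
  model: "gpt-4o",
  input: "Write a haiku about Ruby."
)
stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseTextDeltaEvent
    print event.delta
  end
end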