Class: Google::Cloud::Ces::V1::Guardrail

Inherits:
Object
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/cloud/ces/v1/guardrail.rb

Overview

Guardrail contains a list of checks and balances to keep the agents safe and secure.

Defined Under Namespace

Classes: CodeCallback, ContentFilter, LlmPolicy, LlmPromptSecurity, ModelSafety

Instance Attribute Summary collapse

Instance Attribute Details

#action::Google::Cloud::Ces::V1::TriggerAction

Returns Optional. Action to take when the guardrail is triggered.

Returns:

  • (::Google::Cloud::Ces::V1::TriggerAction)

    Optional. Action to take when the guardrail is triggered.


84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure. Each nested type below configures one kind of check. Per the
# field-level notes, the top-level guardrail type fields (content_filter,
# llm_prompt_security, llm_policy, model_safety, code_callback) are mutually
# exclusive — populating one clears the others (oneof-style semantics).
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#code_callback::Google::Cloud::Ces::V1::Guardrail::CodeCallback

Returns Optional. Guardrail that potentially blocks the conversation based on the result of the callback execution.

Note: The following fields are mutually exclusive: code_callback, content_filter, llm_prompt_security, llm_policy, model_safety. If a field in that set is populated, all other fields in the set will automatically be cleared.

Returns:

  • (::Google::Cloud::Ces::V1::Guardrail::CodeCallback)

    Optional. Guardrail that potentially blocks the conversation based on the result of the callback execution.

    Note: The following fields are mutually exclusive: code_callback, content_filter, llm_prompt_security, llm_policy, model_safety. If a field in that set is populated, all other fields in the set will automatically be cleared.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure. Each nested type below configures one kind of check. Per the
# field-level notes, the top-level guardrail type fields (content_filter,
# llm_prompt_security, llm_policy, model_safety, code_callback) are mutually
# exclusive — populating one clears the others (oneof-style semantics).
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#content_filter::Google::Cloud::Ces::V1::Guardrail::ContentFilter

Returns Optional. Guardrail that bans certain content from being used in the conversation.

Note: The following fields are mutually exclusive: content_filter, llm_prompt_security, llm_policy, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.

Returns:

  • (::Google::Cloud::Ces::V1::Guardrail::ContentFilter)

    Optional. Guardrail that bans certain content from being used in the conversation.

    Note: The following fields are mutually exclusive: content_filter, llm_prompt_security, llm_policy, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure. Each nested type below configures one kind of check. Per the
# field-level notes, the top-level guardrail type fields (content_filter,
# llm_prompt_security, llm_policy, model_safety, code_callback) are mutually
# exclusive — populating one clears the others (oneof-style semantics).
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#create_time::Google::Protobuf::Timestamp (readonly)

Returns Output only. Timestamp when the guardrail was created.

Returns:

  • (::Google::Protobuf::Timestamp)

    Output only. Timestamp when the guardrail was created.


84–320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
#
# NOTE(review): generated protobuf documentation stub — the nested classes
# are empty message shells; every field is described only by the
# @!attribute YARD tags below. There is no hand-written logic to maintain
# in this listing.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#description::String

Returns Optional. Description of the guardrail.

Returns:

  • (::String)

    Optional. Description of the guardrail.



84–320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
#
# NOTE(review): generated protobuf documentation stub — the nested classes
# are empty message shells; every field is described only by the
# @!attribute YARD tags below. There is no hand-written logic to maintain
# in this listing.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#display_name::String

Returns Required. Display name of the guardrail.

Returns:

  • (::String)

    Required. Display name of the guardrail.



84–320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
#
# NOTE(review): generated protobuf documentation stub — the nested classes
# are empty message shells; every field is described only by the
# @!attribute YARD tags below. There is no hand-written logic to maintain
# in this listing.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#enabled::Boolean

Returns Optional. Whether the guardrail is enabled.

Returns:

  • (::Boolean)

    Optional. Whether the guardrail is enabled.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#etag::String

Returns Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.

Returns:

  • (::String)

    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#llm_policy::Google::Cloud::Ces::V1::Guardrail::LlmPolicy

Returns Optional. Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification.

Note: The following fields are mutually exclusive: llm_policy, content_filter, llm_prompt_security, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.

Returns:

  • (::Google::Cloud::Ces::V1::Guardrail::LlmPolicy)

    Optional. Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification.

    Note: The following fields are mutually exclusive: llm_policy, content_filter, llm_prompt_security, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#llm_prompt_security::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity

Returns Optional. Guardrail that blocks the conversation if the prompt is considered unsafe based on the LLM classification.

Note: The following fields are mutually exclusive: llm_prompt_security, content_filter, llm_policy, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.

Returns:

  • (::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity)

    Optional. Guardrail that blocks the conversation if the prompt is considered unsafe based on the LLM classification.

    Note: The following fields are mutually exclusive: llm_prompt_security, content_filter, llm_policy, model_safety, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#model_safety::Google::Cloud::Ces::V1::Guardrail::ModelSafety

Returns Optional. Guardrail that blocks the conversation if the LLM response is considered unsafe based on the model safety settings.

Note: The following fields are mutually exclusive: model_safety, content_filter, llm_prompt_security, llm_policy, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.

Returns:

  • (::Google::Cloud::Ces::V1::Guardrail::ModelSafety)

    Optional. Guardrail that blocks the conversation if the LLM response is considered unsafe based on the model safety settings.

    Note: The following fields are mutually exclusive: model_safety, content_filter, llm_prompt_security, llm_policy, code_callback. If a field in that set is populated, all other fields in the set will automatically be cleared.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#name::String

Returns Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}.

Returns:

  • (::String)

    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}.



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability-based threshold levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end

#update_time ⇒ ::Google::Protobuf::Timestamp (readonly)

Returns Output only. Timestamp when the guardrail was last updated.

Returns:



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
# File 'proto_docs/google/cloud/ces/v1/guardrail.rb', line 84

# Guardrail contains a list of checks and balances to keep the agents safe
# and secure.
class Guardrail
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Guardrail that bans certain content from being used in the conversation.
  # @!attribute [rw] banned_contents
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies to both user inputs and agent
  #     responses.
  # @!attribute [rw] banned_contents_in_user_input
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to user inputs.
  # @!attribute [rw] banned_contents_in_agent_response
  #   @return [::Array<::String>]
  #     Optional. List of banned phrases. Applies only to agent responses.
  # @!attribute [rw] match_type
  #   @return [::Google::Cloud::Ces::V1::Guardrail::ContentFilter::MatchType]
  #     Required. Match type for the content filter.
  # @!attribute [rw] disregard_diacritics
  #   @return [::Boolean]
  #     Optional. If true, diacritics are ignored during matching.
  class ContentFilter
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Match type for the content filter.
    module MatchType
      # Match type is not specified.
      MATCH_TYPE_UNSPECIFIED = 0

      # Content is matched for substrings character by character.
      SIMPLE_STRING_MATCH = 1

      # Content only matches if the pattern found in the text is
      # surrounded by word delimiters. Banned phrases can also contain word
      # delimiters.
      WORD_BOUNDARY_STRING_MATCH = 2

      # Content is matched using regular expression syntax.
      REGEXP_MATCH = 3
    end
  end

  # Guardrail that blocks the conversation if the input is considered unsafe
  # based on the LLM classification.
  # @!attribute [rw] default_settings
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPromptSecurity::DefaultSecuritySettings]
  #     Optional. Use the system's predefined default security settings.
  #     To select this mode, include an empty 'default_settings' message
  #     in the request. The 'default_prompt_template' field within
  #     will be populated by the server in the response.
  #
  #     Note: The following fields are mutually exclusive: `default_settings`, `custom_policy`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] custom_policy
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy]
  #     Optional. Use a user-defined LlmPolicy to configure the security
  #     guardrail.
  #
  #     Note: The following fields are mutually exclusive: `custom_policy`, `default_settings`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. Determines the behavior when the guardrail encounters an LLM
  #     error.
  #     - If true: the guardrail is bypassed.
  #     - If false (default): the guardrail triggers/blocks.
  #
  #     Note: If a custom policy is provided, this field is ignored in favor
  #     of the policy's 'fail_open' configuration.
  class LlmPromptSecurity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration for default system security settings.
    # @!attribute [r] default_prompt_template
    #   @return [::String]
    #     Output only. The default prompt template used by the system.
    #     This field is for display purposes to show the user what prompt
    #     the system uses by default. It is OUTPUT_ONLY.
    class DefaultSecuritySettings
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Guardrail that blocks the conversation if the LLM response is considered
  # violating the policy based on the LLM classification.
  # @!attribute [rw] max_conversation_messages
  #   @return [::Integer]
  #     Optional. When checking this policy, consider the last 'n' messages in
  #     the conversation. When not set, a default value of 10 will be used.
  # @!attribute [rw] model_settings
  #   @return [::Google::Cloud::Ces::V1::ModelSettings]
  #     Optional. Model settings.
  # @!attribute [rw] prompt
  #   @return [::String]
  #     Required. Policy prompt.
  # @!attribute [rw] policy_scope
  #   @return [::Google::Cloud::Ces::V1::Guardrail::LlmPolicy::PolicyScope]
  #     Required. Defines when to apply the policy check during the conversation.
  #     If set to `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the
  #     user input. When applying the policy to the agent response, additional
  #     latency will be introduced before the agent can respond.
  # @!attribute [rw] fail_open
  #   @return [::Boolean]
  #     Optional. If an error occurs during the policy check, fail open and do
  #     not trigger the guardrail.
  # @!attribute [rw] allow_short_utterance
  #   @return [::Boolean]
  #     Optional. By default, the LLM policy check is bypassed for short
  #     utterances. Enabling this setting applies the policy check to all
  #     utterances, including those that would normally be skipped.
  class LlmPolicy
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Defines when to apply the policy check during the conversation.
    module PolicyScope
      # Policy scope is not specified.
      POLICY_SCOPE_UNSPECIFIED = 0

      # Policy check is triggered on user input.
      USER_QUERY = 1

      # Policy check is triggered on agent response. Applying this policy
      # scope will introduce additional latency before the agent can respond.
      AGENT_RESPONSE = 2

      # Policy check is triggered on both user input and agent response.
      # Applying this policy scope will introduce additional latency before
      # the agent can respond.
      USER_QUERY_AND_AGENT_RESPONSE = 3
    end
  end

  # Model safety settings overrides. When this is set, it will override the
  # default settings and trigger the guardrail if the response is considered
  # unsafe.
  # @!attribute [rw] safety_settings
  #   @return [::Array<::Google::Cloud::Ces::V1::Guardrail::ModelSafety::SafetySetting>]
  #     Required. List of safety settings.
  class ModelSafety
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Safety setting.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmCategory]
    #     Required. The harm category.
    # @!attribute [rw] threshold
    #   @return [::Google::Cloud::Ces::V1::Guardrail::ModelSafety::HarmBlockThreshold]
    #     Required. The harm block threshold.
    class SafetySetting
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Harm category.
    module HarmCategory
      # The harm category is unspecified.
      HARM_CATEGORY_UNSPECIFIED = 0

      # The harm category is hate speech.
      HARM_CATEGORY_HATE_SPEECH = 1

      # The harm category is dangerous content.
      HARM_CATEGORY_DANGEROUS_CONTENT = 2

      # The harm category is harassment.
      HARM_CATEGORY_HARASSMENT = 3

      # The harm category is sexually explicit content.
      HARM_CATEGORY_SEXUALLY_EXPLICIT = 4
    end

    # Probability based thresholds levels for blocking.
    module HarmBlockThreshold
      # Unspecified harm block threshold.
      HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0

      # Block low threshold and above (i.e. block more).
      BLOCK_LOW_AND_ABOVE = 1

      # Block medium threshold and above.
      BLOCK_MEDIUM_AND_ABOVE = 2

      # Block only high threshold (i.e. block less).
      BLOCK_ONLY_HIGH = 3

      # Block none.
      BLOCK_NONE = 4

      # Turn off the safety filter.
      OFF = 5
    end
  end

  # Guardrail that blocks the conversation based on the code callbacks
  # provided.
  # @!attribute [rw] before_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_agent_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the agent is called.
  #     Each callback function is expected to return a structure (e.g., a dict or
  #     object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] before_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute before the model is called. If there
  #     are multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  # @!attribute [rw] after_model_callback
  #   @return [::Google::Cloud::Ces::V1::Callback]
  #     Optional. The callback to execute after the model is called. If there are
  #     multiple calls to the model, the callback will be executed multiple
  #     times. Each callback function is expected to return a structure (e.g., a
  #     dict or object) containing at least:
  #       - 'decision': Either 'OK' or 'TRIGGER'.
  #       - 'reason': A string explaining the decision.
  #     A 'TRIGGER' decision may halt further processing.
  class CodeCallback
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end