Class: Google::Cloud::Dialogflow::CX::V3::SafetySettings
- Inherits: Object
- Class hierarchy: Object → Google::Cloud::Dialogflow::CX::V3::SafetySettings
- Extended by:
- Google::Protobuf::MessageExts::ClassMethods
- Includes:
- Google::Protobuf::MessageExts
- Defined in:
- proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb
Overview
Settings for Generative Safety.
Defined Under Namespace
Modules: PhraseMatchStrategy Classes: Phrase, PromptSecuritySettings, RaiSettings
Instance Attribute Summary collapse
-
#banned_phrases ⇒ ::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::Phrase>
Banned phrases for generated text.
-
#default_banned_phrase_match_strategy ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::PhraseMatchStrategy
Optional.
-
#default_rai_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings
Optional.
-
#prompt_security_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::PromptSecuritySettings
Optional.
-
#rai_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings
Optional.
Instance Attribute Details
#banned_phrases ⇒ ::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::Phrase>
Returns Banned phrases for generated text.
(source: lines 43–138 of safety_settings.rb)
# File 'proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb', line 43

# Settings for Generative Safety.
class SafetySettings
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] text
  #   @return [::String]
  #     Required. Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] language_code
  #   @return [::String]
  #     Required. Language code of the phrase.
  class Phrase
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Settings for Responsible AI.
  # @!attribute [rw] category_filters
  #   @return [::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::CategoryFilter>]
  #     Optional. RAI blocking configurations.
  class RaiSettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration of the sensitivity level for blocking an RAI category.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyCategory]
    #     RAI category to configure.
    # @!attribute [rw] filter_level
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyFilterLevel]
    #     Blocking sensitivity level to configure for the RAI category.
    class CategoryFilter
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Sensitivity level for RAI categories.
    module SafetyFilterLevel
      # Unspecified -- uses default sensitivity levels.
      SAFETY_FILTER_LEVEL_UNSPECIFIED = 0

      # Block no text -- effectively disables the category.
      BLOCK_NONE = 1

      # Block a few suspicious texts.
      BLOCK_FEW = 2

      # Block some suspicious texts.
      BLOCK_SOME = 3

      # Block most suspicious texts.
      BLOCK_MOST = 4
    end

    # RAI categories to configure.
    module SafetyCategory
      # Unspecified.
      SAFETY_CATEGORY_UNSPECIFIED = 0

      # Dangerous content.
      DANGEROUS_CONTENT = 1

      # Hate speech.
      HATE_SPEECH = 2

      # Harassment.
      HARASSMENT = 3

      # Sexually explicit content.
      SEXUALLY_EXPLICIT_CONTENT = 4
    end
  end

  # Settings for prompt security checks.
  # @!attribute [rw] enable_prompt_security
  #   @return [::Boolean]
  #     Optional. Enable prompt security checks.
  class PromptSecuritySettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Strategy for matching phrases.
  module PhraseMatchStrategy
    # Unspecified, defaults to PARTIAL_MATCH.
    PHRASE_MATCH_STRATEGY_UNSPECIFIED = 0

    # Text that contains the phrase as a substring will be matched, e.g. "foo"
    # will match "afoobar".
    PARTIAL_MATCH = 1

    # Text that contains the tokenized words of the phrase will be matched,
    # e.g. "foo" will match "a foo bar" and "foo bar", but not "foobar".
    WORD_MATCH = 2
  end
end
#default_banned_phrase_match_strategy ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::PhraseMatchStrategy
Returns Optional. Default phrase match strategy for banned phrases.
(source: lines 43–138 of safety_settings.rb)
# File 'proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb', line 43

# Settings for Generative Safety.
class SafetySettings
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] text
  #   @return [::String]
  #     Required. Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] language_code
  #   @return [::String]
  #     Required. Language code of the phrase.
  class Phrase
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Settings for Responsible AI.
  # @!attribute [rw] category_filters
  #   @return [::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::CategoryFilter>]
  #     Optional. RAI blocking configurations.
  class RaiSettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration of the sensitivity level for blocking an RAI category.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyCategory]
    #     RAI category to configure.
    # @!attribute [rw] filter_level
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyFilterLevel]
    #     Blocking sensitivity level to configure for the RAI category.
    class CategoryFilter
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Sensitivity level for RAI categories.
    module SafetyFilterLevel
      # Unspecified -- uses default sensitivity levels.
      SAFETY_FILTER_LEVEL_UNSPECIFIED = 0

      # Block no text -- effectively disables the category.
      BLOCK_NONE = 1

      # Block a few suspicious texts.
      BLOCK_FEW = 2

      # Block some suspicious texts.
      BLOCK_SOME = 3

      # Block most suspicious texts.
      BLOCK_MOST = 4
    end

    # RAI categories to configure.
    module SafetyCategory
      # Unspecified.
      SAFETY_CATEGORY_UNSPECIFIED = 0

      # Dangerous content.
      DANGEROUS_CONTENT = 1

      # Hate speech.
      HATE_SPEECH = 2

      # Harassment.
      HARASSMENT = 3

      # Sexually explicit content.
      SEXUALLY_EXPLICIT_CONTENT = 4
    end
  end

  # Settings for prompt security checks.
  # @!attribute [rw] enable_prompt_security
  #   @return [::Boolean]
  #     Optional. Enable prompt security checks.
  class PromptSecuritySettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Strategy for matching phrases.
  module PhraseMatchStrategy
    # Unspecified, defaults to PARTIAL_MATCH.
    PHRASE_MATCH_STRATEGY_UNSPECIFIED = 0

    # Text that contains the phrase as a substring will be matched, e.g. "foo"
    # will match "afoobar".
    PARTIAL_MATCH = 1

    # Text that contains the tokenized words of the phrase will be matched,
    # e.g. "foo" will match "a foo bar" and "foo bar", but not "foobar".
    WORD_MATCH = 2
  end
end
#default_rai_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings
Returns Optional. Immutable. Default RAI settings to be annotated on the agent, so that users will be able to restore their RAI configurations to the default settings. Read-only field for the API proto only.
(source: lines 43–138 of safety_settings.rb)
# File 'proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb', line 43

# Settings for Generative Safety.
class SafetySettings
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] text
  #   @return [::String]
  #     Required. Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] language_code
  #   @return [::String]
  #     Required. Language code of the phrase.
  class Phrase
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Settings for Responsible AI.
  # @!attribute [rw] category_filters
  #   @return [::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::CategoryFilter>]
  #     Optional. RAI blocking configurations.
  class RaiSettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration of the sensitivity level for blocking an RAI category.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyCategory]
    #     RAI category to configure.
    # @!attribute [rw] filter_level
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyFilterLevel]
    #     Blocking sensitivity level to configure for the RAI category.
    class CategoryFilter
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Sensitivity level for RAI categories.
    module SafetyFilterLevel
      # Unspecified -- uses default sensitivity levels.
      SAFETY_FILTER_LEVEL_UNSPECIFIED = 0

      # Block no text -- effectively disables the category.
      BLOCK_NONE = 1

      # Block a few suspicious texts.
      BLOCK_FEW = 2

      # Block some suspicious texts.
      BLOCK_SOME = 3

      # Block most suspicious texts.
      BLOCK_MOST = 4
    end

    # RAI categories to configure.
    module SafetyCategory
      # Unspecified.
      SAFETY_CATEGORY_UNSPECIFIED = 0

      # Dangerous content.
      DANGEROUS_CONTENT = 1

      # Hate speech.
      HATE_SPEECH = 2

      # Harassment.
      HARASSMENT = 3

      # Sexually explicit content.
      SEXUALLY_EXPLICIT_CONTENT = 4
    end
  end

  # Settings for prompt security checks.
  # @!attribute [rw] enable_prompt_security
  #   @return [::Boolean]
  #     Optional. Enable prompt security checks.
  class PromptSecuritySettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Strategy for matching phrases.
  module PhraseMatchStrategy
    # Unspecified, defaults to PARTIAL_MATCH.
    PHRASE_MATCH_STRATEGY_UNSPECIFIED = 0

    # Text that contains the phrase as a substring will be matched, e.g. "foo"
    # will match "afoobar".
    PARTIAL_MATCH = 1

    # Text that contains the tokenized words of the phrase will be matched,
    # e.g. "foo" will match "a foo bar" and "foo bar", but not "foobar".
    WORD_MATCH = 2
  end
end
#prompt_security_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::PromptSecuritySettings
Returns Optional. Settings for prompt security checks.
(source: lines 43–138 of safety_settings.rb)
# File 'proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb', line 43

# Settings for Generative Safety.
class SafetySettings
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] text
  #   @return [::String]
  #     Required. Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] language_code
  #   @return [::String]
  #     Required. Language code of the phrase.
  class Phrase
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Settings for Responsible AI.
  # @!attribute [rw] category_filters
  #   @return [::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::CategoryFilter>]
  #     Optional. RAI blocking configurations.
  class RaiSettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration of the sensitivity level for blocking an RAI category.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyCategory]
    #     RAI category to configure.
    # @!attribute [rw] filter_level
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyFilterLevel]
    #     Blocking sensitivity level to configure for the RAI category.
    class CategoryFilter
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Sensitivity level for RAI categories.
    module SafetyFilterLevel
      # Unspecified -- uses default sensitivity levels.
      SAFETY_FILTER_LEVEL_UNSPECIFIED = 0

      # Block no text -- effectively disables the category.
      BLOCK_NONE = 1

      # Block a few suspicious texts.
      BLOCK_FEW = 2

      # Block some suspicious texts.
      BLOCK_SOME = 3

      # Block most suspicious texts.
      BLOCK_MOST = 4
    end

    # RAI categories to configure.
    module SafetyCategory
      # Unspecified.
      SAFETY_CATEGORY_UNSPECIFIED = 0

      # Dangerous content.
      DANGEROUS_CONTENT = 1

      # Hate speech.
      HATE_SPEECH = 2

      # Harassment.
      HARASSMENT = 3

      # Sexually explicit content.
      SEXUALLY_EXPLICIT_CONTENT = 4
    end
  end

  # Settings for prompt security checks.
  # @!attribute [rw] enable_prompt_security
  #   @return [::Boolean]
  #     Optional. Enable prompt security checks.
  class PromptSecuritySettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Strategy for matching phrases.
  module PhraseMatchStrategy
    # Unspecified, defaults to PARTIAL_MATCH.
    PHRASE_MATCH_STRATEGY_UNSPECIFIED = 0

    # Text that contains the phrase as a substring will be matched, e.g. "foo"
    # will match "afoobar".
    PARTIAL_MATCH = 1

    # Text that contains the tokenized words of the phrase will be matched,
    # e.g. "foo" will match "a foo bar" and "foo bar", but not "foobar".
    WORD_MATCH = 2
  end
end
#rai_settings ⇒ ::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings
Returns Optional. Settings for Responsible AI checks.
(source: lines 43–138 of safety_settings.rb)
# File 'proto_docs/google/cloud/dialogflow/cx/v3/safety_settings.rb', line 43

# Settings for Generative Safety.
class SafetySettings
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] text
  #   @return [::String]
  #     Required. Text input which can be used for prompt or banned phrases.
  # @!attribute [rw] language_code
  #   @return [::String]
  #     Required. Language code of the phrase.
  class Phrase
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Settings for Responsible AI.
  # @!attribute [rw] category_filters
  #   @return [::Array<::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::CategoryFilter>]
  #     Optional. RAI blocking configurations.
  class RaiSettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Configuration of the sensitivity level for blocking an RAI category.
    # @!attribute [rw] category
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyCategory]
    #     RAI category to configure.
    # @!attribute [rw] filter_level
    #   @return [::Google::Cloud::Dialogflow::CX::V3::SafetySettings::RaiSettings::SafetyFilterLevel]
    #     Blocking sensitivity level to configure for the RAI category.
    class CategoryFilter
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end

    # Sensitivity level for RAI categories.
    module SafetyFilterLevel
      # Unspecified -- uses default sensitivity levels.
      SAFETY_FILTER_LEVEL_UNSPECIFIED = 0

      # Block no text -- effectively disables the category.
      BLOCK_NONE = 1

      # Block a few suspicious texts.
      BLOCK_FEW = 2

      # Block some suspicious texts.
      BLOCK_SOME = 3

      # Block most suspicious texts.
      BLOCK_MOST = 4
    end

    # RAI categories to configure.
    module SafetyCategory
      # Unspecified.
      SAFETY_CATEGORY_UNSPECIFIED = 0

      # Dangerous content.
      DANGEROUS_CONTENT = 1

      # Hate speech.
      HATE_SPEECH = 2

      # Harassment.
      HARASSMENT = 3

      # Sexually explicit content.
      SEXUALLY_EXPLICIT_CONTENT = 4
    end
  end

  # Settings for prompt security checks.
  # @!attribute [rw] enable_prompt_security
  #   @return [::Boolean]
  #     Optional. Enable prompt security checks.
  class PromptSecuritySettings
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Strategy for matching phrases.
  module PhraseMatchStrategy
    # Unspecified, defaults to PARTIAL_MATCH.
    PHRASE_MATCH_STRATEGY_UNSPECIFIED = 0

    # Text that contains the phrase as a substring will be matched, e.g. "foo"
    # will match "afoobar".
    PARTIAL_MATCH = 1

    # Text that contains the tokenized words of the phrase will be matched,
    # e.g. "foo" will match "a foo bar" and "foo bar", but not "foobar".
    WORD_MATCH = 2
  end
end