Class: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Inherits: Object
  • Object
(show all)
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(**args) ⇒ GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Returns a new instance of GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.



19472
19473
19474
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19472

def initialize(**args)
   update!(**args)
end

Instance Attribute Details

#confidence_threshold ⇒ Float

Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. Corresponds to the JSON property confidenceThreshold

Returns:

  • (Float)


19381
19382
19383
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19381

def confidence_threshold
  @confidence_threshold
end

#confusion_matrix ⇒ Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix

Confusion matrix of the evaluation for this confidence_threshold. Corresponds to the JSON property confusionMatrix



19386
19387
19388
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19386

def confusion_matrix
  @confusion_matrix
end

#f1_score ⇒ Float

The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. Corresponds to the JSON property f1Score

Returns:

  • (Float)


19392
19393
19394
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19392

def f1_score
  @f1_score
end

#f1_score_at1 ⇒ Float

The harmonic mean of recallAt1 and precisionAt1. Corresponds to the JSON property f1ScoreAt1

Returns:

  • (Float)


19397
19398
19399
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19397

def f1_score_at1
  @f1_score_at1
end

#f1_score_macro ⇒ Float

Macro-averaged F1 Score. Corresponds to the JSON property f1ScoreMacro

Returns:

  • (Float)


19402
19403
19404
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19402

def f1_score_macro
  @f1_score_macro
end

#f1_score_micro ⇒ Float

Micro-averaged F1 Score. Corresponds to the JSON property f1ScoreMicro

Returns:

  • (Float)


19407
19408
19409
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19407

def f1_score_micro
  @f1_score_micro
end

#false_negative_count ⇒ Fixnum

The number of ground truth labels that are not matched by a Model created label. Corresponds to the JSON property falseNegativeCount

Returns:

  • (Fixnum)


19413
19414
19415
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19413

def false_negative_count
  @false_negative_count
end

#false_positive_count ⇒ Fixnum

The number of Model created labels that do not match a ground truth label. Corresponds to the JSON property falsePositiveCount

Returns:

  • (Fixnum)


19418
19419
19420
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19418

def false_positive_count
  @false_positive_count
end

#false_positive_rate ⇒ Float

False Positive Rate for the given confidence threshold. Corresponds to the JSON property falsePositiveRate

Returns:

  • (Float)


19423
19424
19425
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19423

def false_positive_rate
  @false_positive_rate
end

#false_positive_rate_at1 ⇒ Float

The False Positive Rate when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. Corresponds to the JSON property falsePositiveRateAt1

Returns:

  • (Float)


19429
19430
19431
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19429

def false_positive_rate_at1
  @false_positive_rate_at1
end

#max_predictions ⇒ Fixnum

Metrics are computed with an assumption that the Model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet the confidenceThreshold. Corresponds to the JSON property maxPredictions

Returns:

  • (Fixnum)


19436
19437
19438
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19436

def max_predictions
  @max_predictions
end

#precision ⇒ Float

Precision for the given confidence threshold. Corresponds to the JSON property precision

Returns:

  • (Float)


19441
19442
19443
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19441

def precision
  @precision
end

#precision_at1 ⇒ Float

The precision when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. Corresponds to the JSON property precisionAt1

Returns:

  • (Float)


19447
19448
19449
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19447

def precision_at1
  @precision_at1
end

#recall ⇒ Float

Recall (True Positive Rate) for the given confidence threshold. Corresponds to the JSON property recall

Returns:

  • (Float)


19452
19453
19454
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19452

def recall
  @recall
end

#recall_at1 ⇒ Float

The Recall (True Positive Rate) when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. Corresponds to the JSON property recallAt1

Returns:

  • (Float)


19459
19460
19461
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19459

def recall_at1
  @recall_at1
end

#true_negative_count ⇒ Fixnum

The number of labels that were not created by the Model, but if they would, they would not match a ground truth label. Corresponds to the JSON property trueNegativeCount

Returns:

  • (Fixnum)


19465
19466
19467
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19465

def true_negative_count
  @true_negative_count
end

#true_positive_count ⇒ Fixnum

The number of Model created labels that match a ground truth label. Corresponds to the JSON property truePositiveCount

Returns:

  • (Fixnum)


19470
19471
19472
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19470

def true_positive_count
  @true_positive_count
end

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



19477
19478
19479
19480
19481
19482
19483
19484
19485
19486
19487
19488
19489
19490
19491
19492
19493
19494
19495
# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19477

def update!(**args)
  @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)
  @confusion_matrix = args[:confusion_matrix] if args.key?(:confusion_matrix)
  @f1_score = args[:f1_score] if args.key?(:f1_score)
  @f1_score_at1 = args[:f1_score_at1] if args.key?(:f1_score_at1)
  @f1_score_macro = args[:f1_score_macro] if args.key?(:f1_score_macro)
  @f1_score_micro = args[:f1_score_micro] if args.key?(:f1_score_micro)
  @false_negative_count = args[:false_negative_count] if args.key?(:false_negative_count)
  @false_positive_count = args[:false_positive_count] if args.key?(:false_positive_count)
  @false_positive_rate = args[:false_positive_rate] if args.key?(:false_positive_rate)
  @false_positive_rate_at1 = args[:false_positive_rate_at1] if args.key?(:false_positive_rate_at1)
  @max_predictions = args[:max_predictions] if args.key?(:max_predictions)
  @precision = args[:precision] if args.key?(:precision)
  @precision_at1 = args[:precision_at1] if args.key?(:precision_at1)
  @recall = args[:recall] if args.key?(:recall)
  @recall_at1 = args[:recall_at1] if args.key?(:recall_at1)
  @true_negative_count = args[:true_negative_count] if args.key?(:true_negative_count)
  @true_positive_count = args[:true_positive_count] if args.key?(:true_positive_count)
end