
// scala.googleapis.bigquery.RankingMetrics.scala — Maven / Gradle / Ivy
package googleapis.bigquery
import io.circe._
import io.circe.syntax._
/** Evaluation metrics used by weighted-ALS models in BigQuery ML, reporting how well
  * predicted confidences rank items against the original ratings.
  *
  * All fields are optional; a `None` means the metric was not computed. Marked `final`:
  * case classes should not be extended (case-to-case inheritance is illegal, and
  * subclassing breaks `equals`/`copy` semantics).
  */
final case class RankingMetrics(
  /** Determines the goodness of a ranking by computing the percentile rank from the predicted confidence and dividing it by the original rank.
    */
  averageRank: Option[Double] = None,
  /** Calculates a precision per user for all the items by ranking them and then averages all the precisions across all the users.
    */
  meanAveragePrecision: Option[Double] = None,
  /** Similar to the mean squared error computed in regression and explicit recommendation models except instead of computing the rating directly, the output from evaluate is computed against a preference which is 1 or 0 depending on if the rating exists or not.
    */
  meanSquaredError: Option[Double] = None,
  /** A metric to determine the goodness of a ranking calculated from the predicted confidence by comparing it to an ideal rank measured by the original ratings.
    */
  normalizedDiscountedCumulativeGain: Option[Double] = None,
)
object RankingMetrics {
  /** JSON encoder: emits all four metric keys; an absent (`None`) metric is
    * serialized as JSON `null` (standard circe `Option` encoding).
    */
  implicit val encoder: Encoder[RankingMetrics] = Encoder.instance { metrics =>
    Json.obj(
      "averageRank" -> metrics.averageRank.asJson,
      "meanAveragePrecision" -> metrics.meanAveragePrecision.asJson,
      "meanSquaredError" -> metrics.meanSquaredError.asJson,
      "normalizedDiscountedCumulativeGain" -> metrics.normalizedDiscountedCumulativeGain.asJson,
    )
  }
  /** JSON decoder: each metric is read as `Option[Double]`, so a missing or
    * `null` key yields `None`; decoding fails fast on the first malformed field.
    */
  implicit val decoder: Decoder[RankingMetrics] = Decoder.instance { cursor =>
    for {
      averageRank <- cursor.get[Option[Double]]("averageRank")
      meanAvgPrecision <- cursor.get[Option[Double]]("meanAveragePrecision")
      meanSquaredError <- cursor.get[Option[Double]]("meanSquaredError")
      ndcg <- cursor.get[Option[Double]]("normalizedDiscountedCumulativeGain")
    } yield RankingMetrics(averageRank, meanAvgPrecision, meanSquaredError, ndcg)
  }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy