/*
* Copyright (C) 2018 The Flowman Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dimajix.flowman.spec.mapping

import com.fasterxml.jackson.annotation.JsonProperty

import org.apache.spark.sql.DataFrame

import com.dimajix.flowman.execution.Context
import com.dimajix.flowman.execution.Execution
import com.dimajix.flowman.model.BaseMapping
import com.dimajix.flowman.model.Mapping
import com.dimajix.flowman.model.MappingOutputIdentifier
import com.dimajix.flowman.types.StructType
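
/**
 * The DeduplicateMapping removes duplicate records from a single input mapping. When a list of
 * columns is given, duplicates are detected on those columns only (nested columns such as
 * "address.city" are supported); otherwise full records are compared via distinct(). An optional
 * SQL filter expression is applied to the deduplicated result.
 */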
final case class DeduplicateMapping(
    instanceProperties:Mapping.Properties,
    input:MappingOutputIdentifier,
    columns:Seq[String] = Seq(),
    filter:Option[String] = None
) extends BaseMapping {
    /**
     * Returns the dependencies of this mapping, which is the single input table plus any
     * mappings referenced by the optional filter expression
     *
     * @return
     */
    override def inputs : Set[MappingOutputIdentifier] = {
        Set(input) ++ expressionDependencies(filter)
    }

    /**
     * Executes the deduplication on the given input table and returns the deduplicated result
     *
     * @param execution
     * @param tables
     * @return
     */
    override def execute(execution:Execution, tables:Map[MappingOutputIdentifier,DataFrame]): Map[String,DataFrame] = {
        require(execution != null)
        require(input != null)

        val df = tables(input)
        val result = if (columns.nonEmpty) {
            // Since Spark does not support deduplication on nested columns, lift all deduplication
            // columns into temporary top-level columns, deduplicate on those, then drop them again
            val replacementColumns = columns.map(col => s"__flowman_dedup_${col.replace(".","_")}" -> df(col))
            replacementColumns.foldLeft(df)((df, col) => df.withColumn(col._1, col._2))
                .dropDuplicates(replacementColumns.map(_._1))
                .drop(replacementColumns.map(_._1):_*)
        }
        else {
            df.distinct()
        }

        // Apply optional filter
        val filteredResult = applyFilter(result, filter, tables)

        Map("main" -> filteredResult)
    }
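
    // Worked example of the column-lifting step above (the column names "id" and "address.city"
    // are illustrative only): for columns = Seq("id", "address.city") the mapping first adds the
    // temporary top-level columns
    //     __flowman_dedup_id           = df("id")
    //     __flowman_dedup_address_city = df("address.city")
    // then calls dropDuplicates on exactly these temporary columns, and finally drops them again
    // so that the output schema matches the input schema.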

    /**
     * Returns the schema as produced by this mapping, relative to the given input schema
     *
     * @param execution
     * @param input
     * @return
     */
    override def describe(execution:Execution, input:Map[MappingOutputIdentifier,StructType]) : Map[String,StructType] = {
        require(execution != null)
        require(input != null)

        // Deduplication does not change the schema, so the input schema is passed through
        val result = input(this.input)

        // Apply documentation
        val schemas = Map("main" -> result)
        applyDocumentation(schemas)
    }
}
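
// A minimal construction sketch (assumptions: `props` is a Mapping.Properties obtained from a
// Context, and "raw_users" names an upstream mapping output; both are illustrative, not taken
// from this file):
//
//   val mapping = DeduplicateMapping(
//       props,
//       MappingOutputIdentifier("raw_users"),
//       columns = Seq("user_id", "account.email"),
//       filter = Some("user_id IS NOT NULL")
//   )
//
// Calling mapping.execute(execution, Map(MappingOutputIdentifier("raw_users") -> inputDf))
// then returns Map("main" -> deduplicatedDf).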

class DeduplicateMappingSpec extends MappingSpec {
    @JsonProperty(value = "input", required = true) private var input: String = _
    @JsonProperty(value = "columns", required = false) private var columns: Seq[String] = Seq()
    @JsonProperty(value = "filter", required = false) private var filter: Option[String] = None

    /**
     * Creates the instance of the specified Mapping with all variable interpolation being performed
     *
     * @param context
     * @param properties
     * @return
     */
    override def instantiate(context: Context, properties:Option[Mapping.Properties] = None): DeduplicateMapping = {
        DeduplicateMapping(
            instanceProperties(context, properties),
            MappingOutputIdentifier(context.evaluate(input)),
            columns.map(context.evaluate),
            context.evaluate(filter)
        )
    }
}
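
// A hedged YAML sketch of how this spec might be written in a Flowman project. The kind name
// "deduplicate" and the surrounding layout follow Flowman's usual mapping conventions and are
// assumptions, not taken from this file:
//
//   mappings:
//     dedup_users:
//       kind: deduplicate
//       input: raw_users
//       columns:
//         - user_id
//         - account.email
//       filter: "user_id IS NOT NULL"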