
geotrellis.spark.io.file.FileLayerUpdater.scala (geotrellis-spark_2.10)
GeoTrellis is an open source geographic data processing engine for high-performance applications.
package geotrellis.spark.io.file

import geotrellis.spark._
import geotrellis.spark.io._
import geotrellis.spark.io.avro._
import geotrellis.spark.io.avro.codecs._
import geotrellis.spark.io.index._
import geotrellis.spark.io.json._
import geotrellis.spark.merge._
import geotrellis.util._
import com.typesafe.scalalogging.slf4j._
import org.apache.avro.Schema
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import spray.json._
import java.io.File
import scala.reflect._
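
/**
 * Updates a layer in a file-backed GeoTrellis catalog: the existing tiles are
 * read back, merged with the incoming RDD (colliding keys resolved by
 * `mergeFunc`), and rewritten together with the merged metadata. A changed
 * Avro writer schema forces the entire layer to be re-read and rewritten.
 */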
class FileLayerUpdater(
  catalogPath: String,
  attributeStore: AttributeStore,
  layerReader: FileLayerReader
) extends LayerUpdater[LayerId] with LazyLogging {

  protected def _update[
    K: AvroRecordCodec: Boundable: JsonFormat: ClassTag,
    V: AvroRecordCodec: ClassTag,
    M: JsonFormat: GetComponent[?, Bounds[K]]: Mergable
  ](id: LayerId, rdd: RDD[(K, V)] with Metadata[M], keyBounds: KeyBounds[K], mergeFunc: (V, V) => V) = {
    if (!attributeStore.layerExists(id)) throw new LayerNotFoundError(id)

    val LayerAttributes(header, metadata, keyIndex, writerSchema) = try {
      attributeStore.readLayerAttributes[FileLayerHeader, M, K](id)
    } catch {
      case e: AttributeNotFoundError => throw new LayerUpdateError(id).initCause(e)
    }

    val path = header.path

    // An update may not extend the layer beyond the key bounds it was indexed with.
    if (!(keyIndex.keyBounds contains keyBounds))
      throw new LayerOutOfKeyBoundsError(id, keyIndex.keyBounds)

    val maxWidth = Index.digits(keyIndex.toIndex(keyIndex.keyBounds.maxKey))
    val keyPath = KeyPathGenerator(catalogPath, path, keyIndex, maxWidth)
    val layerPath = new File(catalogPath, path).getAbsolutePath

    logger.info(s"Saving updated RDD for layer ${id} to $path")

    // If the Avro writer schema has changed, every existing tile must be
    // re-encoded, so the whole layer is read back; otherwise only the tiles
    // whose bounds intersect the incoming RDD are read.
    val existingTiles =
      if (schemaHasChanged[K, V](writerSchema)) {
        logger.warn(s"RDD schema has changed, this requires rewriting the entire layer.")
        layerReader
          .read[K, V, M](id)
      } else {
        val query =
          new LayerQuery[K, M]
            .where(Intersects(rdd.metadata.getComponent[Bounds[K]].get))

        layerReader.read[K, V, M](id, query, layerReader.defaultNumPartitions, filterIndexOnly = true)
      }

    val updatedMetadata: M =
      metadata.merge(rdd.metadata)

    // Combine existing and incoming tiles, resolving key collisions with the
    // caller-supplied merge function.
    val updatedRdd: RDD[(K, V)] =
      existingTiles
        .fullOuterJoin(rdd)
        .flatMapValues {
          case (Some(layerTile), Some(updateTile)) => Some(mergeFunc(layerTile, updateTile))
          case (Some(layerTile), _) => Some(layerTile)
          case (_, Some(updateTile)) => Some(updateTile)
          case _ => None
        }

    val codec = KeyValueRecordCodec[K, V]
    val schema = codec.schema

    // Only the merged metadata and the (possibly updated) writer schema need
    // rewriting in the attribute store; the header and key index are unchanged.
    attributeStore.writeLayerAttributes(id, header, updatedMetadata, keyIndex, schema)
    FileRDDWriter.write[K, V](updatedRdd, layerPath, keyPath)
  }
}
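
The fullOuterJoin/flatMapValues pattern above is what gives the update its merge semantics: tiles present only in the stored layer are kept, tiles present only in the incoming RDD are added, and keys present in both are combined with mergeFunc. A minimal, self-contained sketch of the same pattern on plain pair RDDs; the toy string values, local SparkContext, and keep-the-update merge function are illustrative assumptions, not part of GeoTrellis:

import org.apache.spark.{SparkConf, SparkContext}

object MergeSemanticsSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("merge-sketch"))

    val existing = sc.parallelize(Seq(1 -> "old-a", 2 -> "old-b")) // tiles already in the layer
    val incoming = sc.parallelize(Seq(2 -> "new-b", 3 -> "new-c")) // the update RDD
    val mergeFunc = (oldV: String, newV: String) => newV           // resolve collisions in favor of the update

    val merged = existing
      .fullOuterJoin(incoming)
      .flatMapValues {
        case (Some(o), Some(n)) => Some(mergeFunc(o, n)) // key in both: merge
        case (Some(o), _)       => Some(o)               // only in the layer: keep
        case (_, Some(n))       => Some(n)               // only in the update: add
        case _                  => None                  // cannot occur for a full outer join
      }

    merged.collect().sortBy(_._1).foreach(println) // (1,old-a), (2,new-b), (3,new-c)
    sc.stop()
  }
}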
object FileLayerUpdater {
  def apply(catalogPath: String)(implicit sc: SparkContext): FileLayerUpdater =
    new FileLayerUpdater(
      catalogPath,
      FileAttributeStore(catalogPath),
      FileLayerReader(catalogPath)
    )
}
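
Finally, a minimal usage sketch, assuming an active SparkContext and the public update(id, rdd, mergeFunc) entry point that LayerUpdater builds on top of the protected _update above. The catalog path, layer name, and zoom level are hypothetical:

import geotrellis.raster.Tile
import geotrellis.spark._
import geotrellis.spark.io._
import geotrellis.spark.io.file._
import org.apache.spark.SparkContext

object FileLayerUpdateExample {
  // updatedTiles: new or re-processed tiles falling inside the layer's
  // existing key bounds (a TileLayerRDD[SpatialKey] produced elsewhere).
  def run(updatedTiles: TileLayerRDD[SpatialKey])(implicit sc: SparkContext): Unit = {
    val updater = FileLayerUpdater("/data/catalog") // hypothetical catalog path
    val layerId = LayerId("example-layer", 13)      // hypothetical layer name and zoom

    // Resolve key collisions in favor of the incoming tile; any other
    // (Tile, Tile) => Tile merge function could be used instead.
    updater.update(layerId, updatedTiles, (existing: Tile, incoming: Tile) => incoming)
  }
}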