// Copyright 2015-2022 by Carnegie Mellon University
// See license information in LICENSE.txt

package org.cert.netsa.mothra.packer

import org.cert.netsa.io.ipfix.{InfoModel, RecordReader, SessionGroup,
  StreamMessageReader}

import com.github.ghik.silencer.silent
import com.typesafe.scalalogging.StrictLogging
import java.io.FileNotFoundException
import java.nio.channels.{Channels, FileChannel, ReadableByteChannel}
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{
  Files, FileVisitResult, Path => JPath, SimpleFileVisitor,
  StandardOpenOption}
import java.util.LinkedHashMap
import java.util.UUID.randomUUID
import java.util.concurrent.{
  Executors, LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path => HPath}
import org.apache.hadoop.io.IOUtils
import org.apache.hadoop.io.compress.CodecPool
import resource.managed // see http://jsuereth.com/scala-arm/index.html
import scala.collection.JavaConverters._
import scala.collection.immutable.Queue
import scala.util.control.NonFatal
import scala.util.{Try, Failure}


/**
  * The CorePacker class provides the functionality necessary to read IPFIX
  * records from streams and write each of the records to output files.  The
  * processing of the records and the rules that determine in which file(s) a
  * record is written are defined by the `pack` method of the `packLogic`
  * parameter.
  *
  * @param packLogic The rules that determine where records are written.
  * @param packConf The CorePacker configuration: directories and compression.
  * @param infoModel The information model to use.
  * @param hoursPerFile The number of hours covered by each file.
  * @param fileCacheSize The number of open files to maintain in the work
  * directory.
  * @param numMoveThreads The number of threads that move work files to
  * `destDir`.
  * @param conf The Hadoop configuration.
  */
@silent(" JavaConverters .*deprecated")
private[mothra] case class CorePacker protected (
  val packLogic: PackingLogic,
  val packConf: PackerConfig,
  val infoModel: InfoModel,
  val hoursPerFile: Int = CorePacker.DEFAULT_HOURS_PER_FILE,
  val fileCacheSize: Int = CorePacker.DEFAULT_FILE_CACHE_SIZE,
  val numMoveThreads: Int = CorePacker.DEFAULT_NUM_MOVE_THREADS)
  (implicit conf: Configuration)
    extends StrictLogging
{
  // import from our companion object
  import CorePacker.{MoveFilesJob, MoveOneFileJob, FileEvent, MoveOldFiles}
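
  /* A minimal usage sketch (hedged, not part of this class): it assumes an
   * already-constructed `packer: CorePacker`; `inputChannel` and `checker`
   * are placeholders, and construction of the PackingLogic, PackerConfig,
   * and InfoModel arguments is not shown in this file.
   *
   *|  packer.initializeWorkDir()        // move any leftover work files
   *|  packer.packStream(inputChannel)   // repeat for each incoming stream
   *|  packer.checkWorkingFiles(checker) // periodically close and move files
   *|  packer.shutdown()                 // close, move, and stop the pools
   */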

  // thread pool that runs a `MoveOneFileJob` to close a single working
  // output file, copy it into HDFS, and perhaps archive it
  private val moveOnePool = new ThreadPoolExecutor(
    numMoveThreads, numMoveThreads, 0L, TimeUnit.SECONDS,
    new LinkedBlockingQueue[Runnable](),
    new PackerThreadFactory("MoveOneFileThread-"))

  /**
    * The number of mover "watch" threads to create.
    */
  private[this] val moverSize = 2.max(numMoveThreads / 2)

  // thread pool that runs a `MoveFilesJob` which in turn spawns
  // `MoveOneFileJob` threads and waits for them to complete
  private val moveFilesPool = new ThreadPoolExecutor(
    moverSize, moverSize, 60L, TimeUnit.SECONDS,
    new LinkedBlockingQueue[Runnable](),
    new PackerThreadFactory("MoveFilesThread-"))

  /**
    * Writes a log message with the number of files moved and the number of
    * files waiting to be moved.
    */
  def logMoverTaskCount(): Unit = {
    val active = moveOnePool.getActiveCount()
    val completed = moveOnePool.getCompletedTaskCount()
    val total = moveOnePool.getTaskCount()
    logger.info(s"Mover task count: Completed: ${completed}," +
      s" Active: ${active}, Queued: ${total - completed - active}")
  }

  /*
   * A class that wraps java.util.LinkedHashMap to provide an LRU
   * cache that uses the relative date-path as the key and a WorkFile
   * as the value.
   *
   * This uses the LinkedHashMap class from Java since, unlike the
   * Scala one, it can order its entries by access order.  It also
   * provides the ability to automatically limit its size.
   */
  private[this] class LruMap()
      extends LinkedHashMap[String, WorkFile](128, 0.75.toFloat, true)
  {
    // Other possible types to consider for an LRU cache:
    // http://spray.io/documentation/1.2.4/spray-caching
    // https://twitter.github.io/util/docs/com/twitter/util/LruMap.html
    override def removeEldestEntry(eldest: java.util.Map.Entry[String, WorkFile]):
        Boolean =
    {
      if (size() < fileCacheSize) {
        false
      } else {
        val out = eldest.getValue()
        out.close()
        //LOG//logger.trace(s"Closed work file '${eldest.getKey()}'")
        for ( cb <- fileEvents ) { cb.fileClosed(out) }
        true
      }
    }
  }
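
  /* A self-contained sketch (illustrative only, not used by the packer) of
   * how LinkedHashMap's access-order mode plus removeEldestEntry() yields
   * LRU eviction; the capacity of 3 and the Int values are arbitrary.
   *
   *|  val lru = new LinkedHashMap[String, Int](16, 0.75.toFloat, true) {
   *|    override def removeEldestEntry(e: java.util.Map.Entry[String, Int]) =
   *|      size() > 3
   *|  }
   *|  lru.put("a", 1); lru.put("b", 2); lru.put("c", 3)
   *|  lru.get("a")     // touching "a" makes "b" the eldest entry
   *|  lru.put("d", 4)  // exceeds 3 entries, so "b" is evicted
   */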

  /** A map from a pathname to the open WorkFile stream.  Given a
    * WorkFile, one may use its `key` field to get the pathname.  The
    * contents of this data structure are a subset of the
    * `knownFiles`. */
  private[this] var openFiles: LruMap = new LruMap()

  /** A map of all files known to the CorePacker.  Key is the relative
    * file name (without any unique suffix), value is the WorkFile.
    * If the WorkFile is open, it also appears in the `openFiles` data
    * structure. */
  private[this] var knownFiles = Map.empty[String, WorkFile]

  /** FileEvents to invoke when a file is opened, re-opened, or
    * closed. */
  @volatile
  private[this] var fileEvents = Queue.empty[FileEvent]

  /** Whether shutdown() has been called. */
  @volatile
  var isShuttingDown = false

  /** Variable used to delay closing WorkFiles during flushAllWorkFiles(). */
  private[this] var flushingFiles = false

  /** Variable that gathers WorkFiles whose closing is delayed while
    * `flushingFiles` is set. */
  private[this] var delayedClose = Queue.empty[WorkFile]

  /** Registers a callback object to be invoked when a file is created,
    * re-opened, or closed. */
  def addFileEvent(cb: FileEvent): Unit = {
    fileEvents = fileEvents :+ cb
  }

  /** Removes the specified callback function. */
  def removeFileEvent(cb: FileEvent): Unit = {
    fileEvents = fileEvents.filter(_ != cb)
  }


  /**
    * Returns an open, writable WorkFile stream.  The path is determined by
    * variables in the `prec` argument.
    *
    * If the file is already opened, its handle is returned.  If the file
    * exists, it is reopened.  Otherwise, a new file is created and opened.
    *
    * @param prec An instance containing the [[Record]] and information used
    * to determine the path of the file where the Record is written.
    */
  private[this] def getWorkFile(prec: PackableRecord): WorkFile = synchronized
  {
    val key =
      s"${prec.relativePackedPath}/${prec.startTime.ymdhDuration(hoursPerFile)}"
    Option(openFiles.get(key)).getOrElse({
      val workfile: WorkFile = knownFiles.get(key) match {
        case Some(wf) =>
          wf.reopen()
          for (cb <- fileEvents) { cb.fileReopened(wf) }
          wf
        case None =>
          // create new file
          val uniqPath = s"${key}.${randomUUID().toString}"
          val workpath = packConf.workDir.resolve(
            s"${prec.startTime.ymdDirname}/${uniqPath}")
          val wf = WorkFile(
            key, workpath, uniqPath, infoModel, prec.observationDomain)
          knownFiles = knownFiles.updated(key, wf)
          for (cb <- fileEvents) { cb.fileCreated(wf) }
          wf
      }
      openFiles.put(key, workfile)
      workfile
    })
  }

  /**
    * Reads IPFIX records from `input` and calls the `pack()` method of the
    * [[PackingLogic]] for each record.
    *
    * Throws an Exception if called after the `shutdown()` method has
    * been invoked.
    *
    * @param input The byte-stream containing the IPFIX records to pack.
    */
  final def packStream(input: ReadableByteChannel): Unit = {
    if ( isShuttingDown ) {
      throw new Exception("CorePacker is shutting down")
    }

    // create a SessionGroup for this input
    val inGroup = SessionGroup(infoModel, input)

    /*  The called function does nothing, so no need to call it.
     *
     *|// register the callback to get the start time for each template
     *|StartTimeCache.sessionRegister(inGroup)
     */

    /*
     *|// register the metadata template callback defined in CorePacker
     *|sessionRegisterMetadataCallback(inGroup)
     */

    // register any callbacks defined by the PackingLogic
    packLogic.sessionRegister(inGroup)

    // create the reader
    val reader = RecordReader(StreamMessageReader(input, inGroup))

    /*
     *   If we decide to use the packRecords() method in the PackingLogic
     *   trait, the following function call replaces the loop that invokes
     *   reader.call().  In addition, the packRecord() function can call
     *   sessionRegister() itself and there is no need for this function to do
     *   it.
     *
     *   //packLogic.packRecords(reader, inGroup, this)
     *
     */

    // process all records in the input
    for ( record <- reader ) {
      for ( prec <- packLogic.pack(record) ) {
        writeRecord(prec)
      }
    }

    // force flush of files
    //flushAllWorkFiles()
  }
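
  /* A hedged example of driving packStream(): open a local IPFIX file as a
   * ReadableByteChannel and hand it to the packer.  The path below is
   * illustrative only.
   *
   *|  val channel = FileChannel.open(
   *|    java.nio.file.Paths.get("/data/incoming/flows.ipfix"),
   *|    StandardOpenOption.READ)
   *|  try packStream(channel) finally channel.close()
   */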

  /**
    * Writes a [[Record]] to a [[WorkFile]], creating a new file if needed.
    *
    * Specifically, builds the file's relative path from the time stamp and
    * relative path in the [[PackableRecord]].  That path is passed to a
    * private function (`getWorkFile`) to get or create a [[WorkFile]] under
    * the work directory, and then the [[Record]] in `prec` is added to it.
    *
    * @param prec An instance containing the [[Record]] and information used
    * to determine the path of the file where the Record is written.
    */
  private[this] def writeRecord(prec: PackableRecord): Unit = {
    // get the output
    val out = getWorkFile(prec)
    // write the record
    out.writeRecord(prec.record)
  }

  /**
    * Checks each work file owned by this CorePacker to determine whether it
    * should be closed and moved to the repository.
    *
    * @param checker Predicate that returns `true` for work files that should
    * be closed and moved.
    */
  final def checkWorkingFiles(checker: (WorkFile => Boolean)): Unit = {
    val size = synchronized { knownFiles.size }
    logger.info(s"Checking sizes and ages of the ${size} work files")
    if ( size == 0 ) {
      logger.info(s"Found no work files to close and move in 0.000 seconds")
    } else {
      val t0 = System.currentTimeMillis()
      val filesToMove = synchronized {
        val files = Array.empty[WorkFile] ++ (for {
          (key, workfile) <- knownFiles
          if checker(workfile)
        } yield workfile)

        knownFiles = knownFiles -- files.map { _.key }
        closeWorkFiles(files)
        files
      }

      val t = System.currentTimeMillis()
      if ( filesToMove.isEmpty ) {
        logger.info("Found no work files to close and move" +
          f" in ${(t-t0).toDouble/1000.0}%.3f seconds")
      } else {
        logger.info(
          s"Found and closed ${filesToMove.size} work files" +
            f" in ${(t-t0).toDouble/1000.0}%.3f seconds")
        val mover = MoveFilesJob(filesToMove, this)
        moveFilesPool.execute(mover)
      }
    }
  }
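
  /* A hedged example of a `checker` predicate for checkWorkingFiles(): close
   * and move any work file whose on-disk size exceeds a threshold.  The
   * 64 MiB limit is illustrative; only WorkFile.path is relied upon here.
   *
   *|  checkWorkingFiles { wf => Files.size(wf.path) > 64L * 1024 * 1024 }
   */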

  /** Closes one WorkFile and schedules it to be moved, or defers the close
    * if a flush is currently in progress. */
  def closeWorkFile(workfile: WorkFile): Unit = synchronized {
    if ( flushingFiles ) {
      // store workfile until flush is complete
      delayedClose = delayedClose :+ workfile
    } else {
      knownFiles.get(workfile.key) match {
        case None => logger.trace(s"Warning: ${workfile.key} not found in Map")
        case Some(wf) =>
          assert( wf eq workfile )
          openFiles.remove(workfile.key)
          knownFiles = knownFiles - workfile.key
          if ( workfile.close() ) {
            logger.trace(s"Closed work file '${workfile.path}'")
            for ( cb <- fileEvents ) {
              cb.fileClosed(workfile)
            }
          }
          moveOnePool.execute(MoveOneFileJob(workfile, this))
      }
    }
  }

  /**
    * For each file in `workfiles`, removes it from `openFiles` and calls its
    * `close` method; if the file was actually closed, invokes the
    * `fileClosed` FileEvent callbacks.
    *
    * This method assumes the caller is synchronized.
    */
  private[this] def closeWorkFiles(workfiles: Array[WorkFile]): Unit = {
    for ( wf <- workfiles ) {
      openFiles.remove(wf.key, wf)
      if ( wf.close() ) {
        logger.trace(s"Closed work file '${wf.path}'")
        for ( cb <- fileEvents ) {
          cb.fileClosed(wf)
        }
      }
    }
  }

  /** Closes all WorkFile files in the cache, empties the cache, and
    * schedules the files to be moved to their final location.
    *
    * @param wait If `true`, blocks until the files have been moved;
    * otherwise the move occurs in a separate thread. */
  final def closeAllWorkFiles(wait: Boolean = false): Unit = {
    val t0 = System.currentTimeMillis()
    val workfiles = synchronized {
      val files = Array.empty[WorkFile] ++ knownFiles.values
      knownFiles = Map.empty[String, WorkFile]
      openFiles = new LruMap()
      files
    }

    for ( wf <- workfiles ) {
      if ( wf.close() ) {
        logger.trace(s"Closed work file '${wf.path}'")
        for ( cb <- fileEvents ) {
          cb.fileClosed(wf)
        }
      }
    }

    val t = System.currentTimeMillis()
    logger.info(s"Closed ${workfiles.size} work files " +
      f" in ${(t-t0).toDouble/1000.0}%.3f seconds.")
    val mover = MoveFilesJob(workfiles, this)
    if ( wait ) {
      val task = List(Executors.callable(mover, true.asInstanceOf[Any]))
      moveFilesPool.invokeAll(task.toSeq.asJavaCollection)
    } else {
      moveFilesPool.execute(mover)
    }
    ()
  }

  /** Flushes all open WorkFiles in the cache, then closes and schedules
    * the move of any WorkFiles whose closing was delayed during the
    * flush. */
  final def flushAllWorkFiles(): Unit = synchronized {
    flushingFiles = true
    //logger.trace("Flushing all work files...")
    for (out <- openFiles.values().asScala) {
      out.flush()
    }
    //logger.trace("Flushed all work files.")
    flushingFiles = false

    if ( delayedClose.nonEmpty ) {
      val oldDelayed = delayedClose.toArray
      delayedClose = Queue.empty[WorkFile]

      val t0 = System.currentTimeMillis()
      knownFiles = knownFiles -- oldDelayed.map { _.key }
      closeWorkFiles(oldDelayed)
      val t = System.currentTimeMillis()
      logger.info(s"Closed ${oldDelayed.size} work files " +
        f" in ${(t-t0).toDouble/1000.0}%.3f seconds.")
      moveFilesPool.execute(MoveFilesJob(oldDelayed, this))
    }
  }

  /** Closes all files, moves them to their final location, and shuts down
    * the internal thread pools.  After this function is called,
    * attempts to invoke packStream() throw an exception. */
  final def shutdown(): Unit = {
    isShuttingDown = true
    closeAllWorkFiles(true)
    moveFilesPool.shutdown()
    moveOnePool.shutdown()
  }


  /** Moves all files that currently exist in the `workDir` to the
    * `rootDir`. */
  final def initializeWorkDir(): Unit = {
    logger.info(s"Checking for existing files in ${packConf.workDir}...")
    var files = List.empty[JPath]
    var dots = 0
    var empties = 0
    Files.walkFileTree(packConf.workDir, new SimpleFileVisitor[JPath]() {
      override def visitFile(sourcePath: JPath, attr: BasicFileAttributes)
          : FileVisitResult =
      {
        logger.trace(s"Found sourcePath '${sourcePath}'")
        if ( sourcePath.getFileName().toString()(0) == '.' ) {
          dots += 1
        } else if ( Files.size(sourcePath) == 0 ) {
          empties = empties + 1
        } else {
          files = sourcePath +: files
        }
        FileVisitResult.CONTINUE
      }
    })
    if ( files.nonEmpty ) {
      logger.info(s"Found ${files.size} existing files in ${packConf.workDir}")
      moveFilesPool.execute(MoveOldFiles(files, this))
    } else {
      logger.info(s"Found no files to move from ${packConf.workDir}")
    }
    if ( empties > 0 || dots > 0 ) {
      logger.info(s"Ignored ${dots} dot-files and ${empties} empty files" +
        s" in ${packConf.workDir}")
    }
  }


  /**
    * Copies the contents of the local work file `sourcePath` into HDFS under
    * `rootDir`.  If the file size of `sourcePath` is 0, deletes the local
    * file and does not create `targetPath`.
    *
    * When `archiveLocation` is specified and the CorePacker has an `archiveDir`,
    * the `sourcePath` is moved to "archiveDir/archiveLocation" after its
    * contents are copied into HDFS.  Typically, archiveLocation is the same
    * as the repository file without the leading Y/M/D path components.
    *
    * Note: This method is called by a separate thread; therefore, it should
    * not make any modifications to the CorePacker or use any non-constant member
    * of the CorePacker.
    *
    * @param sourcePath The complete path of the file whose contents must be
    * copied into the repository.
    * @param archiveLocation Where to move the `sourcePath` after its contents
    * have been processed.
    */
  private def copyIntoRepo(sourcePath: JPath, archiveLocation: Option[String]):
      Unit =
  {
    val sourceLen = Files.size(sourcePath)
    if ( 0 == sourceLen ) {
      logger.trace(s"Removing zero length file ${sourcePath.toString}")
      Files.delete(sourcePath)
    } else {
      // strip workDir from sourcePath
      val relPath = packConf.workDir.relativize(sourcePath).toString

      // construct path to the target
      val targetFs = packConf.rootDirFileSystem
      val targetPath = {
        var p = new HPath(
          packConf.rootDir, s"${relPath}${packConf.compressSuffix}")
        while ( targetFs.exists(p) ) {
          p = new HPath(packConf.rootDir,
            s"${relPath}${randomUUID().toString}${packConf.compressSuffix}")
        }
        p
      }

      logger.trace(s"Moving file '${sourcePath}' to '${targetPath}'")
      val compressor =
        packConf.compressCodec.map { c => CodecPool.getCompressor(c) }
      try {
        for {
          sourceStream <- managed({
            val channel = FileChannel.open(sourcePath,
              StandardOpenOption.WRITE, StandardOpenOption.READ)
            channel.lock()
            Channels.newInputStream(channel)
          })
          targetStream <- managed({
            val stream = targetFs.create(targetPath, false)
            compressor match {
              case None => stream
              case Some(c) =>
                packConf.compressCodec.get.createOutputStream(stream, c)
            }})
        } {
          IOUtils.copyBytes(sourceStream, targetStream, conf)
        }
      } finally {
        compressor.foreach { c => CodecPool.returnCompressor(c) }
      }

      try {
        val status = targetFs.getFileStatus(targetPath)
        if ( !status.isFile() ) {
          logger.error(
            s"Destination path '${targetPath}' exists but is not a file")
          //} else if ( status.getLen() != sourceLen ) {
          //  logger.error(
          //    s"Destination file '${targetPath}' exists but has" +
          //      s" size ${status.getLen()} versus expected ${sourceLen}")
        } else {
          // finally, now that all that worked, archive and remove the source
          // file
          if ( packConf.archiveDir.nonEmpty && archiveLocation.nonEmpty ) {
            val archivePath = new HPath(
              packConf.archiveDir.get, archiveLocation.get)
            Try {
              val archiveFs = packConf.archiveDirFileSystem
              for {
                sourceStream <- managed({
                  val channel = FileChannel.open(sourcePath,
                    StandardOpenOption.WRITE, StandardOpenOption.READ)
                  channel.lock()
                  Channels.newInputStream(channel)
                })
                archiveStream <- managed({
                  archiveFs.create(archivePath, false)
                })
              } {
                IOUtils.copyBytes(sourceStream, archiveStream, conf)
              }
            } match {
              case Failure(e) =>
                logger.warn("Error writing to archive file" +
                  s" ${archivePath}: ${e.toString}")
              case _ =>
            }
          }
          Files.delete(sourcePath)
        }
      } catch {
        case ex: FileNotFoundException =>
          logger.error(s"Destination path '${targetPath}' does not exist", ex)
        case NonFatal(ex) =>
          logger.error(s"Error checking destination path '${targetPath}'", ex)
      }
    }
  }

}


/**
  * Companion object for [[CorePacker]]: holds default values and helper
  * classes.
  */
@silent(" JavaConverters .*deprecated")
private[mothra] object CorePacker extends StrictLogging {

  /**
    * Default compression codec to use for files written to HDFS.
    * This may be modified by specifying the following property:
    * `mothra.packer.compression`.
    *
    * Values typically supported by Hadoop include `bzip2`, `gzip`,
    * `lz4`, `lzo`, `lzop`, `snappy`, and `default`.  The empty string
    * indicates no compression.
    */
  val DEFAULT_COMPRESSION = ""
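
  /* For example (assuming the property is supplied as a JVM system property,
   * which is not shown in this file), gzip output could be selected with:
   *
   *|  -Dmothra.packer.compression=gzip
   */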

  /**
    * Default number of hours covered by each file in the repository.  The
    * valid range is 1 to 24, inclusive.  This value may be set at run time
    * via the `mothra.packer.hoursPerFile` property.
    */
  val DEFAULT_HOURS_PER_FILE = 1

  /**
    * The default maximum size of the open file cache.  This is the maximum
    * number of open files maintained by the file cache for writing to files
    * in the work directory.
    *
    * The packer does not limit the number of files in the work directory;
    * this only limits the number of open files.  Once the cache reaches this
    * number of open files and the packer needs to (re-)open a file, the packer
    * closes the least-recently-used file.
    *
    * This value does not include the file handles required when reading
    * incoming files or when copying files from the work directory to the data
    * directory.
    *
    * This value may be set at run time via the `mothra.packer.fileCacheSize`
    * Java property.
    */
  val DEFAULT_FILE_CACHE_SIZE = 2000

  /**
    * The smallest permitted value for the `mothra.packer.fileCacheSize`.
    */
  val MINIMUM_FILE_CACHE_SIZE = 128

  /**
    * Default value for the size of the thread pool that closes the work files
    * and moves them from the work directory to the destination directory.
    * This value may be set at run time via the
    * `mothra.packer.numMoveThreads` property.
    */
  val DEFAULT_NUM_MOVE_THREADS = 4

  /**
    * A callback trait whose methods are invoked when a file is created,
    * a file is re-opened, or a file is closed.
    */
  private[mothra] trait FileEvent {
    /**
      * Called when a new output file is opened.
      *
      * @param out  the file that was opened
      */
    @silent(" out .* never used")
    def fileCreated(out: WorkFile): Unit = ()

    /**
      * Called when an existing file is re-opened to append records.
      *
      * @param out  the file that was re-opened
      */
    @silent(" out .* never used")
    def fileReopened(out: WorkFile): Unit = ()

    /**
      * Called after an output file is closed.
      *
      * @param out  the file that was closed
      */
    @silent(" out .* never used")
    def fileClosed(out: WorkFile): Unit = ()
  }
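
  /* A hedged sketch of a FileEvent implementation that counts closed files.
   * `packer` is an assumed, already-constructed CorePacker instance, and the
   * counter is illustrative; registration uses CorePacker.addFileEvent().
   *
   *|  val closedCount = new java.util.concurrent.atomic.AtomicLong()
   *|  val watcher = new FileEvent {
   *|    override def fileClosed(out: WorkFile): Unit = {
   *|      closedCount.incrementAndGet(); ()
   *|    }
   *|  }
   *|  packer.addFileEvent(watcher)
   */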


  /**
    * Class used to move files from the workDir to the destDir.  This is in a
    * separate class so it may be handed to a thread.
    *
    * @param knownFiles Files to be moved.
    * @param packer The CorePacker instance.  Only its constant members should be
    * used by this class.
    */
  private case class MoveFilesJob(
    knownFiles: Array[WorkFile],
    packer: CorePacker)
      extends Runnable
  {
    def run(): Unit = {
      logger.info(s"Moving ${knownFiles.size} files...")
      val tStart = System.currentTimeMillis()
      // create an Iterable of Callables that each moves a single file
      val tasks = knownFiles.map {
        (workfile: WorkFile) => {
          Executors.callable(
            MoveOneFileJob(workfile, packer, false),
            true.asInstanceOf[Any])
        }
      }
      // wait for all the callables to complete
      packer.moveOnePool.invokeAll(tasks.toSeq.asJavaCollection)
      val tEnd = System.currentTimeMillis()
      logger.info(
        s"Moved ${knownFiles.size} work files" +
          f" in ${(tEnd-tStart).toDouble/1000.0}%.3f seconds.")
    }
  }


  /**
    * Class used to move one work file into the repository.  This
    * is in a separate class so it may be handed to a thread.  It is
    * constructed using snapshots of the data structure members of the
    * [[CorePacker]] class.
    *
    * @param workfile The WorkFile to move.
    * @param packer The CorePacker instance.  Only its constant members should be
    * used by this class.
    * @param writeLogMsg If true, writes a log message about moving one file.
    */
  private case class MoveOneFileJob(
    workfile: WorkFile,
    packer: CorePacker,
    writeLogMsg: Boolean = true)
      extends Runnable
  {
    def run(): Unit = {
      logger.trace(s"Moving work file '${workfile.path}'")
      // move the file
      packer.copyIntoRepo(workfile.path, Option(workfile.archivePath))
      if ( writeLogMsg ) {
        logger.info("Moved one work file.")
      }
    }
  }


  /**
    * Class used to move files that exist in the workDir at start-up into the
    * repository.  It is a separate class so it may run in a separate thread.
    *
    * @param oldFiles The old workFiles being moved
    * @param packer The CorePacker instance.  Only its constant members should be
    * used by this class.
    */
  private case class MoveOldFiles(
    oldFiles: List[JPath],
    packer: CorePacker)
      extends Runnable
  {
    def run(): Unit = {
      logger.info(s"Moving ${oldFiles.size} existing files...")
      for ( workPath <- oldFiles ) {
        val archiveLoc =
          if ( packer.packConf.archiveDir.isEmpty ) None else {
            // get workPath relative to the workDir
            val relPath = packer.packConf.workDir.relativize(workPath)
            // strip the leading y/m/d
            Some(relPath.subpath(3, relPath.getNameCount).toString)
          }
        packer.copyIntoRepo(workPath, archiveLoc)
      }
      logger.info(s"Moving ${oldFiles.size} existing files...done.")
    }
  }

}

// @LICENSE_FOOTER@
//
// Copyright 2015-2022 Carnegie Mellon University. All Rights Reserved.
//
// This material is based upon work funded and supported by the
// Department of Defense and Department of Homeland Security under
// Contract No. FA8702-15-D-0002 with Carnegie Mellon University for the
// operation of the Software Engineering Institute, a federally funded
// research and development center sponsored by the United States
// Department of Defense. The U.S. Government has license rights in this
// software pursuant to DFARS 252.227.7014.
//
// NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING
// INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON
// UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR
// IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF
// FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS
// OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT
// MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT,
// TRADEMARK, OR COPYRIGHT INFRINGEMENT.
//
// Released under a GNU GPL 2.0-style license, please see LICENSE.txt or
// contact [email protected] for full terms.
//
// [DISTRIBUTION STATEMENT A] This material has been approved for public
// release and unlimited distribution. Please see Copyright notice for
// non-US Government use and distribution.
//
// Carnegie Mellon(R) and CERT(R) are registered in the U.S. Patent and
// Trademark Office by Carnegie Mellon University.
//
// This software includes and/or makes use of third party software each
// subject to its own license as detailed in LICENSE-thirdparty.txt
//
// DM20-1143



