lib/DomainAlignmentLib.scala
/*
 * Copyright (C) 2016-2017, Roberto Casadei, Mirko Viroli, and contributors.
 * See the LICENCE.txt file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lib

import it.unibo.scafi.incarnations.BasicSimulationIncarnation._
import sims.SensorDefinitions

trait DomainAlignmentLib { self: AggregateProgram with SensorDefinitions =>

  val inf = Double.PositiveInfinity

  /** Local round counter: increments by one at each execution round. */
  def time = rep(0){ _ + 1 }

  ///////////////////////////////////////
  // domain-aligned updatable function //
  ///////////////////////////////////////

  /**
    * A function tagged with a version number.
    *
    * @param ver version number of the function
    * @param fun the function to execute
    */
  case class VersionedFunction(ver: Int, fun: () => Double) {
    /** Selects the newer of the two functions. */
    def max(o: VersionedFunction): VersionedFunction =
      if (ver > o.ver) this else o
  }
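
  // Illustration (a sketch, not part of the original code): folding `max`
  // over the neighbourhood keeps the newest function seen so far, which is
  // the gossip primitive that `up` below is built on. For instance:
  //   VersionedFunction(1, badmetric) max VersionedFunction(3, smartmetric)
  // yields the version-3 function.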

  /**
    * Executes every function in `procs` that is not older than any version
    * currently in use by a neighbour, aligning each execution to its own
    * version-specific domain.
    *
    * @param procs the candidate functions, newest first
    * @param maxp  the highest version number in `procs`
    * @param curp  the version this device executed in the previous round
    * @param nnum  the neighbourhood size measured in the caller's domain
    * @param init  fallback producing a default output value
    * @return the outcome and version of the function with the highest version
    *         number that is shared by all neighbours
    */
  def exec(procs: List[VersionedFunction], maxp: Int, curp: Int, nnum: Int, init: () => Double): (Double, Int) = aggregate{
    // Run the head function only in the sub-domain where it is admitted by
    // every neighbour's previous version; elsewhere produce the default.
    val d_cur = branch (minHood(nbr{curp}) <= procs.head.ver) { procs.head.fun() } { init() }
    // Consider the older functions whenever the head is below the maximum
    // version; otherwise report that no older version is in use (-1).
    val x: (Double, Int) = branch (procs.head.ver < maxp) {
      exec(procs.tail, maxp, curp, nnum, init)
    } { (init(), -1) }
    val ncurp: Int = x._2
    // Commit to the head function only when no older version is running and
    // every neighbour is aligned on this branch.
    mux(ncurp < 0 && nnum == foldhood(0)(_+_){1}) { (d_cur, procs.head.ver) } { x }
  }
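
  // Worked example (a sketch): with procs = [v3, v2, v1] (newest first) and
  // maxp = 3, the head v3 runs in the branch restricted to neighbours whose
  // previous version admits it; a device only commits to (result, 3) once all
  // of its neighbours have entered that branch, and returns the fallback
  // (init(), -1) in the meantime.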

  /**
    * Safely updates the function executed across the network. The state
    * evolved by `rep` tracks:
    *  - procs: the functions ever injected into the network
    *  - maxp:  the maximum version number in procs
    *  - curp:  the version executed in the previous round
    *  - field: the output field
    *
    * @param metric the "injector" of (possibly new) versioned functions
    * @param init   produces the initial/default output value
    * @return the output field of the newest function shared by all neighbours
    */
  def safeup(metric: () => VersionedFunction, init: () => Double): Double = aggregate{
    (rep((List[VersionedFunction](), -1, -1, nbr{0.0})) {
      case (procs: List[VersionedFunction], maxp: Int, curp: Int, field: Double) => aggregate {
        // Gossip: adopt the neighbour state carrying the highest version.
        val y: (Int, List[VersionedFunction]) = foldhood((maxp, procs))((a, b) => if (a._1 >= b._1) a else b){ nbr{(maxp, procs)} }
        // Injection: prepend the local function when it is newer than any seen.
        val z: (Int, List[VersionedFunction]) = mux(metric().ver > maxp){ (metric().ver, metric() :: procs) }{ y }
        val (nmaxp, nprocs) = z
        // Run the newest function that is safely aligned with all neighbours.
        val x = exec(nprocs, nmaxp, curp, foldhood(0)(_+_){1}, init)
        (nprocs, nmaxp, x._2, x._1)
      }
    })._4
  }
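
  // Usage sketch (hedged): wiring `safeup` to the injection schedule below,
  // every device keeps computing the fallback until the newer function has
  // reached its whole neighbourhood, so no misaligned values are exchanged:
  //   def safeMetric: Double = safeup(() => injmetric(), () => badmetric())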

  ////////////////////////////////
  // trivial updatable function //
  ////////////////////////////////

  /** Trivially updates the function: gossips the newest version with no
    * alignment guarantee while the update is still spreading. */
  def up(f: () => VersionedFunction): () => Double = aggregate{
    (rep(f()) {
      x => aggregate{ foldhood(f())(_.max(_)){ nbr{x} } }
    }).fun
  }
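
  // Usage sketch (hedged): unlike `safeup`, `up` returns the function itself,
  // so the caller applies it; during the transient, neighbouring devices may
  // run different versions against each other:
  //   def unsafeMetric: Double = up(() => injmetric())()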

  ////////////////////////////////////
  // trivial and adaptive gradients //
  ////////////////////////////////////

  /** A distance estimate paired with a flag recording whether it is currently
    * raising (i.e. it grew since the previous round). `min` prefers
    * non-raising estimates, and the smaller distance among equals. */
  case class RaisingDist(dist: Double, raising: Boolean) {
    def +(delta: Double): RaisingDist = RaisingDist(dist + delta, raising)
    def min(o: RaisingDist): RaisingDist =
      if (raising == o.raising) { if (dist < o.dist) this else o }
      else if (raising) o else this
  }
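
  // Illustration (a sketch): a raising estimate loses to a non-raising one
  // even when its distance is smaller, which is what lets the CRF gradient
  // below discard stale low values when the source moves away:
  //   RaisingDist(1.0, raising = true) min RaisingDist(5.0, raising = false)
  // yields RaisingDist(5.0, false).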

  def crfgradient(source: Boolean, metric: () => Double) = aggregate{
    (rep(RaisingDist(inf, false)) {
      d => aggregate{
        // Sources clamp the distance to zero; everyone else takes the best
        // neighbour estimate, marking it as raising when it grew locally.
        mux(source){ RaisingDist(0.0, false) }{
          val x = foldhood(RaisingDist(inf, false))(_.min(_)){ nbr{d} + metric() }
          RaisingDist(x.dist, x.dist > d.dist || x.raising)
        }
      }
    }).dist
  }

  /** Classic self-healing gradient: hop-by-hop minimisation of the
    * metric-weighted distance from the source. */
  def gradient(source: Boolean, metric: () => Double): Double = aggregate{
    rep(inf) {
      dist => mux(source){ 0.0 }{ foldhood(inf)(Math.min(_, _)){ metric() + nbr{dist} } }
    }
  }
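
  // Worked example (a sketch): on a line of devices 0-1-2 with unit-range
  // links and metric() == nbrRange(), device 0 (the source) holds 0.0, so
  // device 1 converges to 1.0 and device 2 to 2.0 after a few rounds, and
  // the values self-heal if links change.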

  ////////////////////////////
  // metrics to be injected //
  ////////////////////////////

  /** A noisy distance estimate: each reading of nbrRange is perturbed by
    * roughly plus or minus ten percent. */
  def badmetric(): Double = aggregate{
    nbrRange() * (4.5 + Math.random()) * 0.2
  }

  /** Weighs each link by the size of the local neighbourhood. */
  def smartmetric(): Double = aggregate{
    nbrRange() * foldhood(0)(_+_){1}
  }

  /** Injection schedule: device 1 injects version 2 (plain nbrRange) before
    * round 50; device 2 injects version 3 (smartmetric) between rounds 50
    * and 100; the default everywhere is version 1 (badmetric). */
  def injmetric(): VersionedFunction = aggregate{
    if (mid() == 1 && time < 50) VersionedFunction(2, nbrRange)
    else if (mid() == 2 && time > 50 && time < 100) VersionedFunction(3, smartmetric)
    else VersionedFunction(1, badmetric)
  }

  ////////////////////
  // selected tests //
  ////////////////////

  /** Gradient with the metric updated via unsafe gossip (`up`). */
  def testGup() = {
    gradient(mid() == 0, () => aggregate{ up(() => injmetric())() })
  }

  /** Gradient with the metric updated via domain-aligned `safeup`. */
  def testGsafe() = {
    gradient(mid() == 0, () => aggregate{ safeup(() => injmetric(), () => badmetric()) })
  }

  /** CRF gradient with the metric updated via unsafe gossip (`up`). */
  def testCRFup() = {
    crfgradient(mid() == 0, () => aggregate{ up(() => injmetric())() })
  }

  /** CRF gradient with the metric updated via domain-aligned `safeup`. */
  def testCRFsafe() = {
    crfgradient(mid() == 0, () => aggregate{ safeup(() => injmetric(), () => badmetric()) })
  }
}
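
// Hypothetical wiring (a sketch, not part of the original file): in a ScaFi
// simulation these tests would typically be exposed through a program class
// mixing in this trait, e.g.
//
//   class DomainAlignmentDemo extends AggregateProgram
//       with SensorDefinitions with DomainAlignmentLib {
//     override def main(): Double = testGsafe()
//   }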



