
neureka.backend.standard.algorithms.Activation Maven / Gradle / Ivy
package neureka.backend.standard.algorithms;
import neureka.Neureka;
import neureka.Tsr;
import neureka.backend.api.ExecutionCall;
import neureka.backend.api.algorithms.AbstractFunctionalAlgorithm;
import neureka.backend.standard.algorithms.internal.Fun;
import neureka.backend.standard.algorithms.internal.WithForward;
import neureka.backend.standard.implementations.CLImplementation;
import neureka.calculus.args.Arg;
import neureka.calculus.internal.CalcUtil;
import neureka.devices.Device;
import neureka.devices.host.CPU;
import neureka.dtype.NumericType;
import neureka.ndim.iterator.NDIterator;
/**
 * This is a lambda-based {@link neureka.backend.api.Algorithm} implementation
 * providing some basic functionality for implementing custom
 * activation functions.
 */
public final class Activation extends AbstractFunctionalAlgorithm
{
public Activation() {
super("activation");
setIsSuitableFor(
call -> call.validate()
.allNotNull( t -> t.getDataType().typeClassImplements(NumericType.class) )
.basicSuitability()
);
setCanPerformBackwardADFor( call -> true );
setCanPerformForwardADFor(
call -> call
.validate()
.all( ( first, second ) -> first.shape().equals(second.shape()) )
.isValid()
);
setExecutionDispatcher( CalcUtil::defaultRecursiveExecution );
setCallPreparation(
call -> {
Tsr>[] inputs = call.getTensors();
Device device = call.getDeviceFor(Number.class);
if ( inputs[ 0 ] == null ) // Creating a new tensor:
{
int[] shape = inputs[ 1 ].getNDConf().shape();
Class
© 2015 - 2025 Weber Informatics LLC | Privacy Policy