/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */
package org.deeplearning4j.nn.layers;

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.gradient.DefaultGradient;
import org.deeplearning4j.nn.gradient.Gradient;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.common.primitives.Pair;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.deeplearning4j.nn.workspace.ArrayType;
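
/**
 * Activation layer: applies the configured activation function element-wise to the layer input.
 * The layer has no trainable parameters, so {@link #params()} returns {@code null} and its
 * regularization score is always zero.
 *
 * <p>A minimal configuration sketch (illustrative only; the builder classes and method signatures
 * below, such as {@code org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder}, are assumed
 * from the corresponding DL4J configuration API and may vary between versions):
 * <pre>{@code
 * new NeuralNetConfiguration.Builder()
 *         .list()
 *         // Dense layer with identity activation, followed by a standalone ReLU activation layer
 *         .layer(new DenseLayer.Builder().nIn(784).nOut(100).activation(Activation.IDENTITY).build())
 *         .layer(new org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder()
 *                 .activation(Activation.RELU).build())
 *         .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
 *                 .nIn(100).nOut(10).activation(Activation.SOFTMAX).build())
 *         .build();
 * }</pre>
 */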
public class ActivationLayer extends AbstractLayer<org.deeplearning4j.nn.conf.layers.ActivationLayer> {

    public ActivationLayer(NeuralNetConfiguration conf, DataType dataType) {
        super(conf, dataType);
    }
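
    /** No trainable parameters in this layer, hence no regularization penalty. */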
    @Override
    public double calcRegularizationScore(boolean backpropParamsOnly) {
        return 0;
    }

    @Override
    public Type type() {
        return Type.FEED_FORWARD;
    }
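
    /**
     * Backpropagates the error signal (epsilon) through the activation function only. Since the
     * layer has no parameters, the returned {@link Gradient} is empty and only dL/dInput is
     * propagated to the previous layer.
     */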
    @Override
    public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
        assertInputSet(true);

        INDArray temp = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, input, input.ordering());
        INDArray delta = layerConf().getActivationFn().backprop(temp, epsilon).getFirst(); //TODO handle activation function params
        if (delta == epsilon) {
            //Edge case: identity activation + external errors -> no-op
            delta = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, delta);
        }
        delta = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, delta); //Usually a no-op (except for perhaps identity)

        Gradient ret = new DefaultGradient();
        return new Pair<>(ret, delta);
    }
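
    /**
     * Applies the configured activation function to the layer input. During training the input is
     * duplicated first, so the original pre-activation values remain available for backprop.
     */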
    @Override
    public INDArray activate(boolean training, LayerWorkspaceMgr mgr) {
        assertInputSet(false);

        INDArray in;
        if (training) {
            //dup required: need to keep original input for backprop
            in = mgr.dup(ArrayType.ACTIVATIONS, input, input.ordering());
        } else {
            in = mgr.leverageTo(ArrayType.ACTIVATIONS, input);
        }

        return layerConf().getActivationFn().getActivation(in, training);
    }
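
    /** Not a pretrainable layer: no unsupervised layer-wise pretraining applies. */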
    @Override
    public boolean isPretrainLayer() {
        return false;
    }

    @Override
    public void clearNoiseWeightParams() {
        //No op
    }
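
    /** No parameters to return for this layer. */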
    @Override
    public INDArray params() {
        return null;
    }
}