package org.nd4j.linalg.lossfunctions.impl;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.commons.math3.util.Pair;
import org.nd4j.linalg.activations.IActivation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.transforms.Sign;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.LossUtil;
import org.nd4j.linalg.lossfunctions.serde.RowVectorDeserializer;
import org.nd4j.linalg.lossfunctions.serde.RowVectorSerializer;
import org.nd4j.shade.jackson.annotation.JsonInclude;
import org.nd4j.shade.jackson.databind.annotation.JsonDeserialize;
import org.nd4j.shade.jackson.databind.annotation.JsonSerialize;
/**
* L1 loss function: i.e., sum of absolute errors, L = sum_i abs(predicted_i - actual_i)
* See also {@link LossMAE} for a mathematically similar loss function (MAE has division by N, where N is output size)
*
* @author Susan Eraly
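*
* <p>Usage sketch (illustrative only; assumes ND4J's {@code ActivationIdentity} and a 1x2 output):
* <pre>{@code
* ILossFunction l1 = new LossL1();
* INDArray labels = Nd4j.create(new double[][] {{0.0, 1.0}});
* INDArray preOut = Nd4j.create(new double[][] {{0.5, 0.5}});
* double score = l1.computeScore(labels, preOut, new ActivationIdentity(), null, false); //|0.5-0| + |0.5-1| = 1.0
* }</pre>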
*/
@EqualsAndHashCode
@JsonInclude(JsonInclude.Include.NON_NULL)
@Getter
public class LossL1 implements ILossFunction {
@JsonSerialize(using = RowVectorSerializer.class)
@JsonDeserialize(using = RowVectorDeserializer.class)
protected final INDArray weights;
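/**
 * L1 loss function with no output weighting (all outputs contribute equally).
 */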
public LossL1() {
this(null);
}
/**
* L1 loss function where each output is (optionally) weighted/scaled by a fixed scalar value.
* Note that the weights array must be a row vector, of length equal to the labels/output dimension 1 size.
* A weight vector of 1s should give identical results to no weight vector.
*
* @param weights Weights array (row vector). May be null.
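*                For example, {@code new LossL1(Nd4j.create(new double[] {1, 2, 1}))} doubles the
*                penalty on the second of three outputs (illustrative values only).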
*/
public LossL1(INDArray weights) {
if (weights != null && !weights.isRowVector()) {
throw new IllegalArgumentException("Weights array must be a row vector");
}
this.weights = weights;
}
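/**
 * Computes the per-element score array, {@code abs(output - labels)}, with the optional weight
 * row vector and mask applied. The returned array has the same shape as the labels array.
 */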
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
INDArray scoreArr;
//Apply the activation function to the pre-activation values
INDArray output = activationFn.getActivation(preOutput.dup(), true);
//Per-element absolute error: abs(output - labels)
scoreArr = output.subi(labels);
Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform("abs", scoreArr));
//Weighted loss function
if (weights != null) {
if (weights.length() != output.size(1)) {
throw new IllegalStateException("Weights vector (length " + weights.length()
+ ") does not match output.size(1)=" + output.size(1));
}
scoreArr.muliRowVector(weights);
}
if (mask != null) {
LossUtil.applyMask(scoreArr, mask);
}
return scoreArr;
}
@Override
public double computeScore(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask,
boolean average) {
INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
double score = scoreArr.sumNumber().doubleValue();
//When averaging, divide by the minibatch size (the number of examples, scoreArr.size(0))
if (average)
    score /= scoreArr.size(0);
return score;
}
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
return scoreArr.sum(1); //Sum over the output dimension: one score per example
}
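/*
 * Gradient sketch: for a per-element loss L_i = w_i * |a_i - y_i|, the derivative with respect
 * to the activation is dL/da_i = w_i * sign(a_i - y_i). Backpropagating through the activation
 * function then gives dL/dz, the gradient with respect to the pre-activation values.
 */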
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
INDArray output = activationFn.getActivation(preOutput.dup(), true);
INDArray outSubLabels = output.sub(labels);
INDArray dLda = Nd4j.getExecutioner().execAndReturn(new Sign(outSubLabels));
if (weights != null) {
dLda.muliRowVector(weights);
}
if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
    //For *most* activation functions, we don't need to mask dL/da in addition to masking dL/dz later.
    //However, some (like softmax) require both, because dL/dz_i is a function of dL/da_j for i != j.
    //We could special-case softmax (activationFn instanceof ActivationSoftmax) to skip this; that would
    // buy a tiny bit of performance, but would be error prone.
    LossUtil.applyMask(dLda, mask);
}
//dL/dz
INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation function param gradients
if (mask != null) {
LossUtil.applyMask(gradients, mask);
}
return gradients;
}
@Override
public org.apache.commons.math3.util.Pair<Double, INDArray> computeGradientAndScore(INDArray labels,
                INDArray preOutput, IActivation activationFn, INDArray mask, boolean average) {
//TODO: probably a more efficient way to do this...
return new Pair<>(computeScore(labels, preOutput, activationFn, mask, average),
computeGradient(labels, preOutput, activationFn, mask));
}
@Override
public String toString() {
if (weights == null)
return "LossL1()";
return "LossL1(weights=" + weights + ")";
}
}