
org.deeplearning4j.nn.params.ElementWiseParamInitializer

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  *  See the NOTICE file distributed with this work for additional
 *  *  information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

package org.deeplearning4j.nn.params;

import lombok.val;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.FeedForwardLayer;
import org.deeplearning4j.nn.conf.layers.Layer;
import org.deeplearning4j.nn.weights.IWeightInit;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.indexing.NDArrayIndex;

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class ElementWiseParamInitializer extends DefaultParamInitializer {

    private static final ElementWiseParamInitializer INSTANCE = new ElementWiseParamInitializer();

    public static ElementWiseParamInitializer getInstance() {
        return INSTANCE;
    }

    @Override
    public long numParams(Layer layer) {
        FeedForwardLayer layerConf = (FeedForwardLayer) layer;
        val nIn = layerConf.getNIn();
        return nIn * 2; //nIn weights + nIn biases
    }
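    // Editorial sketch of the flattened layout: for nIn = 3, numParams = 6 and the
    // view is ordered as [ w0, w1, w2, b0, b1, b2 ]: nIn weights followed by nIn biases.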

    /**
     * Initialize the parameters
     *
     * @param conf             the configuration
     * @param paramsView       a view of the full network (backprop) parameters
     * @param initializeParams if true: initialize the parameters according to the configuration. If false: don't modify the
     *                         values in the paramsView array (but do select out the appropriate subset, reshape etc as required)
     * @return Map of parameters keyed by type (view of the 'paramsView' array)
     */
    @Override
    public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
        if (!(conf.getLayer() instanceof FeedForwardLayer))
            throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName());

        Map<String, INDArray> params = Collections.synchronizedMap(new LinkedHashMap<String, INDArray>());

        val length = numParams(conf);
        if (paramsView.length() != length)
            throw new IllegalStateException(
                    "Expected params view of length " + length + ", got length " + paramsView.length());

        FeedForwardLayer layerConf =
                (FeedForwardLayer) conf.getLayer();
        val nIn = layerConf.getNIn();

        INDArray paramsViewReshape = paramsView.reshape(paramsView.length());
        val nWeightParams = nIn;
        INDArray weightView = paramsViewReshape.get(NDArrayIndex.interval(0, nWeightParams));
        INDArray biasView = paramsViewReshape.get(
                NDArrayIndex.interval(nWeightParams, nWeightParams + nIn));
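        // Editorial note: both interval() results are views into paramsView, so the
        // initializers below write directly into the network's flattened parameter
        // array rather than into detached copies.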


        params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams));
        params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
        conf.addVariable(WEIGHT_KEY);
        conf.addVariable(BIAS_KEY);

        return params;
    }

    /**
     * Return a map of gradients (in their standard non-flattened representation), taken from the flattened (row vector) gradientView array.
     * The idea is that this operates in exactly the same way as the paramsView does in
     * {@link #init(NeuralNetConfiguration, INDArray, boolean)}; thus, the positions in the
     * view (and the array orders) must match those of the parameters.
     *
     * @param conf         Configuration
     * @param gradientView The flattened gradients array, as a view of the larger array
     * @return A map containing an array by parameter type, that is a view of the full network gradients array
     */
    @Override
    public Map<String, INDArray> getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) {
        FeedForwardLayer layerConf =
                (FeedForwardLayer) conf.getLayer();
        val nIn = layerConf.getNIn();
        val nOut = layerConf.getNOut();
        val nWeightParams = nIn;

        INDArray gradientViewReshape = gradientView.reshape(gradientView.length());
        INDArray weightGradientView = gradientViewReshape.get( NDArrayIndex.interval(0, nWeightParams));
        INDArray biasView = gradientViewReshape.get(
                NDArrayIndex.interval(nWeightParams, nWeightParams + nOut)); //Already a row vector
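        // Editorial note: for an element-wise layer nOut == nIn, so this bias interval
        // matches the one carved out in init(...) and the two flattened layouts line up.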

        Map<String, INDArray> out = new LinkedHashMap<>();
        out.put(WEIGHT_KEY, weightGradientView);
        out.put(BIAS_KEY, biasView);

        return out;
    }

    @Override
    protected INDArray createWeightMatrix(long nIn, long nOut, IWeightInit weightInit,
                                          INDArray weightParamView, boolean initializeParameters) {
        val shape = new long[] {1, nIn};
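        // Editorial note: shape {1, nIn} makes the element-wise "weight matrix" a row
        // vector with one scalar per input, unlike a dense layer's {nIn, nOut} matrix.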

        if (initializeParameters) {
            INDArray ret = weightInit.init(nIn, //Fan in
                    nOut, //Fan out
                    shape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, weightParamView);
            return ret;
        } else {
            return weightParamView;
        }
    }


}
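For context, here is a minimal usage sketch (an editorial addition, not part of the original file). It assumes the ElementWiseMultiplicationLayer from org.deeplearning4j.nn.conf.layers.misc, which uses this initializer and scales each input element by its own weight plus a bias, so a layer with nIn = nOut = 4 should report 2 * 4 = 8 parameters:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.misc.ElementWiseMultiplicationLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ElementWiseParamInitializerExample {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                // Element-wise layer: nIn must equal nOut
                .layer(new ElementWiseMultiplicationLayer.Builder()
                        .nIn(4).nOut(4)
                        .activation(Activation.IDENTITY)
                        .build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .nIn(4).nOut(2)
                        .activation(Activation.IDENTITY)
                        .build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // 4 element-wise weights + 4 biases
        System.out.println(net.getLayer(0).numParams()); // expected: 8
    }
}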




