/*******************************************************************************
 * Copyright (c) 2015-2019 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

package org.nd4j.linalg.api.ops.impl.layers.recurrent;

import lombok.Getter;
import lombok.NonNull;
import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.base.Preconditions;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ops.DynamicCustomOp;
import org.nd4j.linalg.api.ops.impl.layers.recurrent.config.LSTMConfiguration;
import org.nd4j.linalg.api.ops.impl.layers.recurrent.config.RnnDataFormat;
import org.nd4j.linalg.api.ops.impl.layers.recurrent.weights.LSTMWeights;
import org.tensorflow.framework.AttrValue;
import org.tensorflow.framework.GraphDef;
import org.tensorflow.framework.NodeDef;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * LSTM layer implemented as a single operation.
 * Implementation of the LSTM layer operation, with optional peephole connections.
 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural Computation, 1997.
 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures
 * for large scale acoustic modeling." INTERSPEECH, 2014. https://research.google.com/pubs/archive/43905.pdf
 * See also: https://arxiv.org/pdf/1503.04069.pdf
 *
 * See also {@link LSTMBlockCell} - the lstmBlockCell op is used internally at the C++ level for computation.
 *
 * Input arrays:
 * 0: max sequence length; long/int64 scalar
 * 1: input [seqLength, bS, inSize] at time t
 * 2: previous/initial cell state [bS, numUnits]
 * 3: previous/initial output [bS, numUnits]
 * 4: weights - concatenated input-to-hidden and hidden-to-hidden weights, [(inSize+numUnits), 4*numUnits] - see the sketch below
 * 5: weights - cell peephole (t-1) connections to input modulation gate, [numUnits]
 * 6: weights - cell peephole (t-1) connections to forget gate, [numUnits]
 * 7: weights - cell peephole (t) connections to output gate, [numUnits]
 * 8: biases, shape [4*numUnits]
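 *
 * As an illustration of these shapes, the weight variables might be created as follows (a minimal sketch: the
 * SameDiff instance {@code sd} and the sizes are assumed for the example; builder setters as per {@link LSTMWeights}):
 * <pre>{@code
 * int inSize = 32, numUnits = 64;
 * LSTMWeights weights = LSTMWeights.builder()
 *         .weights(sd.var("w", DataType.FLOAT, inSize + numUnits, 4 * numUnits)) // input-to-hidden + hidden-to-hidden
 *         .inputPeepholeWeights(sd.var("wci", DataType.FLOAT, numUnits))         // peephole to input modulation gate
 *         .forgetPeepholeWeights(sd.var("wcf", DataType.FLOAT, numUnits))        // peephole to forget gate
 *         .outputPeepholeWeights(sd.var("wco", DataType.FLOAT, numUnits))        // peephole to output gate
 *         .bias(sd.var("b", DataType.FLOAT, 4 * numUnits))                       // biases for all 4 gates, concatenated
 *         .build();
 * }</pre>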
 *
 * Input integer arguments: set via {@link LSTMConfiguration}
 * 0: if not zero, provide peephole connections
 * 1: Data format - 0=TNS=[seqLen,mb,size]; 1=NST=[mb,size,seqLen]; 2=NTS=[mb,seqLen,size]
 *
 * Input float arguments: set via {@link LSTMConfiguration}
 * 0: forget bias - added to the forget gate to reduce the scale of forgetting at the start of training
 * 1: cell state clipping value; if non-zero, the cell state is clipped to this magnitude
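 *
 * These arguments are not set directly; they come from the configuration object, for example (a sketch - the values
 * chosen here are arbitrary; the builder methods are the same ones used by this class's TF import code below):
 * <pre>{@code
 * LSTMConfiguration conf = LSTMConfiguration.builder()
 *         .peepHole(true)                  // integer arg 0: use peephole connections
 *         .dataFormat(RnnDataFormat.TNS)   // integer arg 1: [seqLen, mb, size] layout
 *         .forgetBias(1.0)                 // float arg 0: bias added to the forget gate
 *         .clippingCellValue(0.0)          // float arg 1: 0 disables cell state clipping
 *         .build();
 * }</pre>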
 *
 * Output arrays:
 * 0: i      - Input modulation gate activations, rank 3, shape as per dataFormat
 * 1: c (cs) - Cell state (pre tanh), rank 3, shape as per dataFormat
 * 2: f      - Forget gate activations, rank 3, shape as per dataFormat
 * 3: o      - Output gate activations, rank 3, shape as per dataFormat
 * 4: z (ci) - Block input, rank 3, shape as per dataFormat
 * 5: h (co) - Cell state, post tanh, rank 3, shape as per dataFormat
 * 6: y (h)  - Current cell output, rank 3, shape as per dataFormat
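 *
 * Putting it together, a minimal usage sketch (placeholder names, shapes, and the {@code weights}/{@code conf}
 * objects from the sketches above are assumptions for illustration; {@code Nd4j} is org.nd4j.linalg.factory.Nd4j):
 * <pre>{@code
 * SameDiff sd = SameDiff.create();
 * int bS = 8, inSize = 32, numUnits = 64, seqLen = 20;
 * SDVariable x     = sd.placeHolder("x", DataType.FLOAT, seqLen, bS, inSize);   // TNS format input
 * SDVariable cLast = sd.placeHolder("cLast", DataType.FLOAT, bS, numUnits);     // initial cell state
 * SDVariable yLast = sd.placeHolder("yLast", DataType.FLOAT, bS, numUnits);     // initial output
 * SDVariable maxTSLength = sd.constant("maxLen", Nd4j.scalar(DataType.LONG, seqLen));
 *
 * LSTMLayer lstm = new LSTMLayer(sd, maxTSLength, x, cLast, yLast, weights, conf);
 * SDVariable[] out = lstm.outputVariables();   // 7 outputs, indexed as listed above
 * SDVariable y = out[6];                       // per-timestep cell output
 * }</pre>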
 *
 * @author Alex Black
 */
public class LSTMLayer extends DynamicCustomOp {

    private LSTMConfiguration configuration;

    @Getter
    private LSTMWeights weights;

    public LSTMLayer() {
    }

    public LSTMLayer(@NonNull SameDiff sameDiff, SDVariable maxTSLength, SDVariable x, SDVariable cLast, SDVariable yLast,
                     LSTMWeights weights, LSTMConfiguration configuration) {
        super(null, sameDiff, weights.argsWithInputs(maxTSLength, x, cLast, yLast));
        this.configuration = configuration;
        this.weights = weights;

        addIArgument(configuration.iArgs(true));
        addTArgument(configuration.tArgs());
    }

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes) {
        Preconditions.checkState(inputDataTypes != null && inputDataTypes.size() == 9,
                "Expected exactly 9 inputs to LSTMLayer, got %s", inputDataTypes);
        //7 outputs, all of the same type as the input. Note that input 0 is the max sequence length (int64); input 1 is the actual input
        DataType dt = inputDataTypes.get(1);
        Preconditions.checkState(dt.isFPType(), "Input type 1 must be a floating point type, got %s", dt);
        return Arrays.asList(dt, dt, dt, dt, dt, dt, dt);
    }

    @Override
    public List<SDVariable> doDiff(List<SDVariable> grads) {
        throw new UnsupportedOperationException("Not yet implemented");
    }

    @Override
    public void initFromTensorFlow(NodeDef nodeDef, SameDiff initWith, Map<String, AttrValue> attributesForNode, GraphDef graph) {
        configuration = LSTMConfiguration.builder()
                .forgetBias(attributesForNode.get("forget_bias").getF())
                .clippingCellValue(attributesForNode.get("cell_clip").getF())
                .peepHole(attributesForNode.get("use_peephole").getB())
                .dataFormat(RnnDataFormat.TNS)  //Always time major for TF BlockLSTM
                .build();
        addIArgument(configuration.iArgs(true));
        addTArgument(configuration.tArgs());
    }

    @Override
    public String opName() {
        return "lstmBlock";
    }

    @Override
    public Map<String, Object> propertiesForFunction() {
        return configuration.toProperties(true);
    }

    @Override
    public String tensorflowName() {
        return "BlockLSTM";
    }
}




