// Targeted by JavaCPP version 1.5.5: DO NOT EDIT THIS FILE

package org.nd4j.nativeblas;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

public class Nd4jCpu extends org.nd4j.nativeblas.Nd4jCpuHelper {
    static { Loader.load(); }

@Name("std::vector >") public static class IntVectorVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IntVectorVector(Pointer p) { super(p); }
    public IntVectorVector(int[] ... array) { this(array.length); put(array); }
    public IntVectorVector()       { allocate();  }
    public IntVectorVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator =") @ByRef IntVectorVector put(@ByRef IntVectorVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);
    public boolean empty(@Cast("size_t") long i) { return size(i) == 0; }
    public native @Index(function = "at") long size(@Cast("size_t") long i);
    public void clear(@Cast("size_t") long i) { resize(i, 0); }
    public native @Index(function = "at") void resize(@Cast("size_t") long i, @Cast("size_t") long n);

    @Index(function = "at") public native int get(@Cast("size_t") long i, @Cast("size_t") long j);
    public native IntVectorVector put(@Cast("size_t") long i, @Cast("size_t") long j, int value);

    public int[][] get() {
        int[][] array = new int[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE][];
        for (int i = 0; i < array.length; i++) {
            array[i] = new int[size(i) < Integer.MAX_VALUE ? (int)size(i) : Integer.MAX_VALUE];
            for (int j = 0; j < array[i].length; j++) {
                array[i][j] = get(i, j);
            }
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.deepToString(get());
    }

    public IntVectorVector put(int[] ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            if (size(i) != array[i].length) { resize(i, array[i].length); }
            for (int j = 0; j < array[i].length; j++) {
                put(i, j, array[i][j]);
            }
        }
        return this;
    }
}
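// Usage sketch (illustrative, not part of the generated bindings; assumes the matching
// nd4j-native binary is on the classpath so Loader.load() succeeds):
//
//     int[][] data = { {1, 2, 3}, {4, 5} };
//     IntVectorVector vv = new IntVectorVector(data);  // copies into the native vector of vectors
//     long rows    = vv.size();                        // 2
//     int  value   = vv.get(1, 0);                     // 4
//     int[][] back = vv.get();                         // snapshot back into a Java array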

@Name("std::vector >") public static class LongVectorVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LongVectorVector(Pointer p) { super(p); }
    public LongVectorVector(long[] ... array) { this(array.length); put(array); }
    public LongVectorVector()       { allocate();  }
    public LongVectorVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator =") @ByRef LongVectorVector put(@ByRef LongVectorVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);
    public boolean empty(@Cast("size_t") long i) { return size(i) == 0; }
    public native @Index(function = "at") long size(@Cast("size_t") long i);
    public void clear(@Cast("size_t") long i) { resize(i, 0); }
    public native @Index(function = "at") void resize(@Cast("size_t") long i, @Cast("size_t") long n);

    @Index(function = "at") public native @Cast("Nd4jLong") long get(@Cast("size_t") long i, @Cast("size_t") long j);
    public native LongVectorVector put(@Cast("size_t") long i, @Cast("size_t") long j, long value);

    public long[][] get() {
        long[][] array = new long[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE][];
        for (int i = 0; i < array.length; i++) {
            array[i] = new long[size(i) < Integer.MAX_VALUE ? (int)size(i) : Integer.MAX_VALUE];
            for (int j = 0; j < array[i].length; j++) {
                array[i][j] = get(i, j);
            }
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.deepToString(get());
    }

    public LongVectorVector put(long[] ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            if (size(i) != array[i].length) { resize(i, array[i].length); }
            for (int j = 0; j < array[i].length; j++) {
                put(i, j, array[i][j]);
            }
        }
        return this;
    }
}

@Name("std::vector") public static class ConstNDArrayVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConstNDArrayVector(Pointer p) { super(p); }
    public ConstNDArrayVector(NDArray value) { this(1); put(0, value); }
    public ConstNDArrayVector(NDArray ... array) { this(array.length); put(array); }
    public ConstNDArrayVector()       { allocate();  }
    public ConstNDArrayVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator =") @ByRef ConstNDArrayVector put(@ByRef ConstNDArrayVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @Const NDArray get(@Cast("size_t") long i);
    public native ConstNDArrayVector put(@Cast("size_t") long i, NDArray value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @Const NDArray value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator ++") @ByRef Iterator increment();
        public native @Name("operator ==") boolean equals(@ByRef Iterator it);
        public native @Name("operator *") @Const NDArray get();
    }

    public NDArray[] get() {
        NDArray[] array = new NDArray[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public NDArray pop_back() {
        long size = size();
        NDArray value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public ConstNDArrayVector push_back(NDArray value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public ConstNDArrayVector put(NDArray value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public ConstNDArrayVector put(NDArray ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class NDArrayVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NDArrayVector(Pointer p) { super(p); }
    public NDArrayVector(NDArray value) { this(1); put(0, value); }
    public NDArrayVector(NDArray ... array) { this(array.length); put(array); }
    public NDArrayVector()       { allocate();  }
    public NDArrayVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator =") @ByRef NDArrayVector put(@ByRef NDArrayVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native NDArray get(@Cast("size_t") long i);
    public native NDArrayVector put(@Cast("size_t") long i, NDArray value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, NDArray value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator ++") @ByRef Iterator increment();
        public native @Name("operator ==") boolean equals(@ByRef Iterator it);
        public native @Name("operator *") @Const NDArray get();
    }

    public NDArray[] get() {
        NDArray[] array = new NDArray[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public NDArray pop_back() {
        long size = size();
        NDArray value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public NDArrayVector push_back(NDArray value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public NDArrayVector put(NDArray value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public NDArrayVector put(NDArray ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}
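// Usage sketch (illustrative): NDArrayVector wraps a native vector of NDArray pointers and is
// normally filled with NDArray instances obtained from other native calls; `someArray` below is
// a hypothetical placeholder for such an instance.
//
//     NDArrayVector outputs = new NDArrayVector();
//     outputs.push_back(someArray);       // resize(size + 1) followed by put(size, value)
//     NDArray last  = outputs.pop_back(); // returns the last element and shrinks the vector
//     NDArray[] all = outputs.get();      // snapshot as a Java array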

@NoOffset @Name("std::pair") public static class IntIntPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IntIntPair(Pointer p) { super(p); }
    public IntIntPair(int firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public IntIntPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator =") @ByRef IntIntPair put(@ByRef IntIntPair x);


    @MemberGetter public native int first(); public native IntIntPair first(int first);
    @MemberGetter public native int second();  public native IntIntPair second(int second);

    public IntIntPair put(int firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}
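// Usage sketch (illustrative): the pair is read through first()/second() and written through the
// single-argument overloads.
//
//     IntIntPair p = new IntIntPair(3, 7);
//     int f = p.first();   // 3
//     p.second(9);         // updates the second element in place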

// Parsed from memory/MemoryType.h

//
// Created by raver119 on 07.05.19.
//

// #ifndef DEV_TESTS_MEMORYTYPE_H
// #define DEV_TESTS_MEMORYTYPE_H
        /** enum sd::memory::MemoryType */
        public static final int
            HOST = 0,
            DEVICE = 10;
    


// #endif //DEV_TESTS_MEMORYTYPE_H


// Parsed from array/DataType.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef ND4J_DATATYPE_H
// #define ND4J_DATATYPE_H
    /** enum sd::DataType */
    public static final int
        INHERIT = 0,
        BOOL = 1,
        FLOAT8 = 2,
        HALF = 3,
        HALF2 = 4,
        FLOAT32 = 5,
        DOUBLE = 6,
        INT8 = 7,
        INT16 = 8,
        INT32 = 9,
        INT64 = 10,
        UINT8 = 11,
        UINT16 = 12,
        UINT32 = 13,
        UINT64 = 14,
        QINT8 = 15,
        QINT16 = 16,
        BFLOAT16 = 17,
        UTF8 = 50,
        UTF16 = 51,
        UTF32 = 52,
        ANY = 100,
        AUTO = 200;


// #endif
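// Note (illustrative): these constants are the raw integer values of sd::DataType and are what
// the @Cast("sd::DataType") int parameters throughout this class expect, e.g.
//
//     int dtype = Nd4jCpu.FLOAT32;   // 5: 32-bit floating point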

// Parsed from array/DataBuffer.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//

// #ifndef DEV_TESTS_DATABUFFER_H
// #define DEV_TESTS_DATABUFFER_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

@Namespace("sd") @NoOffset public static class DataBuffer extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DataBuffer(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public DataBuffer(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public DataBuffer position(long position) {
        return (DataBuffer)super.position(position);
    }
    @Override public DataBuffer getPointer(long i) {
        return new DataBuffer((Pointer)this).position(position + i);
    }


        public DataBuffer(Pointer primary, Pointer special,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType,
                                       @Cast("const bool") boolean isOwnerPrimary/*=false*/, @Cast("const bool") boolean isOwnerSpecial/*=false*/,
                                       Workspace workspace/*=nullptr*/) { super((Pointer)null); allocate(primary, special, lenInBytes, dataType, isOwnerPrimary, isOwnerSpecial, workspace); }
        private native void allocate(Pointer primary, Pointer special,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType,
                                       @Cast("const bool") boolean isOwnerPrimary/*=false*/, @Cast("const bool") boolean isOwnerSpecial/*=false*/,
                                       Workspace workspace/*=nullptr*/);
        public DataBuffer(Pointer primary, Pointer special,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType) { super((Pointer)null); allocate(primary, special, lenInBytes, dataType); }
        private native void allocate(Pointer primary, Pointer special,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType);

        public DataBuffer(Pointer primary,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType,
                                       @Cast("const bool") boolean isOwnerPrimary/*=false*/,
                                       Workspace workspace/*=nullptr*/) { super((Pointer)null); allocate(primary, lenInBytes, dataType, isOwnerPrimary, workspace); }
        private native void allocate(Pointer primary,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType,
                                       @Cast("const bool") boolean isOwnerPrimary/*=false*/,
                                       Workspace workspace/*=nullptr*/);
        public DataBuffer(Pointer primary,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType) { super((Pointer)null); allocate(primary, lenInBytes, dataType); }
        private native void allocate(Pointer primary,
                                       @Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType);

        public DataBuffer(@Const Pointer hostBuffer,
                                       @Cast("const sd::DataType") int dataType, @Cast("const size_t") long lenInBytes,
                                       Workspace workspace/*=nullptr*/) { super((Pointer)null); allocate(hostBuffer, dataType, lenInBytes, workspace); }
        private native void allocate(@Const Pointer hostBuffer,
                                       @Cast("const sd::DataType") int dataType, @Cast("const size_t") long lenInBytes,
                                       Workspace workspace/*=nullptr*/);
        public DataBuffer(@Const Pointer hostBuffer,
                                       @Cast("const sd::DataType") int dataType, @Cast("const size_t") long lenInBytes) { super((Pointer)null); allocate(hostBuffer, dataType, lenInBytes); }
        private native void allocate(@Const Pointer hostBuffer,
                                       @Cast("const sd::DataType") int dataType, @Cast("const size_t") long lenInBytes);

        public DataBuffer(@Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType, Workspace workspace/*=nullptr*/, @Cast("const bool") boolean allocBoth/*=false*/) { super((Pointer)null); allocate(lenInBytes, dataType, workspace, allocBoth); }
        private native void allocate(@Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType, Workspace workspace/*=nullptr*/, @Cast("const bool") boolean allocBoth/*=false*/);
        public DataBuffer(@Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType) { super((Pointer)null); allocate(lenInBytes, dataType); }
        private native void allocate(@Cast("const size_t") long lenInBytes, @Cast("const sd::DataType") int dataType);

        public DataBuffer(@Const @ByRef DataBuffer other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef DataBuffer other);
        public DataBuffer() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native @ByRef @Name("operator =") DataBuffer put(@Const @ByRef DataBuffer other);

        public native @Cast("sd::DataType") int getDataType();
        public native void setDataType(@Cast("sd::DataType") int dataType);
        public native @Cast("size_t") long getLenInBytes();

        public native Pointer primary();
        public native Pointer special();

        public native void allocatePrimary();
        public native void allocateSpecial();

        public native void writePrimary();
        public native void writeSpecial();
        public native void readPrimary();
        public native void readSpecial();
        public native @Cast("bool") boolean isPrimaryActual();
        public native @Cast("bool") boolean isSpecialActual();

        public native void expand(@Cast("const uint64_t") long size);

        public native int deviceId();
        public native void setDeviceId(int deviceId);
        public native void migrate();

        public native void syncToPrimary(@Const LaunchContext context, @Cast("const bool") boolean forceSync/*=false*/);
        public native void syncToPrimary(@Const LaunchContext context);
        public native void syncToSpecial(@Cast("const bool") boolean forceSync/*=false*/);
        public native void syncToSpecial();

        public native void setToZeroBuffers(@Cast("const bool") boolean both/*=false*/);
        public native void setToZeroBuffers();

        public native void copyBufferFrom(@Const @ByRef DataBuffer other, @Cast("size_t") long sizeToCopyinBytes/*=0*/, @Cast("const Nd4jLong") long offsetThis/*=0*/, @Cast("const Nd4jLong") long offsetOther/*=0*/);
        public native void copyBufferFrom(@Const @ByRef DataBuffer other);

        public static native void memcpy(@Const @ByRef DataBuffer dst, @Const @ByRef DataBuffer src);

        public native void setPrimaryBuffer(Pointer buffer, @Cast("size_t") long length);
        public native void setSpecialBuffer(Pointer buffer, @Cast("size_t") long length);

        /**
         * This method deletes the underlying buffers if this DataBuffer owns them.
         */
        public native @Name("close") void _close();
}
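// Usage sketch (illustrative): wrapping an existing host allocation in a DataBuffer without
// transferring ownership.
//
//     FloatPointer host = new FloatPointer(16);                  // 16 floats allocated via JavaCPP
//     DataBuffer db = new DataBuffer(host, 16 * 4, Nd4jCpu.FLOAT32);
//     long bytes = db.getLenInBytes();                           // 64
//     int  dtype = db.getDataType();                             // 5 == FLOAT32
//     db._close();  // frees only owned buffers; this overload leaves isOwnerPrimary at its default (false)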
///// IMPLEMENTATION OF INLINE METHODS /////

////////////////////////////////////////////////////////////////////////
    

////////////////////////////////////////////////////////////////////////
    




// #endif //DEV_TESTS_DATABUFFER_H


// Parsed from array/PointerDeallocator.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
//  @author [email protected]
//

// #ifndef SD_POINTERDEALLOCATOR_H_
// #define SD_POINTERDEALLOCATOR_H_

// #include 
// #include 



// #endif //SD_POINTERDEALLOCATOR_H_


// Parsed from array/PointerWrapper.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
//  @author [email protected]
//

// #ifndef SD_ARRAY_POINTER_H_
// #define SD_ARRAY_POINTER_H_

// #include 
// #include 
// #include 
// #include 
 // namespace sd

// #endif //SD_ARRAY_POINTER_H_


// Parsed from array/ConstantDataBuffer.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]

// #ifndef LIBND4J_CONSTANTDATABUFFER_H
// #define LIBND4J_CONSTANTDATABUFFER_H

// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class ConstantDataBuffer extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ConstantDataBuffer(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ConstantDataBuffer(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ConstantDataBuffer position(long position) {
            return (ConstantDataBuffer)super.position(position);
        }
        @Override public ConstantDataBuffer getPointer(long i) {
            return new ConstantDataBuffer((Pointer)this).position(position + i);
        }
    
        public ConstantDataBuffer(@Const @ByRef ConstantDataBuffer other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef ConstantDataBuffer other);
        public ConstantDataBuffer() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native @Cast("uint8_t") byte sizeOf();
        public native @Cast("uint64_t") long length();

        public native Pointer primary();
        public native Pointer special();

        public native @ByRef @Name("operator =") ConstantDataBuffer put(@Const @ByRef ConstantDataBuffer other);
    }


// #endif //DEV_TESTS_CONSTANTDATABUFFER_H


// Parsed from array/ConstantShapeBuffer.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
// @author [email protected]
//

// #ifndef SD_ARRAY_CONSTANTSHAPEBUFFER_H_
// #define SD_ARRAY_CONSTANTSHAPEBUFFER_H_

// #include 
// #include 
// #include 
// #include 

@Namespace("sd") public static class ConstantShapeBuffer extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConstantShapeBuffer(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ConstantShapeBuffer(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ConstantShapeBuffer position(long position) {
        return (ConstantShapeBuffer)super.position(position);
    }
    @Override public ConstantShapeBuffer getPointer(long i) {
        return new ConstantShapeBuffer((Pointer)this).position(position + i);
    }

  public ConstantShapeBuffer() { super((Pointer)null); allocate(); }
  private native void allocate();

  public native @Cast("const Nd4jLong*") LongPointer primary();
  public native @Cast("const Nd4jLong*") LongPointer special();
  public native @Cast("const Nd4jLong*") LongPointer platform();
}

 // namespace sd

// #endif //SD_ARRAY_CONSTANTSHAPEBUFFER_H_


// Parsed from array/ConstantOffsetsBuffer.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
// @author [email protected]
//

// #ifndef SD_ARRAY_CONSTANTOFFSETSBUFFER_H_
// #define SD_ARRAY_CONSTANTOFFSETSBUFFER_H_

// #include 
// #include 
// #include 
// #include 

@Namespace("sd") public static class ConstantOffsetsBuffer extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConstantOffsetsBuffer(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ConstantOffsetsBuffer(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ConstantOffsetsBuffer position(long position) {
        return (ConstantOffsetsBuffer)super.position(position);
    }
    @Override public ConstantOffsetsBuffer getPointer(long i) {
        return new ConstantOffsetsBuffer((Pointer)this).position(position + i);
    }

  public ConstantOffsetsBuffer() { super((Pointer)null); allocate(); }
  private native void allocate();

  public native @Cast("const Nd4jLong*") LongPointer primary();
  public native @Cast("const Nd4jLong*") LongPointer special();
  public native @Cast("const Nd4jLong*") LongPointer platform();
}

 // namespace sd

// #endif //SD_ARRAY_CONSTANTOFFSETSBUFFER_H_


// Parsed from array/ConstantDescriptor.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef DEV_TESTS_CONSTANTDESCRIPTOR_H
// #define DEV_TESTS_CONSTANTDESCRIPTOR_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class ConstantDescriptor extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ConstantDescriptor(Pointer p) { super(p); }
    
        public ConstantDescriptor(DoublePointer values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(DoublePointer values, int length);
        public ConstantDescriptor(DoubleBuffer values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(DoubleBuffer values, int length);
        public ConstantDescriptor(double[] values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(double[] values, int length);
        public ConstantDescriptor(@Cast("const Nd4jLong*") LongPointer values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer values, int length);
        public ConstantDescriptor(@Cast("const Nd4jLong*") LongBuffer values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer values, int length);
        public ConstantDescriptor(@Cast("const Nd4jLong*") long[] values, int length) { super((Pointer)null); allocate(values, length); }
        private native void allocate(@Cast("const Nd4jLong*") long[] values, int length);

        public ConstantDescriptor(@Cast("Nd4jLong*") @StdVector LongPointer values) { super((Pointer)null); allocate(values); }
        private native void allocate(@Cast("Nd4jLong*") @StdVector LongPointer values);
        public ConstantDescriptor(@Cast("Nd4jLong*") @StdVector LongBuffer values) { super((Pointer)null); allocate(values); }
        private native void allocate(@Cast("Nd4jLong*") @StdVector LongBuffer values);
        public ConstantDescriptor(@Cast("Nd4jLong*") @StdVector long[] values) { super((Pointer)null); allocate(values); }
        private native void allocate(@Cast("Nd4jLong*") @StdVector long[] values);
        public ConstantDescriptor(@StdVector DoublePointer values) { super((Pointer)null); allocate(values); }
        private native void allocate(@StdVector DoublePointer values);
        public ConstantDescriptor(@StdVector DoubleBuffer values) { super((Pointer)null); allocate(values); }
        private native void allocate(@StdVector DoubleBuffer values);
        public ConstantDescriptor(@StdVector double[] values) { super((Pointer)null); allocate(values); }
        private native void allocate(@StdVector double[] values);

        // equal to operator
        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ConstantDescriptor other);

        // less than operator
        public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef ConstantDescriptor other);

        public native @Cast("bool") boolean isInteger();
        public native @Cast("bool") boolean isFloat();

        public native @Cast("Nd4jLong") long length();

        public native @Cast("Nd4jLong*") @StdVector LongPointer integerValues();
        public native @StdVector DoublePointer floatValues();
    }
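    // Usage sketch (illustrative): building a descriptor over a small set of double values.
    //
    //     double[] vals = { 1.0, 2.5, 3.0 };
    //     ConstantDescriptor desc = new ConstantDescriptor(vals, vals.length);
    //     boolean isFp = desc.isFloat();   // expected to be true for double-valued descriptors
    //     long     len = desc.length();    // 3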


// #ifndef __JAVACPP_HACK__

// #endif


// #endif //DEV_TESTS_CONSTANTDESCRIPTOR_H


// Parsed from array/TadPack.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef DEV_TESTS_TADPACK_H
// #define DEV_TESTS_TADPACK_H

// #include 
// #include 
    @Namespace("sd") @NoOffset public static class TadPack extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public TadPack(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public TadPack(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public TadPack position(long position) {
            return (TadPack)super.position(position);
        }
        @Override public TadPack getPointer(long i) {
            return new TadPack((Pointer)this).position(position + i);
        }
    
        public TadPack(@Const @ByRef ConstantShapeBuffer shapes, @Const @ByRef ConstantOffsetsBuffer offsets, @Cast("Nd4jLong") long numTads) { super((Pointer)null); allocate(shapes, offsets, numTads); }
        private native void allocate(@Const @ByRef ConstantShapeBuffer shapes, @Const @ByRef ConstantOffsetsBuffer offsets, @Cast("Nd4jLong") long numTads);
        public TadPack() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native @Cast("const Nd4jLong*") LongPointer primaryShapeInfo();
        public native @Cast("const Nd4jLong*") LongPointer primaryOffsets();

        public native @Cast("const Nd4jLong*") LongPointer specialShapeInfo();
        public native @Cast("const Nd4jLong*") LongPointer specialOffsets();

        public native @Cast("Nd4jLong") long numberOfTads();
        public native int shapeInfoLength();

        /**
         * These methods return either the primary or the special pointers, depending on which platform the binaries were compiled for
         * @return
         */
        public native @Cast("const Nd4jLong*") LongPointer platformShapeInfo();
        public native @Cast("const Nd4jLong*") LongPointer platformOffsets();
    }



// #endif //DEV_TESTS_TADPACK_H


// Parsed from execution/ErrorReference.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef DEV_TESTS_ERRORREFERENCE_H
// #define DEV_TESTS_ERRORREFERENCE_H

// #include 
// #include 
    @Namespace("sd") @NoOffset public static class ErrorReference extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ErrorReference(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ErrorReference(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ErrorReference position(long position) {
            return (ErrorReference)super.position(position);
        }
        @Override public ErrorReference getPointer(long i) {
            return new ErrorReference((Pointer)this).position(position + i);
        }
    
        public ErrorReference() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native int errorCode();
        public native @Cast("char*") String errorMessage();

        public native void setErrorCode(int errorCode);
        public native void setErrorMessage(@StdString BytePointer message);
        public native void setErrorMessage(@StdString String message);
    }



// #endif //DEV_TESTS_ERRORREFERENCE_H


// Parsed from execution/Engine.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef SD_ENGINE_H
// #define SD_ENGINE_H
    /** enum samediff::Engine */
    public static final int
        ENGINE_CPU = 0,
        ENGINE_CUDA = 1;


// #endif //SD_ENGINE_H


// Parsed from execution/ExecutionMode.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/


//
// @author [email protected]
//

// #ifndef SD_EXECUTIONMODE_H
// #define SD_EXECUTIONMODE_H
    /** enum samediff::ExecutionMode */
    public static final int
        MODE_UNDEFINED = 0,
        MODE_TRAINING = 1,
        MODE_INFERENCE = 2;


// #endif //SD_EXECUTIONMODE_H


// Parsed from system/Environment.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 06.10.2017.
//

// #ifndef LIBND4J_ENVIRONMENT_H
// #define LIBND4J_ENVIRONMENT_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class Environment extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Environment(Pointer p) { super(p); }
    
        /**
         * These 3 fields are mostly for CUDA/cuBLAS version tracking
         */
        public native int _blasMajorVersion(); public native Environment _blasMajorVersion(int setter);
        public native int _blasMinorVersion(); public native Environment _blasMinorVersion(int setter);
        public native int _blasPatchVersion(); public native Environment _blasPatchVersion(int setter);

        public static native @ByRef Environment getInstance();

        public native @Cast("bool") boolean isVerbose();
        public native void setVerbose(@Cast("bool") boolean reallyVerbose);
        public native @Cast("bool") boolean isDebug();
        public native @Cast("bool") boolean isProfiling();
        public native @Cast("bool") boolean isDetectingLeaks();
        public native @Cast("bool") boolean isDebugAndVerbose();
        public native void setDebug(@Cast("bool") boolean reallyDebug);
        public native void setProfiling(@Cast("bool") boolean reallyProfile);
        public native void setLeaksDetector(@Cast("bool") boolean reallyDetect);
        public native @Cast("bool") boolean helpersAllowed();
        public native void allowHelpers(@Cast("bool") boolean reallyAllow);

        public native @Cast("bool") boolean blasFallback();
        
        public native int tadThreshold();
        public native void setTadThreshold(int threshold);

        public native int elementwiseThreshold();
        public native void setElementwiseThreshold(int threshold);

        public native int maxThreads();
        public native void setMaxThreads(int max);

        public native int maxMasterThreads();
        public native void setMaxMasterThreads(int max);

        /*
         * Legacy memory-limits API, still used by the new API as a simplified version
         */
        public native void setMaxPrimaryMemory(@Cast("uint64_t") long maxBytes);
        public native void setMaxSpecialyMemory(@Cast("uint64_t") long maxBytes);
        public native void setMaxDeviceMemory(@Cast("uint64_t") long maxBytes);

        public native @Cast("uint64_t") long maxPrimaryMemory();
        public native @Cast("uint64_t") long maxSpecialMemory();
        ////////////////////////

        /*
         * Methods for memory limits/counters
         */
        public native void setGroupLimit(int group, @Cast("Nd4jLong") long numBytes);
        public native void setDeviceLimit(int deviceId, @Cast("Nd4jLong") long numBytes);

        public native @Cast("Nd4jLong") long getGroupLimit(int group);
        public native @Cast("Nd4jLong") long getDeviceLimit(int deviceId);

        public native @Cast("Nd4jLong") long getGroupCounter(int group);
        public native @Cast("Nd4jLong") long getDeviceCounter(int deviceId);
        ////////////////////////

        public native @Cast("bool") boolean isUseMKLDNN();
        public native void setUseMKLDNN(@Cast("bool") boolean useMKLDNN);

        public native @Cast("sd::DataType") int defaultFloatDataType();
        public native void setDefaultFloatDataType(@Cast("sd::DataType") int dtype);

        public native @Cast("bool") boolean precisionBoostAllowed();
        public native void allowPrecisionBoost(@Cast("bool") boolean reallyAllow);

        public native @Cast("bool") boolean isExperimentalBuild();

        public native @Cast("bool") boolean isCPU();

        public native int blasMajorVersion();
        public native int blasMinorVersion();
        public native int blasPatchVersion();

        public native @StdVector Pair capabilities();
    }
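    // Usage sketch (illustrative): Environment is a native singleton used to query and tune the
    // backend at runtime.
    //
    //     Environment env = Environment.getInstance();
    //     boolean onCpu = env.isCPU();     // expected to be true for this CPU backend
    //     env.setMaxThreads(4);            // cap native parallelism
    //     env.setVerbose(true);            // enable verbose native logging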



// #endif //LIBND4J_ENVIRONMENT_H


// Parsed from types/utf8string.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef DEV_TESTS_UTF8STRING_H
// #define DEV_TESTS_UTF8STRING_H

// #include 
// #include 
    @Namespace("sd") @NoOffset public static class utf8string extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public utf8string(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public utf8string(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public utf8string position(long position) {
            return (utf8string)super.position(position);
        }
        @Override public utf8string getPointer(long i) {
            return new utf8string((Pointer)this).position(position + i);
        }
    
        public native @Cast("char*") BytePointer _buffer(); public native utf8string _buffer(BytePointer setter);
        public native @Cast("unsigned int") int _length(); public native utf8string _length(int setter);

        public utf8string() { super((Pointer)null); allocate(); }
        private native void allocate();

        public utf8string(@Cast("char*") String string, int length) { super((Pointer)null); allocate(string, length); }
        private native void allocate(@Cast("char*") String string, int length);
        public utf8string(@Cast("char*") BytePointer string, int length) { super((Pointer)null); allocate(string, length); }
        private native void allocate(@Cast("char*") BytePointer string, int length);
        public utf8string(@StdString BytePointer string) { super((Pointer)null); allocate(string); }
        private native void allocate(@StdString BytePointer string);
        public utf8string(@StdString String string) { super((Pointer)null); allocate(string); }
        private native void allocate(@StdString String string);
        public utf8string(@Const @ByRef utf8string other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef utf8string other);
        public native @ByRef @Name("operator =") utf8string put(@Const @ByRef utf8string other);
    }
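    // Usage sketch (illustrative): utf8string is a thin native string holder.
    //
    //     utf8string s = new utf8string("hello");
    //     int len = s._length();           // stored length (presumably in bytes)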



// #endif //DEV_TESTS_UTF8STRING_H


// Parsed from legacy/NativeOps.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by agibsonccc on 2/21/16.
//

// #ifndef NATIVEOPERATIONS_NATIVEOPS_H
// #define NATIVEOPERATIONS_NATIVEOPS_H

/*
#ifndef thread_local
# if __STDC_VERSION__ >= 201112 && !defined __STDC_NO_THREADS__
#  define thread_local _Thread_local
# elif defined _WIN32 && ( \
       defined _MSC_VER || \
       defined __ICL || \
       defined __DMC__ || \
       defined __BORLANDC__ )
#  define thread_local __declspec(thread)
// note that ICC (linux) and Clang are covered by __GNUC__ 
# elif defined __GNUC__ || \
       defined __SUNPRO_C || \
       defined __xlC__
#  define thread_local __thread
# else
#  error "Cannot define thread_local"
# endif
#endif
*/

// #include 
// #include 
// #include 

//DO NOT REMOVE: THIS IS AN EDITOR SEMANTICS THING FOR CLION
//IT DEFINES THE EXPORT MACRO FOR THE EDITOR AND THEN
//RE ADDS THE DEFINITION VIA dll.h
// #ifdef  _WIN32
// #define ND4J_EXPORT __declspec(dllexport)
// #else
// #define ND4J_EXPORT
// #endif
// #include 

/*
int tad_threshold = 1;
int element_threshold = 32;

bool debug = false;
bool verbose = false;
*/

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

/**
 * This function returns the last error code stored.
 * @return non-zero if something bad happened
 */
public native int lastErrorCode();

/**
 * This function returns the last error message, if the last error code is > 0
 * @return
 */
public native @Cast("char*") String lastErrorMessage();

/**
 *
 * @param p
 * @param len
 */
public native void tryPointer(@Cast("Nd4jPointer") Pointer extra, @Cast("Nd4jPointer") Pointer p, int len);

/**
 *
 * @param num
 */
public native void setElementThreshold(int num);

/**
 *
 * @param num
 */
public native void setTADThreshold(int num);

/**
   *
   * @param opNum
   * @param x
   * @param xShapeInfo
   * @param extraParams
   */
public native void execIndexReduceScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                       int opNum,
                                       OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                       Pointer extraParams,
                                       OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execIndexReduceScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                       int opNum,
                                       OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                       Pointer extraParams,
                                       OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execIndexReduceScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                       int opNum,
                                       OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                       Pointer extraParams,
                                       OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);

/**
 *
 * @param opNum
 * @param x
 * @param xShapeInfo
 * @param extraParams
 * @param result
 * @param resultShapeInfoBuffer
 * @param dimension
 * @param dimensionLength
 */
public native void execIndexReduce(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execIndexReduce(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execIndexReduce(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);
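
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execIndexReduce. It assumes the caller has already prepared the OpaqueDataBuffer handles,
// host shape-info arrays and the dimension buffer, and that extraPointers, extraParams and
// the device-side shape-info arguments may be left null on this CPU backend.
private static void exampleExecIndexReduceSketch(Nd4jCpu ops, int opNum,
                                                 OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                                 OpaqueDataBuffer dbZ, long[] hZShapeInfo,
                                                 OpaqueDataBuffer dbDimension, long[] hDimensionShape) {
    ops.execIndexReduce(null, opNum,
                        dbX, hXShapeInfo, null,              // input buffer, host/device shape info
                        null,                                // extraParams: none in this sketch
                        dbZ, hZShapeInfo, null,              // result buffer, host/device shape info
                        dbDimension, hDimensionShape, null); // dimensions to reduce along
}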

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param dbY
 * @param hYShapeInfo
 * @param dYShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbDimension
 * @param hDimensionShape
 * @param dDimensionShape
 */
public native void execBroadcast(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execBroadcast(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execBroadcast(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);
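
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execBroadcast. Buffers, shape info and the dimension buffer are assumed to be prepared by
// the caller; extraPointers and device-side shape info are assumed ignorable (null) on CPU.
private static void exampleExecBroadcastSketch(Nd4jCpu ops, int opNum,
                                               OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                               OpaqueDataBuffer dbY, long[] hYShapeInfo,
                                               OpaqueDataBuffer dbZ, long[] hZShapeInfo,
                                               OpaqueDataBuffer dbDimension, long[] hDimensionShape) {
    ops.execBroadcast(null, opNum,
                      dbX, hXShapeInfo, null,               // x operand
                      dbY, hYShapeInfo, null,               // broadcast operand y
                      dbZ, hZShapeInfo, null,               // result z
                      dbDimension, hDimensionShape, null);  // dimensions along which to broadcast
}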


public native void execBroadcastBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
        Pointer extraParams,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execBroadcastBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
        Pointer extraParams,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execBroadcastBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
        Pointer extraParams,
        OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param dbY
 * @param hYShapeInfo
 * @param dYShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param extraParams
 */
public native void execPairwiseTransform(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
        Pointer extraParams);
public native void execPairwiseTransform(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
        Pointer extraParams);
public native void execPairwiseTransform(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
        Pointer extraParams);
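
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execPairwiseTransform. It assumes element-wise x/y/z buffers prepared by the caller and
// that extraPointers, extraParams and device-side shape info may be null on this CPU backend.
private static void exampleExecPairwiseTransformSketch(Nd4jCpu ops, int opNum,
                                                       OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                                       OpaqueDataBuffer dbY, long[] hYShapeInfo,
                                                       OpaqueDataBuffer dbZ, long[] hZShapeInfo) {
    ops.execPairwiseTransform(null, opNum,
                              dbX, hXShapeInfo, null,
                              dbY, hYShapeInfo, null,
                              dbZ, hZShapeInfo, null,
                              null);                        // extraParams: none in this sketch
}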

public native void execPairwiseTransformBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
        Pointer extraParams);
public native void execPairwiseTransformBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
        Pointer extraParams);
public native void execPairwiseTransformBool(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int opNum,
        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
        OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
        Pointer extraParams);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParams
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 */
public native void execReduceFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduceFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduceFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);
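
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execReduceFloat, i.e. a whole-array reduction into dbZ. Buffers and host shape info are
// assumed to be prepared by the caller; extraPointers, extraParams and device-side shape
// info are assumed ignorable (null) on this CPU backend.
private static void exampleExecReduceFloatSketch(Nd4jCpu ops, int opNum,
                                                 OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                                 OpaqueDataBuffer dbZ, long[] hZShapeInfo) {
    ops.execReduceFloat(null, opNum,
                        dbX, hXShapeInfo, null,   // input buffer
                        null,                     // extraParams
                        dbZ, hZShapeInfo, null);  // result buffer
}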

public native void execReduceSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduceSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduceSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);

public native void execReduceBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduceBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduceBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);


public native void execReduceLong(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduceLong(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduceLong(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                 int opNum,
                                 OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                 Pointer extraParams,
                                 OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParams
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbDimension
 * @param hDimensionShape
 * @param dDimensionShape
 */
public native void execReduceFloat2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                    Pointer extraParams,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                    OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execReduceFloat2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                    Pointer extraParams,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                    OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execReduceFloat2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                    Pointer extraParams,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                    OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);


public native void execReduceSame2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execReduceSame2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execReduceSame2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);


public native void execReduceBool2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execReduceBool2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execReduceBool2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);


public native void execReduceLong2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape);
public native void execReduceLong2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape);
public native void execReduceLong2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParamsVals
 * @param dbY
 * @param hYShapeInfo
 * @param dYShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 */
public native void execReduce3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              int opNum,
                              OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                              Pointer extraParamsVals,
                              OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
                              OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduce3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              int opNum,
                              OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                              Pointer extraParamsVals,
                              OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
                              OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduce3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              int opNum,
                              OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                              Pointer extraParamsVals,
                              OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
                              OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);
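
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execReduce3, which reduces over a pair of operands (e.g. a distance-style op). Buffers are
// assumed prepared by the caller; extraPointers, extraParamsVals and device-side shape info
// are assumed ignorable (null) on this CPU backend.
private static void exampleExecReduce3Sketch(Nd4jCpu ops, int opNum,
                                             OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                             OpaqueDataBuffer dbY, long[] hYShapeInfo,
                                             OpaqueDataBuffer dbZ, long[] hZShapeInfo) {
    ops.execReduce3(null, opNum,
                    dbX, hXShapeInfo, null,
                    null,                     // extraParamsVals
                    dbY, hYShapeInfo, null,
                    dbZ, hZShapeInfo, null);
}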

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParamsVals
 * @param dbY
 * @param hYShapeInfo
 * @param dYShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 */
public native void execReduce3Scalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   Pointer extraParamsVals,
                                   OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo);
public native void execReduce3Scalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   Pointer extraParamsVals,
                                   OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo);
public native void execReduce3Scalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   Pointer extraParamsVals,
                                   OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo);
/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParamsVals
 * @param dbY
 * @param hYShapeInfo
 * @param dYShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbDimension
 * @param hDimensionShape
 * @param dDimensionShape
 * @param tadOnlyShapeInfo
 * @param tadOffsets
 * @param yTadOnlyShapeInfo
 * @param yTadOffsets
 */
public native void execReduce3Tad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape,
                                @Cast("const Nd4jLong*") LongPointer tadOnlyShapeInfo, @Cast("const Nd4jLong*") LongPointer tadOffsets,
                                @Cast("const Nd4jLong*") LongPointer yTadOnlyShapeInfo, @Cast("const Nd4jLong*") LongPointer yTadOffsets);
public native void execReduce3Tad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape,
                                @Cast("const Nd4jLong*") LongBuffer tadOnlyShapeInfo, @Cast("const Nd4jLong*") LongBuffer tadOffsets,
                                @Cast("const Nd4jLong*") LongBuffer yTadOnlyShapeInfo, @Cast("const Nd4jLong*") LongBuffer yTadOffsets);
public native void execReduce3Tad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape,
                                @Cast("const Nd4jLong*") long[] tadOnlyShapeInfo, @Cast("const Nd4jLong*") long[] tadOffsets,
                                @Cast("const Nd4jLong*") long[] yTadOnlyShapeInfo, @Cast("const Nd4jLong*") long[] yTadOffsets);


public native void execReduce3All(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape,
                                @Cast("const Nd4jLong*") LongPointer xTadShapeInfo, @Cast("const Nd4jLong*") LongPointer xOffsets,
                                @Cast("const Nd4jLong*") LongPointer yTadShapeInfo, @Cast("const Nd4jLong*") LongPointer yOffsets);
public native void execReduce3All(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape,
                                @Cast("const Nd4jLong*") LongBuffer xTadShapeInfo, @Cast("const Nd4jLong*") LongBuffer xOffsets,
                                @Cast("const Nd4jLong*") LongBuffer yTadShapeInfo, @Cast("const Nd4jLong*") LongBuffer yOffsets);
public native void execReduce3All(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                Pointer extraParamsVals,
                                OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] dYShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape,
                                @Cast("const Nd4jLong*") long[] xTadShapeInfo, @Cast("const Nd4jLong*") long[] xOffsets,
                                @Cast("const Nd4jLong*") long[] yTadShapeInfo, @Cast("const Nd4jLong*") long[] yOffsets);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbScalar
 * @param hSscalarShapeInfo
 * @param dSscalarShapeInfo
 * @param extraParams
 */
public native void execScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                            OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") LongPointer hSscalarShapeInfo, @Cast("const Nd4jLong*") LongPointer dSscalarShapeInfo,
                            Pointer extraParams);
public native void execScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                            OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") LongBuffer hSscalarShapeInfo, @Cast("const Nd4jLong*") LongBuffer dSscalarShapeInfo,
                            Pointer extraParams);
public native void execScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                            OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") long[] hSscalarShapeInfo, @Cast("const Nd4jLong*") long[] dSscalarShapeInfo,
                            Pointer extraParams);
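
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execScalar: applies a scalar op to dbX, writing into dbZ. The scalar itself travels as a
// one-element buffer (dbScalar); extraPointers, extraParams and device-side shape info are
// assumed ignorable (null) on this CPU backend.
private static void exampleExecScalarSketch(Nd4jCpu ops, int opNum,
                                            OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                            OpaqueDataBuffer dbZ, long[] hZShapeInfo,
                                            OpaqueDataBuffer dbScalar, long[] hScalarShapeInfo) {
    ops.execScalar(null, opNum,
                   dbX, hXShapeInfo, null,
                   dbZ, hZShapeInfo, null,
                   dbScalar, hScalarShapeInfo, null,
                   null);                                   // extraParams
}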

public native void execScalarBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") LongPointer hSscalarShapeInfo, @Cast("const Nd4jLong*") LongPointer dSscalarShapeInfo,
                                Pointer extraParams);
public native void execScalarBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") LongBuffer hSscalarShapeInfo, @Cast("const Nd4jLong*") LongBuffer dSscalarShapeInfo,
                                Pointer extraParams);
public native void execScalarBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                int opNum,
                                OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                OpaqueDataBuffer dbScalar, @Cast("const Nd4jLong*") long[] hSscalarShapeInfo, @Cast("const Nd4jLong*") long[] dSscalarShapeInfo,
                                Pointer extraParams);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParams
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param biasCorrected
 */
public native void execSummaryStatsScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                        int opNum,
                                        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                        Pointer extraParams,
                                        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                        @Cast("bool") boolean biasCorrected);
public native void execSummaryStatsScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                        int opNum,
                                        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                        Pointer extraParams,
                                        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                        @Cast("bool") boolean biasCorrected);
public native void execSummaryStatsScalar(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                        int opNum,
                                        OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                        Pointer extraParams,
                                        OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                        @Cast("bool") boolean biasCorrected);
/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParams
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param biasCorrected
 */
public native void execSummaryStats(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo,  @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                  @Cast("bool") boolean biasCorrected);
public native void execSummaryStats(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo,  @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                  @Cast("bool") boolean biasCorrected);
public native void execSummaryStats(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo,  @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                  Pointer extraParams,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                  @Cast("bool") boolean biasCorrected);
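
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execSummaryStats. biasCorrected is assumed to select the bias-corrected (sample) estimator;
// extraPointers, extraParams and device-side shape info are assumed ignorable (null) on CPU.
private static void exampleExecSummaryStatsSketch(Nd4jCpu ops, int opNum,
                                                  OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                                  OpaqueDataBuffer dbZ, long[] hZShapeInfo,
                                                  boolean biasCorrected) {
    ops.execSummaryStats(null, opNum,
                         dbX, hXShapeInfo, null,
                         null,                     // extraParams
                         dbZ, hZShapeInfo, null,
                         biasCorrected);
}
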
/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param extraParams
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbDimension
 * @param hDimensionShape
 * @param dDimensionShape
 * @param biasCorrected
 * @param tadShapeInfo
 * @param tadOffsets
 */
public native void execSummaryStatsTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                     Pointer extraParams,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                     OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape,
                                     @Cast("bool") boolean biasCorrected,
                                     @Cast("const Nd4jLong*") LongPointer tadShapeInfo, @Cast("const Nd4jLong*") LongPointer tadOffsets);
public native void execSummaryStatsTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                     Pointer extraParams,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                     OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape,
                                     @Cast("bool") boolean biasCorrected,
                                     @Cast("const Nd4jLong*") LongBuffer tadShapeInfo, @Cast("const Nd4jLong*") LongBuffer tadOffsets);
public native void execSummaryStatsTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                     Pointer extraParams,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                     OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape,
                                     @Cast("bool") boolean biasCorrected,
                                     @Cast("const Nd4jLong*") long[] tadShapeInfo, @Cast("const Nd4jLong*") long[] tadOffsets);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param extraParams
 */
public native void execTransformFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                    Pointer extraParams);
public native void execTransformFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                    Pointer extraParams);
public native void execTransformFloat(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int opNum,
                                    OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                    OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                    Pointer extraParams);
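
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execTransformFloat: an element-wise transform of dbX into dbZ. extraPointers, extraParams
// and device-side shape info are assumed ignorable (null) on this CPU backend.
private static void exampleExecTransformFloatSketch(Nd4jCpu ops, int opNum,
                                                    OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                                    OpaqueDataBuffer dbZ, long[] hZShapeInfo) {
    ops.execTransformFloat(null, opNum,
                           dbX, hXShapeInfo, null,
                           dbZ, hZShapeInfo, null,
                           null);                   // extraParams
}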

public native void execTransformSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   Pointer extraParams);
public native void execTransformSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   Pointer extraParams);
public native void execTransformSame(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   Pointer extraParams);

public native void execTransformBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   Pointer extraParams);
public native void execTransformBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   Pointer extraParams);
public native void execTransformBool(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   Pointer extraParams);

public native void execTransformAny(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                  Pointer extraParams);
public native void execTransformAny(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                  Pointer extraParams);
public native void execTransformAny(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                  int opNum,
                                  OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                  OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                  Pointer extraParams);

public native void execTransformStrict(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                     Pointer extraParams);
public native void execTransformStrict(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                     Pointer extraParams);
public native void execTransformStrict(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                     int opNum,
                                     OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                     OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                     Pointer extraParams);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param dbX
 * @param hXShapeInfo
 * @param dXShapeInfo
 * @param dbZ
 * @param hZShapeInfo
 * @param dZShapeInfo
 * @param dbScalars
 * @param hScalarShapeInfo
 * @param dScalarShapeInfo
 * @param extraParams
 * @param dbDimension
 * @param hDimensionShape
 * @param dDimensionShape
 * @param tadShapeInfo
 * @param tadOffsets
 * @param tadShapeInfoZ
 * @param tadOffsetsZ
 */
public native void execScalarTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                               OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                               OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") LongPointer hScalarShapeInfo, @Cast("const Nd4jLong*") LongPointer dScalarShapeInfo,
                               Pointer extraParams,
                               OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape,
                               @Cast("const Nd4jLong*") LongPointer tadShapeInfo, @Cast("const Nd4jLong*") LongPointer tadOffsets,
                               @Cast("const Nd4jLong*") LongPointer tadShapeInfoZ, @Cast("const Nd4jLong*") LongPointer tadOffsetsZ);
public native void execScalarTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                               OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                               OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") LongBuffer hScalarShapeInfo, @Cast("const Nd4jLong*") LongBuffer dScalarShapeInfo,
                               Pointer extraParams,
                               OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape,
                               @Cast("const Nd4jLong*") LongBuffer tadShapeInfo, @Cast("const Nd4jLong*") LongBuffer tadOffsets,
                               @Cast("const Nd4jLong*") LongBuffer tadShapeInfoZ, @Cast("const Nd4jLong*") LongBuffer tadOffsetsZ);
public native void execScalarTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                               OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                               OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") long[] hScalarShapeInfo, @Cast("const Nd4jLong*") long[] dScalarShapeInfo,
                               Pointer extraParams,
                               OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape,
                               @Cast("const Nd4jLong*") long[] tadShapeInfo, @Cast("const Nd4jLong*") long[] tadOffsets,
                               @Cast("const Nd4jLong*") long[] tadShapeInfoZ, @Cast("const Nd4jLong*") long[] tadOffsetsZ);
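
// Illustrative sketch (not part of the generated bindings) of the long[] overload of
// execScalarTad, which applies per-TAD scalars along the given dimensions. The TAD shape
// info and offset arrays are assumed to be precomputed by the caller (e.g. via the backend's
// TAD utilities); extraPointers, extraParams and device-side shape info are assumed null-safe
// on this CPU backend.
private static void exampleExecScalarTadSketch(Nd4jCpu ops, int opNum,
                                               OpaqueDataBuffer dbX, long[] hXShapeInfo,
                                               OpaqueDataBuffer dbZ, long[] hZShapeInfo,
                                               OpaqueDataBuffer dbScalars, long[] hScalarShapeInfo,
                                               OpaqueDataBuffer dbDimension, long[] hDimensionShape,
                                               long[] tadShapeInfo, long[] tadOffsets,
                                               long[] tadShapeInfoZ, long[] tadOffsetsZ) {
    ops.execScalarTad(null, opNum,
                      dbX, hXShapeInfo, null,
                      dbZ, hZShapeInfo, null,
                      dbScalars, hScalarShapeInfo, null,
                      null,                                 // extraParams
                      dbDimension, hDimensionShape, null,
                      tadShapeInfo, tadOffsets,             // precomputed TAD pack for x
                      tadShapeInfoZ, tadOffsetsZ);          // precomputed TAD pack for z
}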

public native void execScalarBoolTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeInfo, @Cast("const Nd4jLong*") LongPointer dZShapeInfo,
                                   OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") LongPointer hScalarShapeInfo, @Cast("const Nd4jLong*") LongPointer dScalarShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongPointer hDimensionShape, @Cast("const Nd4jLong*") LongPointer dDimensionShape,
                                   @Cast("const Nd4jLong*") LongPointer tadShapeInfo, @Cast("const Nd4jLong*") LongPointer tadOffsets,
                                   @Cast("const Nd4jLong*") LongPointer tadShapeInfoZ, @Cast("const Nd4jLong*") LongPointer tadOffsetsZ);
public native void execScalarBoolTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeInfo, @Cast("const Nd4jLong*") LongBuffer dZShapeInfo,
                                   OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") LongBuffer hScalarShapeInfo, @Cast("const Nd4jLong*") LongBuffer dScalarShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") LongBuffer hDimensionShape, @Cast("const Nd4jLong*") LongBuffer dDimensionShape,
                                   @Cast("const Nd4jLong*") LongBuffer tadShapeInfo, @Cast("const Nd4jLong*") LongBuffer tadOffsets,
                                   @Cast("const Nd4jLong*") LongBuffer tadShapeInfoZ, @Cast("const Nd4jLong*") LongBuffer tadOffsetsZ);
public native void execScalarBoolTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   int opNum,
                                   OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] dXShapeInfo,
                                   OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeInfo, @Cast("const Nd4jLong*") long[] dZShapeInfo,
                                   OpaqueDataBuffer dbScalars, @Cast("const Nd4jLong*") long[] hScalarShapeInfo, @Cast("const Nd4jLong*") long[] dScalarShapeInfo,
                                   Pointer extraParams,
                                   OpaqueDataBuffer dbDimension, @Cast("const Nd4jLong*") long[] hDimensionShape, @Cast("const Nd4jLong*") long[] dDimensionShape,
                                   @Cast("const Nd4jLong*") long[] tadShapeInfo, @Cast("const Nd4jLong*") long[] tadOffsets,
                                   @Cast("const Nd4jLong*") long[] tadShapeInfoZ, @Cast("const Nd4jLong*") long[] tadOffsetsZ);

public native void specialConcat(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int dimension,
        int numArrays,
        @Cast("Nd4jPointer*") PointerPointer data,
        @Cast("Nd4jPointer*") PointerPointer inputShapeInfo,
        Pointer result,
        @Cast("const Nd4jLong*") LongPointer resultShapeInfo,
        @Cast("Nd4jPointer*") PointerPointer tadPointers,
        @Cast("Nd4jPointer*") PointerPointer offsetPointers);
public native void specialConcat(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int dimension,
        int numArrays,
        @Cast("Nd4jPointer*") PointerPointer data,
        @Cast("Nd4jPointer*") PointerPointer inputShapeInfo,
        Pointer result,
        @Cast("const Nd4jLong*") LongBuffer resultShapeInfo,
        @Cast("Nd4jPointer*") PointerPointer tadPointers,
        @Cast("Nd4jPointer*") PointerPointer offsetPointers);
public native void specialConcat(
        @Cast("Nd4jPointer*") PointerPointer extraPointers,
        int dimension,
        int numArrays,
        @Cast("Nd4jPointer*") PointerPointer data,
        @Cast("Nd4jPointer*") PointerPointer inputShapeInfo,
        Pointer result,
        @Cast("const Nd4jLong*") long[] resultShapeInfo,
        @Cast("Nd4jPointer*") PointerPointer tadPointers,
        @Cast("Nd4jPointer*") PointerPointer offsetPointers);

/**
 * This method's implementation exists only for the CUDA backend.
 * Other backends provide a dummy implementation for JNI compatibility reasons.
 */
public native void initializeDevicesAndFunctions();

public native void initializeFunctions(@Cast("Nd4jPointer*") PointerPointer functions);

/**
 * This method acquires a memory chunk of the requested size on the host side.
 *
 * @param memorySize memory size, in bytes
 * @param flags optional allocation flags
 * @return pointer to the allocated host memory chunk
 */
public native @Cast("Nd4jPointer") Pointer mallocHost(@Cast("Nd4jLong") long memorySize, int flags);

/**
 * This method acquires a memory chunk of the requested size on the specified device.
 *
 * @param memorySize memory size, in bytes
 * @param deviceId id of the target device (for CUDA that's just an int; other backends may use different identifiers)
 * @param flags optional allocation flags
 * @return pointer to the allocated device memory chunk
 */
public native @Cast("Nd4jPointer") Pointer mallocDevice(@Cast("Nd4jLong") long memorySize, int deviceId, int flags);

/**
 * This method releases a previously allocated host memory chunk.
 *
 * @param pointer pointer to be freed
 */
public native int freeHost(@Cast("Nd4jPointer") Pointer pointer);

/**
 * This method releases a previously allocated memory chunk on the given device.
 * A combined usage sketch follows this declaration.
 *
 * @param pointer pointer to be freed
 * @param deviceId id of the device the memory was allocated on
 */
public native int freeDevice(@Cast("Nd4jPointer") Pointer pointer, int deviceId);
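
/*
 * Usage sketch (illustrative, not part of the generated bindings): allocating and releasing
 * host and device memory through these bindings. Assumes nativeOps is an instance of this
 * class; sizes, device id and flag values below are arbitrary examples.
 *
 *   Pointer hostBuf = nativeOps.mallocHost(1024, 0);       // 1 KiB of host memory, default flags
 *   Pointer devBuf  = nativeOps.mallocDevice(1024, 0, 0);  // 1 KiB on device 0, default flags
 *   // ... use the buffers ...
 *   nativeOps.freeDevice(devBuf, 0);                       // release the device chunk
 *   nativeOps.freeHost(hostBuf);                           // then the host buffer
 */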

/**
 *
 * @return the maximum number of OpenMP threads available to this backend
 */
public native int ompGetMaxThreads();

/**
 *
 * @return the number of OpenMP threads currently in use
 */
public native int ompGetNumThreads();

/**
 *
 * @param threads maximum number of OpenMP threads to use
 */
public native void setOmpNumThreads(int threads);

/**
 *
 * @param threads minimum number of OpenMP threads to use
 */
public native void setOmpMinThreads(int threads);
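
/*
 * Usage sketch (illustrative): querying and capping the OpenMP thread count. Assumes
 * nativeOps is an instance of this class; the cap of 4 threads is an arbitrary example.
 *
 *   int maxThreads = nativeOps.ompGetMaxThreads();
 *   nativeOps.setOmpNumThreads(Math.min(maxThreads, 4));
 */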


public native @Cast("bool") boolean isBlasVersionMatches(int major, int minor, int build);

/**
 *
 * @return pointer to a newly created execution context
 */
public native @Cast("Nd4jPointer") Pointer createContext();

/**
 *
 * @return pointer to a newly created stream
 */
public native @Cast("Nd4jPointer") Pointer createStream();

/**
 *
 * @return pointer to a newly created event
 */
public native @Cast("Nd4jPointer") Pointer createEvent();

/**
 *
 * @param event event to register
 * @param stream stream to register the event on
 * @return operation status
 */
public native int registerEvent(@Cast("Nd4jPointer") Pointer event, @Cast("Nd4jPointer") Pointer stream);

/**
 *
 * @param event event to destroy
 * @return operation status
 */
public native int destroyEvent(@Cast("Nd4jPointer") Pointer event);

/**
 *
 * @param deviceId id of the device to make current
 * @return operation status
 */
public native int setDevice(int deviceId);

/**
 *
 * @return id of the current device
 */
public native int getDevice();

/**
 *
 * @param stream stream to synchronize on
 * @return operation status
 */
public native int streamSynchronize(@Cast("Nd4jPointer") Pointer stream);

/**
 *
 * @param event event to synchronize on
 * @return operation status
 */
public native int eventSynchronize(@Cast("Nd4jPointer") Pointer event);

/**
 *
 * @param deviceId id of the device to query
 * @return amount of free memory on the given device, in bytes
 */
public native @Cast("Nd4jLong") long getDeviceFreeMemory(int deviceId);

/**
 * Returns the amount of free memory for the current device
 * @return amount of free memory, in bytes
 */
public native @Cast("Nd4jLong") long getDeviceFreeMemoryDefault();

/**
 *
 * @param deviceId id of the device to query
 * @return total amount of memory on the given device, in bytes
 */
public native @Cast("Nd4jLong") long getDeviceTotalMemory(int deviceId);

/**
 *
 * @param deviceId id of the device to query
 * @return major device version (e.g. compute capability major for CUDA devices)
 */
public native int getDeviceMajor(int deviceId);

/**
 * This method returns the amount of cached memory for the given device
 * @param deviceId id of the device to query
 * @return amount of cached memory, in bytes
 */
public native @Cast("Nd4jLong") long getCachedMemory(int deviceId);

/**
 *
 * @param deviceId id of the device to query
 * @return minor device version (e.g. compute capability minor for CUDA devices)
 */
public native int getDeviceMinor(int deviceId);

/**
 *
 * @param deviceId id of the device to query
 * @return name of the given device
 */
public native @Cast("char*") String getDeviceName(int deviceId);
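
/*
 * Usage sketch (illustrative): querying basic properties of the current device. Assumes
 * nativeOps is an instance of this class.
 *
 *   int deviceCount = nativeOps.getAvailableDevices();
 *   int deviceId    = nativeOps.getDevice();
 *   String name     = nativeOps.getDeviceName(deviceId);
 *   long freeBytes  = nativeOps.getDeviceFreeMemory(deviceId);
 *   long totalBytes = nativeOps.getDeviceTotalMemory(deviceId);
 */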

/**
 * Synchronous memory copy.
 *
 * @param dst destination pointer
 * @param src source pointer
 * @param size number of bytes to copy
 * @param flags copy flags
 * @param reserved reserved/optional pointer
 * @return operation status
 */
public native int memcpySync(@Cast("Nd4jPointer") Pointer dst,
                           @Cast("Nd4jPointer") Pointer src,
                           @Cast("Nd4jLong") long size,
                           int flags,
                           @Cast("Nd4jPointer") Pointer reserved);

/**
 * Asynchronous memory copy.
 *
 * @param dst destination pointer
 * @param src source pointer
 * @param size number of bytes to copy
 * @param flags copy flags
 * @param reserved reserved/optional pointer
 * @return operation status
 */
public native int memcpyAsync(@Cast("Nd4jPointer") Pointer dst,
                            @Cast("Nd4jPointer") Pointer src,
                            @Cast("Nd4jLong") long size,
                            int flags,
                            @Cast("Nd4jPointer") Pointer reserved);

/**
 * Synchronous memory set.
 *
 * @param dst destination pointer
 * @param value byte value to fill with
 * @param size number of bytes to set
 * @param flags memset flags
 * @param reserved reserved/optional pointer
 * @return operation status
 */
public native int memsetSync(@Cast("Nd4jPointer") Pointer dst,
                           int value,
                           @Cast("Nd4jLong") long size,
                           int flags,
                           @Cast("Nd4jPointer") Pointer reserved);

/**
 * Asynchronous memory set.
 *
 * @param dst destination pointer
 * @param value byte value to fill with
 * @param size number of bytes to set
 * @param flags memset flags
 * @param reserved reserved/optional pointer
 * @return operation status
 */
public native int memsetAsync(@Cast("Nd4jPointer") Pointer dst,
                            int value,
                            @Cast("Nd4jLong") long size,
                            int flags,
                            @Cast("Nd4jPointer") Pointer reserved);

/**
 * Asynchronous copy into the constant memory space.
 *
 * @param dst destination offset/address in the constant memory space
 * @param src source pointer
 * @param size number of bytes to copy
 * @param flags copy flags
 * @param reserved reserved/optional pointer
 * @return operation status
 */
public native int memcpyConstantAsync(@Cast("Nd4jLong") long dst,
                                    @Cast("Nd4jPointer") Pointer src,
                                    @Cast("Nd4jLong") long size,
                                    int flags,
                                    @Cast("Nd4jPointer") Pointer reserved);

/**
 *
 * @return pointer to the constant memory space
 */
public native @Cast("Nd4jPointer") Pointer getConstantSpace();

/**
 *
 * @return number of available devices
 */
public native int getAvailableDevices();

/**
 *
 * @param reallyEnable whether debug mode should be enabled
 */
public native void enableDebugMode(@Cast("bool") boolean reallyEnable);

/**
 *
 * @param reallyEnable whether verbose mode should be enabled
 */
public native void enableVerboseMode(@Cast("bool") boolean reallyEnable);

/**
 *
 * @param gridSize maximum grid size to use for kernel launches
 */
public native void setGridLimit(int gridSize);

/**
 * Builds a TAD (tensor-along-dimension) pack for the given shape info and dimensions.
 * A usage sketch follows the accessor declarations below.
 *
 * @param xShapeInfo shape info of the source array
 * @param dimension dimensions to build TADs along
 * @param dimensionLength number of dimensions
 * @return an OpaqueTadPack holding the TAD shape info and offsets
 */
public native OpaqueTadPack tadOnlyShapeInfo(@Cast("const Nd4jLong*") LongPointer xShapeInfo,
                                            IntPointer dimension,
                                            int dimensionLength);
public native OpaqueTadPack tadOnlyShapeInfo(@Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                                            IntBuffer dimension,
                                            int dimensionLength);
public native OpaqueTadPack tadOnlyShapeInfo(@Cast("const Nd4jLong*") long[] xShapeInfo,
                                            int[] dimension,
                                            int dimensionLength);

public native @Cast("const Nd4jLong*") LongPointer getPrimaryShapeInfo(OpaqueTadPack pack);
public native @Cast("const Nd4jLong*") LongPointer getPrimaryOffsets(OpaqueTadPack pack);
public native @Cast("const Nd4jLong*") LongPointer getSpecialShapeInfo(OpaqueTadPack pack);
public native @Cast("const Nd4jLong*") LongPointer getSpecialOffsets(OpaqueTadPack pack);
public native @Cast("Nd4jLong") long getNumberOfTads(OpaqueTadPack pack);
public native int getShapeInfoLength(OpaqueTadPack pack);

public native void deleteTadPack(OpaqueTadPack ptr);
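
/*
 * Usage sketch (illustrative): building a TAD pack for dimension 1 of an existing array and
 * releasing it afterwards. Assumes nativeOps is an instance of this class and that xShapeInfo
 * is a valid Nd4jLong shape-info buffer obtained elsewhere (it is not constructed here).
 *
 *   long[] xShapeInfo = ...;  // shape info of the source array
 *   int[] dims = { 1 };
 *   OpaqueTadPack pack = nativeOps.tadOnlyShapeInfo(xShapeInfo, dims, dims.length);
 *   long numTads = nativeOps.getNumberOfTads(pack);
 *   LongPointer tadShape   = nativeOps.getPrimaryShapeInfo(pack);
 *   LongPointer tadOffsets = nativeOps.getPrimaryOffsets(pack);
 *   // ... pass tadShape/tadOffsets to the exec*Tad methods above ...
 *   nativeOps.deleteTadPack(pack);
 */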

/*
 * PullRow special op
 */

/**
 * Copies the rows of x selected by indexes into z, one TAD per index.
 * @param extraPointers
 * @param x
 * @param xShapeInfo
 * @param z
 * @param zShapeInfo
 * @param n
 * @param indexes
 * @param tadShapeInfo
 * @param tadOffsets
 * @param zTadShapeInfo
 * @param zTadOffsets
 */
public native void pullRows(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                          OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer xShapeInfo, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                          OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer zShapeInfo, @Cast("const Nd4jLong*") LongPointer dzShapeInfo,
                          @Cast("Nd4jLong") long n,
                          @Cast("Nd4jLong*") LongPointer indexes,
                          @Cast("const Nd4jLong*") LongPointer tadShapeInfo,
                          @Cast("const Nd4jLong*") LongPointer tadOffsets,
                          @Cast("const Nd4jLong*") LongPointer zTadShapeInfo,
                          @Cast("const Nd4jLong*") LongPointer zTadOffsets);
public native void pullRows(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                          OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer xShapeInfo, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                          OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer zShapeInfo, @Cast("const Nd4jLong*") LongBuffer dzShapeInfo,
                          @Cast("Nd4jLong") long n,
                          @Cast("Nd4jLong*") LongBuffer indexes,
                          @Cast("const Nd4jLong*") LongBuffer tadShapeInfo,
                          @Cast("const Nd4jLong*") LongBuffer tadOffsets,
                          @Cast("const Nd4jLong*") LongBuffer zTadShapeInfo,
                          @Cast("const Nd4jLong*") LongBuffer zTadOffsets);
public native void pullRows(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                          OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] xShapeInfo, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                          OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] zShapeInfo, @Cast("const Nd4jLong*") long[] dzShapeInfo,
                          @Cast("Nd4jLong") long n,
                          @Cast("Nd4jLong*") long[] indexes,
                          @Cast("const Nd4jLong*") long[] tadShapeInfo,
                          @Cast("const Nd4jLong*") long[] tadOffsets,
                          @Cast("const Nd4jLong*") long[] zTadShapeInfo,
                          @Cast("const Nd4jLong*") long[] zTadOffsets);

/**
 * Averages n input arrays of the given length into the target array z.
 * @param extras
 * @param dx
 * @param dz
 * @param n
 * @param length
 * @param propagate
 */
public native void average(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                         Pointer z, @Cast("const Nd4jLong*") LongPointer zShapeInfo,
                         Pointer dz, @Cast("const Nd4jLong*") LongPointer dzShapeInfo,
                         int n,
                         @Cast("Nd4jLong") long length,
                         @Cast("bool") boolean propagate);
public native void average(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                         Pointer z, @Cast("const Nd4jLong*") LongBuffer zShapeInfo,
                         Pointer dz, @Cast("const Nd4jLong*") LongBuffer dzShapeInfo,
                         int n,
                         @Cast("Nd4jLong") long length,
                         @Cast("bool") boolean propagate);
public native void average(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                         Pointer z, @Cast("const Nd4jLong*") long[] zShapeInfo,
                         Pointer dz, @Cast("const Nd4jLong*") long[] dzShapeInfo,
                         int n,
                         @Cast("Nd4jLong") long length,
                         @Cast("bool") boolean propagate);


public native void accumulate(@Cast("Nd4jPointer*") PointerPointer extras,
                            @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                            @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                            Pointer z, @Cast("const Nd4jLong*") LongPointer zShapeInfo,
                            Pointer dz, @Cast("const Nd4jLong*") LongPointer dzShapeInfo,
                            int n,
                            @Cast("Nd4jLong") long length);
public native void accumulate(@Cast("Nd4jPointer*") PointerPointer extras,
                            @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                            @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                            Pointer z, @Cast("const Nd4jLong*") LongBuffer zShapeInfo,
                            Pointer dz, @Cast("const Nd4jLong*") LongBuffer dzShapeInfo,
                            int n,
                            @Cast("Nd4jLong") long length);
public native void accumulate(@Cast("Nd4jPointer*") PointerPointer extras,
                            @Cast("Nd4jPointer*") PointerPointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                            @Cast("Nd4jPointer*") PointerPointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                            Pointer z, @Cast("const Nd4jLong*") long[] zShapeInfo,
                            Pointer dz, @Cast("const Nd4jLong*") long[] dzShapeInfo,
                            int n,
                            @Cast("Nd4jLong") long length);


/**
 * P2P enabler
 */
/**
 *
 * @param enable
 */
public native void enableP2P(@Cast("bool") boolean enable);

/**
 *
 */
public native void checkP2P();

/**
 *
 * @return
 */
public native @Cast("bool") boolean isP2PAvailable();

/**
 * Shuffle methods
 */

/**
 *
 * @param extras
 * @param dx
 * @param xShapeInfo
 * @param dz
 * @param zShapeInfo
 * @param N
 * @param shuffleMap
 * @param tadShapeInfo
 * @param tadOffsets
 */
public native void shuffle(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("Nd4jPointer*") PointerPointer xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("Nd4jPointer*") PointerPointer dxShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer z, @Cast("Nd4jPointer*") PointerPointer zShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dz, @Cast("Nd4jPointer*") PointerPointer dzShapeInfo,
                         int N,
                         IntPointer shuffleMap,
                         @Cast("Nd4jPointer*") PointerPointer tadShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer tadOffsets);
public native void shuffle(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("Nd4jPointer*") PointerPointer xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("Nd4jPointer*") PointerPointer dxShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer z, @Cast("Nd4jPointer*") PointerPointer zShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dz, @Cast("Nd4jPointer*") PointerPointer dzShapeInfo,
                         int N,
                         IntBuffer shuffleMap,
                         @Cast("Nd4jPointer*") PointerPointer tadShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer tadOffsets);
public native void shuffle(@Cast("Nd4jPointer*") PointerPointer extras,
                         @Cast("Nd4jPointer*") PointerPointer x, @Cast("Nd4jPointer*") PointerPointer xShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dx, @Cast("Nd4jPointer*") PointerPointer dxShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer z, @Cast("Nd4jPointer*") PointerPointer zShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer dz, @Cast("Nd4jPointer*") PointerPointer dzShapeInfo,
                         int N,
                         int[] shuffleMap,
                         @Cast("Nd4jPointer*") PointerPointer tadShapeInfo,
                         @Cast("Nd4jPointer*") PointerPointer tadOffsets);


/**
 * Type Conversions
 */

/**
 * Converts N elements from srcType to dstType, reading from x and writing to z.
 * @param extras
 * @param srcType
 * @param x
 * @param N
 * @param dstType
 * @param z
 */
public native void convertTypes(@Cast("Nd4jPointer*") PointerPointer extras, int srcType, @Cast("Nd4jPointer") Pointer x, @Cast("Nd4jLong") long N, int dstType, @Cast("Nd4jPointer") Pointer z);


/**
 *
 * @return
 */
public native @Cast("bool") boolean isExperimentalEnabled();

/**
 * Aggregate
 */

/**
 *
 * @param extraPointers
 * @param opNum
 * @param arguments
 * @param numArguments
 * @param shapeArguments
 * @param numShapeArguments
 * @param indexArguments
 * @param numIndexArguments
 * @param intArrays
 * @param numIntArrays
 * @param realArguments
 * @param numRealArguments
 */
public native void execAggregate(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               @Cast("void**") PointerPointer arguments,
                               int numArguments,
                               @Cast("Nd4jLong**") PointerPointer shapeArguments,
                               int numShapeArguments,
                               IntPointer indexArguments,
                               int numIndexArguments,
                               @Cast("int**") PointerPointer intArrays,
                               int numIntArrays,
                               Pointer realArguments,
                               int numRealArguments,
                               @Cast("sd::DataType") int dtype);
public native void execAggregate(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               @Cast("void**") @ByPtrPtr Pointer arguments,
                               int numArguments,
                               @Cast("Nd4jLong**") @ByPtrPtr LongPointer shapeArguments,
                               int numShapeArguments,
                               IntPointer indexArguments,
                               int numIndexArguments,
                               @ByPtrPtr IntPointer intArrays,
                               int numIntArrays,
                               Pointer realArguments,
                               int numRealArguments,
                               @Cast("sd::DataType") int dtype);
public native void execAggregate(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               @Cast("void**") @ByPtrPtr Pointer arguments,
                               int numArguments,
                               @Cast("Nd4jLong**") @ByPtrPtr LongBuffer shapeArguments,
                               int numShapeArguments,
                               IntBuffer indexArguments,
                               int numIndexArguments,
                               @ByPtrPtr IntBuffer intArrays,
                               int numIntArrays,
                               Pointer realArguments,
                               int numRealArguments,
                               @Cast("sd::DataType") int dtype);
public native void execAggregate(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int opNum,
                               @Cast("void**") @ByPtrPtr Pointer arguments,
                               int numArguments,
                               @Cast("Nd4jLong**") @ByPtrPtr long[] shapeArguments,
                               int numShapeArguments,
                               int[] indexArguments,
                               int numIndexArguments,
                               @ByPtrPtr int[] intArrays,
                               int numIntArrays,
                               Pointer realArguments,
                               int numRealArguments,
                               @Cast("sd::DataType") int dtype);


public native void batchExecutor(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               int numAggregates,
                               int opNum,
                               int maxArgs,
                               int maxShapes,
                               int maxIntArrays,
                               int maxIntArraySize,
                               int maxIdx,
                               int maxReals,
                               Pointer ptrToArguments,
                               @Cast("sd::DataType") int dtype);

public native void execAggregateBatch(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                    int numAggregates,
                                    int opNum,
                                    int maxArgs,
                                    int maxShapes,
                                    int maxIntArrays,
                                    int maxIntArraySize,
                                    int maxIdx,
                                    int maxReals,
                                    Pointer ptrToArguments,
                                    @Cast("sd::DataType") int dtype);

/**
 * Random operations
 */

/**
 *
 * @param extraPointers
 * @param opNum
 * @param state
 * @param z
 * @param zShapeBuffer
 * @param extraArguments
 */
public native void execRandom(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            @Cast("Nd4jPointer") Pointer state,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeBuffer, @Cast("const Nd4jLong*") LongPointer dZShapeBuffer,
                            Pointer extraArguments);
public native void execRandom(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            @Cast("Nd4jPointer") Pointer state,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dZShapeBuffer,
                            Pointer extraArguments);
public native void execRandom(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                            int opNum,
                            @Cast("Nd4jPointer") Pointer state,
                            OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeBuffer, @Cast("const Nd4jLong*") long[] dZShapeBuffer,
                            Pointer extraArguments);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param state
 * @param x
 * @param xShapeBuffer
 * @param y
 * @param yShapeBuffer
 * @param z
 * @param zShapeBuffer
 * @param extraArguments
 */
public native void execRandom3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeBuffer, @Cast("const Nd4jLong*") LongPointer dXShapeBuffer,
                             OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongPointer hYShapeBuffer, @Cast("const Nd4jLong*") LongPointer dYShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeBuffer, @Cast("const Nd4jLong*") LongPointer dZShapeBuffer,
                             Pointer extraArguments);
public native void execRandom3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dXShapeBuffer,
                             OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") LongBuffer hYShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dYShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dZShapeBuffer,
                             Pointer extraArguments);
public native void execRandom3(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeBuffer, @Cast("const Nd4jLong*") long[] dXShapeBuffer,
                             OpaqueDataBuffer dbY, @Cast("const Nd4jLong*") long[] hYShapeBuffer, @Cast("const Nd4jLong*") long[] dYShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeBuffer, @Cast("const Nd4jLong*") long[] dZShapeBuffer,
                             Pointer extraArguments);

/**
 *
 * @param extraPointers
 * @param opNum
 * @param state
 * @param x
 * @param xShapeBuffer
 * @param z
 * @param zShapeBuffer
 * @param extraArguments
 */
public native void execRandom2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer hXShapeBuffer, @Cast("const Nd4jLong*") LongPointer dXShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongPointer hZShapeBuffer, @Cast("const Nd4jLong*") LongPointer dZShapeBuffer,
                             Pointer extraArguments);
public native void execRandom2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer hXShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dXShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") LongBuffer hZShapeBuffer, @Cast("const Nd4jLong*") LongBuffer dZShapeBuffer,
                             Pointer extraArguments);
public native void execRandom2(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             int opNum,
                             @Cast("Nd4jPointer") Pointer state,
                             OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] hXShapeBuffer, @Cast("const Nd4jLong*") long[] dXShapeBuffer,
                             OpaqueDataBuffer dbZ, @Cast("const Nd4jLong*") long[] hZShapeBuffer, @Cast("const Nd4jLong*") long[] dZShapeBuffer,
                             Pointer extraArguments);


/**
 *
 * @param extraPointers
 * @param seed
 * @param bufferSize
 * @param ptrToBuffer
 * @return
 */
public native @Cast("Nd4jPointer") Pointer initRandom(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                   long seed,
                                   long bufferSize,
                                   @Cast("Nd4jPointer") Pointer ptrToBuffer);

/**
 *
 * @param extraPointers
 * @param seed
 * @param ptrRandom
 */
public native void refreshBuffer(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                               long seed,
                               @Cast("Nd4jPointer") Pointer ptrRandom);

/**
 *
 * @param extraPointers
 * @param seed
 * @param ptrRandom
 */
public native void reSeedBuffer(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              long seed,
                              @Cast("Nd4jPointer") Pointer ptrRandom);

/**
 *
 * @param ptrRandom
 */
public native void destroyRandom(@Cast("Nd4jPointer") Pointer ptrRandom);
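
/*
 * Usage sketch (illustrative): creating, re-seeding and destroying a native random generator.
 * Assumes nativeOps is an instance of this class; the buffer size of 100000 elements and the
 * use of mallocHost for the state buffer are assumptions, not documented requirements.
 *
 *   long bufferSize = 100000;
 *   Pointer buffer  = nativeOps.mallocHost(bufferSize * 8, 0);   // 8 bytes per element
 *   Pointer rng     = nativeOps.initRandom(null, 119L, bufferSize, buffer);
 *   // ... pass rng as the state argument of the execRandom* methods above ...
 *   nativeOps.reSeedBuffer(null, 42L, rng);
 *   nativeOps.destroyRandom(rng);
 *   nativeOps.freeHost(buffer);
 */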

/**
* Creates a numpy header for the given nd4j data and shape buffer.
* @param data
* @param shapeBuffer
* @param wordSize
* @param headerSize
* @return
*/

public native @Cast("Nd4jPointer") Pointer numpyHeaderForNd4j(@Cast("Nd4jPointer") Pointer data,@Cast("Nd4jPointer") Pointer shapeBuffer,@Cast("Nd4jLong") long wordSize,@Cast("Nd4jLong*") LongPointer headerSize);
public native @Cast("Nd4jPointer") Pointer numpyHeaderForNd4j(@Cast("Nd4jPointer") Pointer data,@Cast("Nd4jPointer") Pointer shapeBuffer,@Cast("Nd4jLong") long wordSize,@Cast("Nd4jLong*") LongBuffer headerSize);
public native @Cast("Nd4jPointer") Pointer numpyHeaderForNd4j(@Cast("Nd4jPointer") Pointer data,@Cast("Nd4jPointer") Pointer shapeBuffer,@Cast("Nd4jLong") long wordSize,@Cast("Nd4jLong*") long[] headerSize);

/**
* Load numpy from a header
* based on cnpy's parse-from-header routine.
* @param data the header data to parse
* @return a pointer to a cnpy::NpyArray struct
*/
public native @Cast("Nd4jPointer") Pointer loadNpyFromHeader(@Cast("Nd4jPointer") Pointer data);

/**
* Create a numpy array from an nd4j
* array
* @param data a pointer to the data
* @param shapeBuffer  the shape buffer for the nd4j array
* @param wordSize  the element word size, in bytes (4 for floats, 8 for doubles)
* @return a pointer to a numpy array
*/

public native @Cast("Nd4jPointer") Pointer numpyFromNd4j(@Cast("Nd4jPointer") Pointer data,@Cast("Nd4jPointer") Pointer shapeBuffer,@Cast("Nd4jLong") long wordSize);


/**
*
* @param npyArray
* @return
*/
public native @Cast("Nd4jPointer") Pointer shapeBufferForNumpy(@Cast("Nd4jPointer") Pointer npyArray);


/**
* Get the shape buffer from a
* numpy array.
* **Warning** this allocates memory
* @param npyArray
* @return
*/
public native @Cast("Nd4jPointer") Pointer shapeBufferForNumpyHeader(@Cast("Nd4jPointer") Pointer npyArray);



/**
*
* @param npyArray
* @return
*/
public native @Cast("Nd4jPointer") Pointer dataPointForNumpyHeader(@Cast("Nd4jPointer") Pointer npyArray);

/**
*
* @param npyArrayStruct
* @return
*/
public native @Cast("Nd4jPointer") Pointer dataPointForNumpyStruct(@Cast("Nd4jPointer") Pointer npyArrayStruct);

/**
*
* @param npyArray
* @return
*/
public native @Cast("Nd4jPointer") Pointer dataPointForNumpy(@Cast("Nd4jPointer") Pointer npyArray);

/**
* Load a numpy array from a file
* and return it as an Nd4jPointer
* @param path
* @return
*/
public native @Cast("Nd4jPointer") Pointer numpyFromFile(@StdString BytePointer path);
public native @Cast("Nd4jPointer") Pointer numpyFromFile(@StdString String path);
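
/*
 * Usage sketch (illustrative): loading a .npy file and inspecting it through these bindings.
 * Assumes nativeOps is an instance of this class; the path is a placeholder.
 *
 *   Pointer npy   = nativeOps.numpyFromFile("/path/to/array.npy");
 *   Pointer data  = nativeOps.dataPointForNumpy(npy);
 *   Pointer shape = nativeOps.shapeBufferForNumpy(npy);   // note: allocates memory
 *   // ... consume data/shape ...
 *   nativeOps.releaseNumpy(npy);
 */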


////// NPZ //////

public native Pointer mapFromNpzFile(@StdString BytePointer path);
public native Pointer mapFromNpzFile(@StdString String path);


public native int getNumNpyArraysInMap(Pointer map);

public native @Cast("char*") String getNpyArrayNameFromMap(Pointer map, int index,@Cast("char*") BytePointer nameBuffer);
public native @Cast("char*") BytePointer getNpyArrayNameFromMap(Pointer map, int index,@Cast("char*") String nameBuffer);

public native Pointer getNpyArrayFromMap(Pointer map, int index);

public native int dataTypeFromNpyHeader(Pointer header);

public native Pointer getNpyArrayData(Pointer npArray);

public native int getNpyArrayRank(Pointer npArray);

public native @Cast("Nd4jLong*") LongPointer getNpyArrayShape(Pointer npArray);

public native char getNpyArrayOrder(Pointer npArray);

public native int getNpyArrayElemSize(Pointer npArray);

public native void deleteNPArrayStruct(Pointer npArray);

public native void deleteNPArrayMap(Pointer map);
//////
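
/*
 * Usage sketch (illustrative): iterating over the arrays of an .npz file. Assumes nativeOps
 * is an instance of this class; the path is a placeholder and cleanup is limited to the map
 * itself here.
 *
 *   Pointer map = nativeOps.mapFromNpzFile("/path/to/arrays.npz");
 *   int count = nativeOps.getNumNpyArraysInMap(map);
 *   for (int i = 0; i < count; i++) {
 *       Pointer arr = nativeOps.getNpyArrayFromMap(map, i);
 *       int rank = nativeOps.getNpyArrayRank(arr);
 *       LongPointer shape = nativeOps.getNpyArrayShape(arr);
 *       Pointer data = nativeOps.getNpyArrayData(arr);
 *       // ... consume rank/shape/data ...
 *   }
 *   nativeOps.deleteNPArrayMap(map);
 */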

/**
* Get the element size for a numpy array
* @param npyArray  the address of the numpy array
* to get the element size for
* @return
*/
public native int elementSizeForNpyArray(@Cast("Nd4jPointer") Pointer npyArray);


/**
* Get the element size for a numpy array header
* @param npyArray  the address of the numpy array header
* to get the element size for
* @return
*/
public native int elementSizeForNpyArrayHeader(@Cast("Nd4jPointer") Pointer npyArray);


public native void releaseNumpy(@Cast("Nd4jPointer") Pointer npyArray);


/**
 * Return the length of a shape buffer
 * based on the pointer
 * @param buffer  the buffer pointer to check
 * @return
 */
public native int lengthForShapeBufferPointer(@Cast("Nd4jPointer") Pointer buffer);


/**
* Returns a Pointer wrapping the given raw memory address.
*
* @param address the address to wrap
* @return the pointer for the given address
*/

public native @Cast("Nd4jPointer") Pointer pointerForAddress(@Cast("Nd4jLong") long _address);
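
/*
 * Usage sketch (illustrative): wrapping a raw address previously obtained from native code.
 * Assumes nativeOps is an instance of this class and address holds a valid native address.
 *
 *   long address = ...;                                   // e.g. taken from another native call
 *   Pointer p = nativeOps.pointerForAddress(address);
 *   int len = nativeOps.lengthForShapeBufferPointer(p);   // only meaningful if p is a shape buffer
 */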

/**
 * This method takes a single N-dimensional tensor and copies its TADs to the target arrays.
 *
 * @param x
 * @param xShapeInfo
 * @param targets
 * @param zShapeInfo
 * @return
 */
public native void tear(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongPointer xShapeInfo, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                      @Cast("Nd4jPointer*") PointerPointer targets, @Cast("const Nd4jLong*") LongPointer zShapeInfo,
                      @Cast("const Nd4jLong*") LongPointer tadShapeInfo,
                      @Cast("const Nd4jLong*") LongPointer tadOffsets);
public native void tear(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") LongBuffer xShapeInfo, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                      @Cast("Nd4jPointer*") PointerPointer targets, @Cast("const Nd4jLong*") LongBuffer zShapeInfo,
                      @Cast("const Nd4jLong*") LongBuffer tadShapeInfo,
                      @Cast("const Nd4jLong*") LongBuffer tadOffsets);
public native void tear(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      OpaqueDataBuffer dbX, @Cast("const Nd4jLong*") long[] xShapeInfo, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                      @Cast("Nd4jPointer*") PointerPointer targets, @Cast("const Nd4jLong*") long[] zShapeInfo,
                      @Cast("const Nd4jLong*") long[] tadShapeInfo,
                      @Cast("const Nd4jLong*") long[] tadOffsets);

public native void sort(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                      Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                      @Cast("bool") boolean descending);
public native void sort(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                      Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                      @Cast("bool") boolean descending);
public native void sort(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                      Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                      Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                      @Cast("bool") boolean descending);

public native void sortByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                           Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                           Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                           Pointer y, @Cast("const Nd4jLong*") LongPointer yShapeInfo,
                           Pointer dy, @Cast("const Nd4jLong*") LongPointer dyShapeInfo,
                           @Cast("bool") boolean descending);
public native void sortByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                           Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                           Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                           Pointer y, @Cast("const Nd4jLong*") LongBuffer yShapeInfo,
                           Pointer dy, @Cast("const Nd4jLong*") LongBuffer dyShapeInfo,
                           @Cast("bool") boolean descending);
public native void sortByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                           Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                           Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                           Pointer y, @Cast("const Nd4jLong*") long[] yShapeInfo,
                           Pointer dy, @Cast("const Nd4jLong*") long[] dyShapeInfo,
                           @Cast("bool") boolean descending);

public native void sortByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                             Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                             Pointer y, @Cast("const Nd4jLong*") LongPointer yShapeInfo,
                             Pointer dy, @Cast("const Nd4jLong*") LongPointer dyShapeInfo,
                             @Cast("bool") boolean descending);
public native void sortByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                             Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                             Pointer y, @Cast("const Nd4jLong*") LongBuffer yShapeInfo,
                             Pointer dy, @Cast("const Nd4jLong*") LongBuffer dyShapeInfo,
                             @Cast("bool") boolean descending);
public native void sortByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                             Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                             Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                             Pointer y, @Cast("const Nd4jLong*") long[] yShapeInfo,
                             Pointer dy, @Cast("const Nd4jLong*") long[] dyShapeInfo,
                             @Cast("bool") boolean descending);

public native void sortTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                         Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                         Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                         IntPointer dimension,
                         int dimensionLength,
                         @Cast("const Nd4jLong*") LongPointer tadShapeInfo,
                         @Cast("const Nd4jLong*") LongPointer tadOffsets,
                         @Cast("bool") boolean descending);
public native void sortTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                         Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                         Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                         IntBuffer dimension,
                         int dimensionLength,
                         @Cast("const Nd4jLong*") LongBuffer tadShapeInfo,
                         @Cast("const Nd4jLong*") LongBuffer tadOffsets,
                         @Cast("bool") boolean descending);
public native void sortTad(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                         Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                         Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                         int[] dimension,
                         int dimensionLength,
                         @Cast("const Nd4jLong*") long[] tadShapeInfo,
                         @Cast("const Nd4jLong*") long[] tadOffsets,
                         @Cast("bool") boolean descending);

public native void sortTadByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                              Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                              Pointer y, @Cast("const Nd4jLong*") LongPointer yShapeInfo,
                              Pointer dy, @Cast("const Nd4jLong*") LongPointer dyShapeInfo,
                              IntPointer dimension,
                              int dimensionLength,
                              @Cast("bool") boolean descending);
public native void sortTadByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                              Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                              Pointer y, @Cast("const Nd4jLong*") LongBuffer yShapeInfo,
                              Pointer dy, @Cast("const Nd4jLong*") LongBuffer dyShapeInfo,
                              IntBuffer dimension,
                              int dimensionLength,
                              @Cast("bool") boolean descending);
public native void sortTadByKey(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                              Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                              Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                              Pointer y, @Cast("const Nd4jLong*") long[] yShapeInfo,
                              Pointer dy, @Cast("const Nd4jLong*") long[] dyShapeInfo,
                              int[] dimension,
                              int dimensionLength,
                              @Cast("bool") boolean descending);

public native void sortTadByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo,
                                Pointer dx, @Cast("const Nd4jLong*") LongPointer dxShapeInfo,
                                Pointer y, @Cast("const Nd4jLong*") LongPointer yShapeInfo,
                                Pointer dy, @Cast("const Nd4jLong*") LongPointer dyShapeInfo,
                                IntPointer dimension,
                                int dimensionLength,
                                @Cast("bool") boolean descending);
public native void sortTadByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo,
                                Pointer dx, @Cast("const Nd4jLong*") LongBuffer dxShapeInfo,
                                Pointer y, @Cast("const Nd4jLong*") LongBuffer yShapeInfo,
                                Pointer dy, @Cast("const Nd4jLong*") LongBuffer dyShapeInfo,
                                IntBuffer dimension,
                                int dimensionLength,
                                @Cast("bool") boolean descending);
public native void sortTadByValue(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo,
                                Pointer dx, @Cast("const Nd4jLong*") long[] dxShapeInfo,
                                Pointer y, @Cast("const Nd4jLong*") long[] yShapeInfo,
                                Pointer dy, @Cast("const Nd4jLong*") long[] dyShapeInfo,
                                int[] dimension,
                                int dimensionLength,
                                @Cast("bool") boolean descending);


// special sort implementation for sorting COO indices and their values
public native void sortCooIndices(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                @Cast("Nd4jLong*") LongPointer indices,
                                Pointer x,
                                @Cast("Nd4jLong") long length,
                                @Cast("const Nd4jLong*") LongPointer xShapeInfo);
public native void sortCooIndices(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                @Cast("Nd4jLong*") LongBuffer indices,
                                Pointer x,
                                @Cast("Nd4jLong") long length,
                                @Cast("const Nd4jLong*") LongBuffer xShapeInfo);
public native void sortCooIndices(@Cast("Nd4jPointer*") PointerPointer extraPointers,
                                @Cast("Nd4jLong*") long[] indices,
                                Pointer x,
                                @Cast("Nd4jLong") long length,
                                @Cast("const Nd4jLong*") long[] xShapeInfo);

/**
 *
 * @param extraPointers     not used
 * @param indices           DataBuffer containing COO indices for a sparse matrix that is to be raveled/flattened
 * @param flatIndices       DataBuffer where the raveled/flattened indices are to be written to
 * @param length            number of non-zero entries (length of flatIndices)
 * @param fullShapeBuffer   DataBuffer with ShapeInfo for the full matrix to be flattened
 * @param mode              clipMode determines the strategy to use if some of the passed COO indices do
 *                          not fit into the shape determined by fullShapeBuffer (see the worked illustration below)
 *                              0   throw an exception (default)
 *                              1   wrap around shape
 *                              2   clip to shape
 */
public native void ravelMultiIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongPointer indices, @Cast("Nd4jLong*") LongPointer flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") LongPointer shapeInfo, int mode);
public native void ravelMultiIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongBuffer indices, @Cast("Nd4jLong*") LongBuffer flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") LongBuffer shapeInfo, int mode);
public native void ravelMultiIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") long[] indices, @Cast("Nd4jLong*") long[] flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") long[] shapeInfo, int mode);
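
/*
 * Worked illustration (assuming row-major/C ordering of the full matrix): for a full shape of
 * (3, 4), the COO index (1, 2) ravels to the flat index 1*4 + 2 = 6. An out-of-range index such
 * as (2, 5) either raises an exception (mode 0), wraps to (2, 1) giving 2*4 + 1 = 9 (mode 1),
 * or clips to (2, 3) giving 2*4 + 3 = 11 (mode 2).
 */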

/**
 *
 * @param extraPointers     not used
 * @param indices           DataBuffer where the unraveled COO indices are to be written
 * @param flatIndices       DataBuffer containing the raveled/flattened indices to be unraveled
 * @param length            number of non-zero entries (length of flatIndices)
 * @param fullShapeBuffer   DataBuffer with ShapeInfo for the full matrix to be unraveled
 */
public native void unravelIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongPointer indices, @Cast("Nd4jLong*") LongPointer flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") LongPointer shapeInfo);
public native void unravelIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongBuffer indices, @Cast("Nd4jLong*") LongBuffer flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") LongBuffer shapeInfo);
public native void unravelIndex(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") long[] indices, @Cast("Nd4jLong*") long[] flatIndices, @Cast("Nd4jLong") long length,  @Cast("Nd4jLong*") long[] shapeInfo);

public native @Cast("Nd4jLong*") LongPointer mmapFile(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("char*") String fileName, @Cast("Nd4jLong") long length);
public native @Cast("Nd4jLong*") LongBuffer mmapFile(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("char*") BytePointer fileName, @Cast("Nd4jLong") long length);

public native void munmapFile(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongPointer ptrMap, @Cast("Nd4jLong") long length);
public native void munmapFile(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") LongBuffer ptrMap, @Cast("Nd4jLong") long length);
public native void munmapFile(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong*") long[] ptrMap, @Cast("Nd4jLong") long length);

// flatbuffers execution
public native OpaqueResultWrapper executeFlatGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer flatBufferPointer);

public native @Cast("Nd4jLong") long getResultWrapperSize(OpaqueResultWrapper ptr);
public native @Cast("Nd4jPointer") Pointer getResultWrapperPointer(OpaqueResultWrapper ptr);

public native @Cast("char*") String getAllCustomOps();

public native @Cast("char*") String getAllOperations();
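
/*
 * Usage sketch (illustrative): listing the operations registered with the native backend.
 * Assumes nativeOps is an instance of this class; the exact formatting of the returned
 * descriptor strings is backend-defined.
 *
 *   System.out.println(nativeOps.getAllCustomOps());
 *   System.out.println(nativeOps.getAllOperations());
 */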

// customOp executioner
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, DoublePointer tArgs, int numTArgs, @Cast("Nd4jLong*") LongPointer iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, DoubleBuffer tArgs, int numTArgs, @Cast("Nd4jLong*") LongBuffer iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, double[] tArgs, int numTArgs, @Cast("Nd4jLong*") long[] iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, DoublePointer tArgs, int numTArgs, @Cast("Nd4jLong*") LongPointer iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, DoubleBuffer tArgs, int numTArgs, @Cast("Nd4jLong*") LongBuffer iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs, double[] tArgs, int numTArgs, @Cast("Nd4jLong*") long[] iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, @Cast("bool") boolean isInplace);
public native int execCustomOp2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer") Pointer opContext);

public native OpaqueShapeList calculateOutputShapes(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoublePointer tArgs, int numTArgs, @Cast("Nd4jLong*") LongPointer iArgs, int numIArgs);
public native OpaqueShapeList calculateOutputShapes(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoubleBuffer tArgs, int numTArgs, @Cast("Nd4jLong*") LongBuffer iArgs, int numIArgs);
public native OpaqueShapeList calculateOutputShapes(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, double[] tArgs, int numTArgs, @Cast("Nd4jLong*") long[] iArgs, int numIArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoublePointer tArgs, int numTArgs, @Cast("Nd4jLong*") LongPointer iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, IntPointer dArgs, int numDArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoubleBuffer tArgs, int numTArgs, @Cast("Nd4jLong*") LongBuffer iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, IntBuffer dArgs, int numDArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, double[] tArgs, int numTArgs, @Cast("Nd4jLong*") long[] iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, int[] dArgs, int numDArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoublePointer tArgs, int numTArgs, @Cast("Nd4jLong*") LongPointer iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, IntPointer dArgs, int numDArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, DoubleBuffer tArgs, int numTArgs, @Cast("Nd4jLong*") LongBuffer iArgs, int numIArgs, @Cast("bool*") BooleanPointer bArgs, int numBArgs, IntBuffer dArgs, int numDArgs);
public native OpaqueShapeList calculateOutputShapes2(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long hash, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputShapes, double[] tArgs, int numTArgs, @Cast("Nd4jLong*") long[] iArgs, int numIArgs, @Cast("bool*") boolean[] bArgs, int numBArgs, int[] dArgs, int numDArgs);

public native @Cast("Nd4jLong") long getShapeListSize(OpaqueShapeList list);
public native @Cast("const Nd4jLong*") LongPointer getShape(OpaqueShapeList list, @Cast("Nd4jLong") long i);

public native void deleteShapeList(@Cast("Nd4jPointer") Pointer shapeList);
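
// Usage sketch: a minimal, hypothetical flow for querying the output shapes of a custom op and
// then releasing the result. "opHash", "inputShapes" and "numInputs" are placeholders assumed to
// be prepared elsewhere; the empty tArgs/iArgs arrays are just illustrative defaults.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     OpaqueShapeList shapes = nativeOps.calculateOutputShapes(null, opHash, inputShapes, numInputs,
//                                                              new double[0], 0, new long[0], 0);
//     long count = nativeOps.getShapeListSize(shapes);
//     for (long i = 0; i < count; i++) {
//         LongPointer shapeInfo = nativeOps.getShape(shapes, i);   // borrowed shapeInfo descriptor
//         // ... read rank/shape/strides out of shapeInfo ...
//     }
//     nativeOps.deleteShapeList(shapes);                           // release the list when done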

public native int registerGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long graphId, @Cast("Nd4jPointer") Pointer flatBufferPointer);

public native OpaqueVariablesSet executeStoredGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long graphId, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, IntPointer inputIndices, int numInputs);
public native OpaqueVariablesSet executeStoredGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long graphId, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, IntBuffer inputIndices, int numInputs);
public native OpaqueVariablesSet executeStoredGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long graphId, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int[] inputIndices, int numInputs);

public native @Cast("Nd4jLong") long getVariablesSetSize(OpaqueVariablesSet set);
public native @Cast("Nd4jStatus") int getVariablesSetStatus(OpaqueVariablesSet set);
public native OpaqueVariable getVariable(OpaqueVariablesSet set, @Cast("Nd4jLong") long i);
public native int getVariableId(OpaqueVariable variable);
public native int getVariableIndex(OpaqueVariable variable);
public native @Cast("char*") String getVariableName(OpaqueVariable variable);
public native @Cast("const Nd4jLong*") LongPointer getVariableShape(OpaqueVariable variable);
public native Pointer getVariableBuffer(OpaqueVariable variable);

public native int unregisterGraph(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jLong") long graphId);

public native void deleteCharArray(@Cast("Nd4jPointer") Pointer pointer);
public native void deleteIntArray(@Cast("Nd4jPointer") Pointer pointer);
public native void deleteLongArray(@Cast("Nd4jPointer") Pointer pointer);
public native void deletePointerArray(@Cast("Nd4jPointer") Pointer pointer);

public native void deleteVariablesSet(OpaqueVariablesSet pointer);
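
// Usage sketch: a hypothetical round trip through the graph API above. The flatbuffer pointer,
// input buffers/shapes and input indices are assumed to already exist; "graphId" is an arbitrary
// caller-chosen identifier, and 0 is assumed to be the OK status code.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     nativeOps.registerGraph(null, graphId, flatBufferPointer);
//     OpaqueVariablesSet vars = nativeOps.executeStoredGraph(null, graphId,
//             inputBuffers, inputShapes, new int[]{0}, 1);
//     if (nativeOps.getVariablesSetStatus(vars) == 0) {
//         for (long i = 0; i < nativeOps.getVariablesSetSize(vars); i++) {
//             OpaqueVariable v = nativeOps.getVariable(vars, i);
//             String name = nativeOps.getVariableName(v);
//             Pointer data = nativeOps.getVariableBuffer(v);       // interpreted via getVariableShape(v)
//         }
//     }
//     nativeOps.deleteVariablesSet(vars);
//     nativeOps.unregisterGraph(null, graphId);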

// GraphState creation
public native @Cast("Nd4jPointer") Pointer getGraphState(@Cast("Nd4jLong") long id);

public native void deleteGraphState(@Cast("Nd4jPointer") Pointer state);

public native void deleteResultWrapper(@Cast("Nd4jPointer") Pointer ptr);

public native int estimateThreshold(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer x, @Cast("const Nd4jLong*") LongPointer xShapeInfo, int N, float threshold);
public native int estimateThreshold(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer x, @Cast("const Nd4jLong*") LongBuffer xShapeInfo, int N, float threshold);
public native int estimateThreshold(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer x, @Cast("const Nd4jLong*") long[] xShapeInfo, int N, float threshold);

// This method executes an op that requires a scope to be present: if/while/cond and similar control-flow ops.
public native @Cast("Nd4jStatus") int execCustomOpWithScope(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer state, @Cast("Nd4jLong") long opHash, @Cast("Nd4jLong*") LongPointer scopes, int numScopes, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs);
public native @Cast("Nd4jStatus") int execCustomOpWithScope(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer state, @Cast("Nd4jLong") long opHash, @Cast("Nd4jLong*") LongBuffer scopes, int numScopes, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs);
public native @Cast("Nd4jStatus") int execCustomOpWithScope(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer state, @Cast("Nd4jLong") long opHash, @Cast("Nd4jLong*") long[] scopes, int numScopes, @Cast("Nd4jPointer*") PointerPointer inputBuffers, @Cast("Nd4jPointer*") PointerPointer inputShapes, int numInputs, @Cast("Nd4jPointer*") PointerPointer outputBuffers, @Cast("Nd4jPointer*") PointerPointer outputShapes, int numOutputs);

//void fillUtf8String(Nd4jPointer *extraPointers, const char **string, int numStrings, Nd4jPointer buffer);
public native @Cast("Nd4jPointer") Pointer createUtf8String(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("char*") String string, int length);
public native @Cast("Nd4jPointer") Pointer createUtf8String(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("char*") BytePointer string, int length);
public native @Cast("Nd4jLong") long getUtf8StringLength(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer ptr);
public native @Cast("char*") BytePointer getUtf8StringBuffer(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer ptr);
public native void deleteUtf8String(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer ptr);
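
// Usage sketch: creating and disposing a UTF-8 string handle. The length passed in is assumed to
// be the byte length of the string's UTF-8 encoding.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     String text = "hello";
//     int byteLen = text.getBytes(java.nio.charset.StandardCharsets.UTF_8).length;
//     Pointer handle = nativeOps.createUtf8String(null, text, byteLen);
//     long len = nativeOps.getUtf8StringLength(null, handle);
//     BytePointer chars = nativeOps.getUtf8StringBuffer(null, handle);
//     nativeOps.deleteUtf8String(null, handle);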

public native void scatterUpdate(@Cast("Nd4jPointer*") PointerPointer extraPointers, int opCode, int numOfSubArrs,
                               Pointer hX, @Cast("const Nd4jLong*") LongPointer hXShapeInfo, @Cast("const Nd4jLong*") LongPointer hXOffsets,
                               Pointer dX, @Cast("const Nd4jLong*") LongPointer dXShapeInfo, @Cast("const Nd4jLong*") LongPointer dXOffsets,
                               Pointer hY, @Cast("const Nd4jLong*") LongPointer hYShapeInfo, @Cast("const Nd4jLong*") LongPointer hYOffsets,
                               Pointer dY, @Cast("const Nd4jLong*") LongPointer dYShapeInfo, @Cast("const Nd4jLong*") LongPointer dYOffsets,
                               Pointer hIindexes, @Cast("const Nd4jLong*") LongPointer hIndicesShapeInfo, Pointer dIindexes, @Cast("const Nd4jLong*") LongPointer dIndicesShapeInfo);
public native void scatterUpdate(@Cast("Nd4jPointer*") PointerPointer extraPointers, int opCode, int numOfSubArrs,
                               Pointer hX, @Cast("const Nd4jLong*") LongBuffer hXShapeInfo, @Cast("const Nd4jLong*") LongBuffer hXOffsets,
                               Pointer dX, @Cast("const Nd4jLong*") LongBuffer dXShapeInfo, @Cast("const Nd4jLong*") LongBuffer dXOffsets,
                               Pointer hY, @Cast("const Nd4jLong*") LongBuffer hYShapeInfo, @Cast("const Nd4jLong*") LongBuffer hYOffsets,
                               Pointer dY, @Cast("const Nd4jLong*") LongBuffer dYShapeInfo, @Cast("const Nd4jLong*") LongBuffer dYOffsets,
                               Pointer hIindexes, @Cast("const Nd4jLong*") LongBuffer hIndicesShapeInfo, Pointer dIindexes, @Cast("const Nd4jLong*") LongBuffer dIndicesShapeInfo);
public native void scatterUpdate(@Cast("Nd4jPointer*") PointerPointer extraPointers, int opCode, int numOfSubArrs,
                               Pointer hX, @Cast("const Nd4jLong*") long[] hXShapeInfo, @Cast("const Nd4jLong*") long[] hXOffsets,
                               Pointer dX, @Cast("const Nd4jLong*") long[] dXShapeInfo, @Cast("const Nd4jLong*") long[] dXOffsets,
                               Pointer hY, @Cast("const Nd4jLong*") long[] hYShapeInfo, @Cast("const Nd4jLong*") long[] hYOffsets,
                               Pointer dY, @Cast("const Nd4jLong*") long[] dYShapeInfo, @Cast("const Nd4jLong*") long[] dYOffsets,
                               Pointer hIindexes, @Cast("const Nd4jLong*") long[] hIndicesShapeInfo, Pointer dIindexes, @Cast("const Nd4jLong*") long[] dIndicesShapeInfo);

public native void inspectArray(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer buffer, @Cast("Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jPointer") Pointer specialBuffer, @Cast("Nd4jLong*") LongPointer specialShapeInfo, @Cast("Nd4jPointer") Pointer debugInfo);
public native void inspectArray(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer buffer, @Cast("Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jPointer") Pointer specialBuffer, @Cast("Nd4jLong*") LongBuffer specialShapeInfo, @Cast("Nd4jPointer") Pointer debugInfo);
public native void inspectArray(@Cast("Nd4jPointer*") PointerPointer extraPointers, @Cast("Nd4jPointer") Pointer buffer, @Cast("Nd4jLong*") long[] shapeInfo, @Cast("Nd4jPointer") Pointer specialBuffer, @Cast("Nd4jLong*") long[] specialShapeInfo, @Cast("Nd4jPointer") Pointer debugInfo);

public native OpaqueConstantShapeBuffer shapeBuffer(int rank, @Cast("Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("bool") boolean empty);
public native OpaqueConstantShapeBuffer shapeBuffer(int rank, @Cast("Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("bool") boolean empty);
public native OpaqueConstantShapeBuffer shapeBuffer(int rank, @Cast("Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("bool") boolean empty);
public native OpaqueConstantShapeBuffer shapeBufferEx(int rank, @Cast("Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);
public native OpaqueConstantShapeBuffer shapeBufferEx(int rank, @Cast("Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);
public native OpaqueConstantShapeBuffer shapeBufferEx(int rank, @Cast("Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] strides, @Cast("sd::DataType") int dtype, char order, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);
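
// Usage sketch: building a constant shape descriptor for a hypothetical 2x3 c-ordered array with
// unit element-wise stride. "dtypeCode" stands in for a sd::DataType value defined elsewhere in
// these bindings; the accessors and the matching delete call appear a few lines below.
//
//     long[] shape   = {2, 3};
//     long[] strides = {3, 1};
//     OpaqueConstantShapeBuffer csb = new Nd4jCpu().shapeBufferEx(2, shape, strides, dtypeCode, 'c', 1, 0);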

public native OpaqueConstantDataBuffer constantBufferLong(@Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongPointer data, int length);
public native OpaqueConstantDataBuffer constantBufferLong(@Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongBuffer data, int length);
public native OpaqueConstantDataBuffer constantBufferLong(@Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") long[] data, int length);
public native OpaqueConstantDataBuffer constantBufferDouble(@Cast("sd::DataType") int dtype, DoublePointer data, int length);
public native OpaqueConstantDataBuffer constantBufferDouble(@Cast("sd::DataType") int dtype, DoubleBuffer data, int length);
public native OpaqueConstantDataBuffer constantBufferDouble(@Cast("sd::DataType") int dtype, double[] data, int length);
public native OpaqueConstantDataBuffer constantBuffer(@Cast("sd::DataType") int dtype, ConstantDescriptor descriptor);

public native @Cast("Nd4jPointer") Pointer getConstantDataBufferPrimary(OpaqueConstantDataBuffer dbf);
public native @Cast("Nd4jPointer") Pointer getConstantDataBufferSpecial(OpaqueConstantDataBuffer dbf);
public native @Cast("Nd4jLong") long getConstantDataBufferLength(OpaqueConstantDataBuffer dbf);

public native @Cast("Nd4jPointer") Pointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer dbf);
public native @Cast("Nd4jPointer") Pointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer dbf);

public native void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer ptr);
public native void deleteConstantDataBuffer(OpaqueConstantDataBuffer ptr);
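
// Usage sketch: wrapping a small long[] as a constant data buffer, reading it back through the
// primary-buffer accessor, and releasing it. "dtypeCode" is again a placeholder for a
// sd::DataType value; on the pure CPU backend the "special" buffer may simply mirror the primary.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     long[] data = {1L, 2L, 3L};
//     OpaqueConstantDataBuffer cdb = nativeOps.constantBufferLong(dtypeCode, data, data.length);
//     Pointer primary = nativeOps.getConstantDataBufferPrimary(cdb);
//     long length = nativeOps.getConstantDataBufferLength(cdb);
//     nativeOps.deleteConstantDataBuffer(cdb);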

public native OpaqueContext createGraphContext(int nodeId);
public native OpaqueRandomGenerator getGraphContextRandomGenerator(OpaqueContext ptr);
public native void ctxAllowHelpers(OpaqueContext ptr, @Cast("bool") boolean reallyAllow);
public native void ctxShapeFunctionOverride(OpaqueContext ptr, @Cast("bool") boolean reallyOverride);
public native void ctxSetExecutionMode(OpaqueContext ptr, int execMode);
public native void ctxPurge(OpaqueContext ptr);
public native void markGraphContextInplace(OpaqueContext ptr, @Cast("bool") boolean reallyInplace);
public native void setGraphContextCudaContext(OpaqueContext ptr, Pointer stream, Pointer reductionPointer, Pointer allocationPointer);
public native void setGraphContextInputArray(OpaqueContext ptr, int index, Pointer buffer, Pointer shapeInfo, Pointer specialBuffer, Pointer specialShapeInfo);
public native void setGraphContextOutputArray(OpaqueContext ptr, int index, Pointer buffer, Pointer shapeInfo, Pointer specialBuffer, Pointer specialShapeInfo);
public native void setGraphContextInputBuffer(OpaqueContext ptr, int index, OpaqueDataBuffer buffer, Pointer shapeInfo, Pointer specialShapeInfo);
public native void setGraphContextOutputBuffer(OpaqueContext ptr, int index, OpaqueDataBuffer buffer, Pointer shapeInfo, Pointer specialShapeInfo);
public native void setGraphContextDArguments(OpaqueContext ptr, IntPointer arguments, int numberOfArguments);
public native void setGraphContextDArguments(OpaqueContext ptr, IntBuffer arguments, int numberOfArguments);
public native void setGraphContextDArguments(OpaqueContext ptr, int[] arguments, int numberOfArguments);
public native void setGraphContextTArguments(OpaqueContext ptr, DoublePointer arguments, int numberOfArguments);
public native void setGraphContextTArguments(OpaqueContext ptr, DoubleBuffer arguments, int numberOfArguments);
public native void setGraphContextTArguments(OpaqueContext ptr, double[] arguments, int numberOfArguments);
public native void setGraphContextIArguments(OpaqueContext ptr, @Cast("Nd4jLong*") LongPointer arguments, int numberOfArguments);
public native void setGraphContextIArguments(OpaqueContext ptr, @Cast("Nd4jLong*") LongBuffer arguments, int numberOfArguments);
public native void setGraphContextIArguments(OpaqueContext ptr, @Cast("Nd4jLong*") long[] arguments, int numberOfArguments);
public native void setGraphContextBArguments(OpaqueContext ptr, @Cast("bool*") BooleanPointer arguments, int numberOfArguments);
public native void setGraphContextBArguments(OpaqueContext ptr, @Cast("bool*") boolean[] arguments, int numberOfArguments);
public native void deleteGraphContext(OpaqueContext ptr);
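
// Usage sketch: assembling an OpaqueContext for execCustomOp2 (declared earlier in this class).
// The data buffers, shapeInfo pointers and op hash are assumed to come from the buffer helpers
// elsewhere in this API; the scalar arguments are placeholders, and the null special shapeInfo
// reflects a CPU-only setup.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     OpaqueContext ctx = nativeOps.createGraphContext(1);
//     nativeOps.setGraphContextInputBuffer(ctx, 0, inputDataBuffer, inputShapeInfo, null);
//     nativeOps.setGraphContextOutputBuffer(ctx, 0, outputDataBuffer, outputShapeInfo, null);
//     nativeOps.setGraphContextTArguments(ctx, new double[]{0.5}, 1);
//     nativeOps.setGraphContextIArguments(ctx, new long[]{1L}, 1);
//     nativeOps.setGraphContextBArguments(ctx, new boolean[]{true}, 1);
//     int status = nativeOps.execCustomOp2(null, opHash, ctx);
//     nativeOps.deleteGraphContext(ctx);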

public native OpaqueRandomGenerator createRandomGenerator(@Cast("Nd4jLong") long rootSeed/*=0*/, @Cast("Nd4jLong") long nodeSeed/*=0*/);
public native OpaqueRandomGenerator createRandomGenerator();
public native @Cast("Nd4jLong") long getRandomGeneratorRootState(OpaqueRandomGenerator ptr);
public native @Cast("Nd4jLong") long getRandomGeneratorNodeState(OpaqueRandomGenerator ptr);
public native void setRandomGeneratorStates(OpaqueRandomGenerator ptr, @Cast("Nd4jLong") long rootSeed/*=0*/, @Cast("Nd4jLong") long nodeSeed/*=0*/);
public native void setRandomGeneratorStates(OpaqueRandomGenerator ptr);
public native float getRandomGeneratorRelativeFloat(OpaqueRandomGenerator ptr, @Cast("Nd4jLong") long index);
public native double getRandomGeneratorRelativeDouble(OpaqueRandomGenerator ptr, @Cast("Nd4jLong") long index);
public native int getRandomGeneratorRelativeInt(OpaqueRandomGenerator ptr, @Cast("Nd4jLong") long index);
public native @Cast("Nd4jLong") long getRandomGeneratorRelativeLong(OpaqueRandomGenerator ptr, @Cast("Nd4jLong") long index);
public native void deleteRandomGenerator(OpaqueRandomGenerator ptr);
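
// Usage sketch: creating a generator from two seeds, drawing a few index-addressed values, and
// destroying it. The "relative" getters are expected to be deterministic for a given pair of
// states and index, so reseeding via setRandomGeneratorStates changes subsequent draws.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     OpaqueRandomGenerator rng = nativeOps.createRandomGenerator(119L, 5L);
//     float  f = nativeOps.getRandomGeneratorRelativeFloat(rng, 0L);
//     double d = nativeOps.getRandomGeneratorRelativeDouble(rng, 1L);
//     nativeOps.setRandomGeneratorStates(rng, 119L, 6L);   // reseed in place
//     nativeOps.deleteRandomGenerator(rng);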

public native @Cast("char*") String runLightBenchmarkSuit(@Cast("bool") boolean printOut);
public native @Cast("char*") String runFullBenchmarkSuit(@Cast("bool") boolean printOut);

public native OpaqueLaunchContext defaultLaunchContext();
public native @Cast("Nd4jPointer") Pointer lcScalarPointer(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcReductionPointer(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcAllocationPointer(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcExecutionStream(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcCopyStream(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcBlasHandle(OpaqueLaunchContext lc);
public native @Cast("Nd4jPointer") Pointer lcSolverHandle(OpaqueLaunchContext lc);

public native OpaqueDataBuffer allocateDataBuffer(@Cast("Nd4jLong") long elements, int dataType, @Cast("bool") boolean allocateBoth);
public native OpaqueDataBuffer dbAllocateDataBuffer(@Cast("Nd4jLong") long elements, int dataType, @Cast("bool") boolean allocateBoth);
public native OpaqueDataBuffer dbCreateExternalDataBuffer(@Cast("Nd4jLong") long elements, int dataType, @Cast("Nd4jPointer") Pointer primary, @Cast("Nd4jPointer") Pointer special);
public native OpaqueDataBuffer dbCreateView(OpaqueDataBuffer dataBuffer, @Cast("Nd4jLong") long length, @Cast("Nd4jLong") long offset);
public native @Cast("Nd4jPointer") Pointer dbPrimaryBuffer(OpaqueDataBuffer dataBuffer);
public native @Cast("Nd4jPointer") Pointer dbSpecialBuffer(OpaqueDataBuffer dataBuffer);
public native void dbExpandBuffer(OpaqueDataBuffer dataBuffer, @Cast("Nd4jLong") long elements);
public native void dbAllocatePrimaryBuffer(OpaqueDataBuffer dataBuffer);
public native void dbAllocateSpecialBuffer(OpaqueDataBuffer dataBuffer);
public native void dbSetPrimaryBuffer(OpaqueDataBuffer dataBuffer, @Cast("Nd4jPointer") Pointer primaryBuffer, @Cast("Nd4jLong") long numBytes);
public native void dbSetSpecialBuffer(OpaqueDataBuffer dataBuffer, @Cast("Nd4jPointer") Pointer specialBuffer, @Cast("Nd4jLong") long numBytes);
public native void dbSyncToSpecial(OpaqueDataBuffer dataBuffer);
public native void dbSyncToPrimary(OpaqueDataBuffer dataBuffer);
public native int dbLocality(OpaqueDataBuffer dataBuffer);
public native int dbDeviceId(OpaqueDataBuffer dataBuffer);
public native void dbSetDeviceId(OpaqueDataBuffer dataBuffer, int deviceId);
public native void dbTickHostRead(OpaqueDataBuffer dataBuffer);
public native void dbTickHostWrite(OpaqueDataBuffer dataBuffer);
public native void dbTickDeviceRead(OpaqueDataBuffer dataBuffer);
public native void dbTickDeviceWrite(OpaqueDataBuffer dataBuffer);
public native void dbClose(OpaqueDataBuffer dataBuffer);
public native void deleteDataBuffer(OpaqueDataBuffer dataBuffer);
public native void dbExpand(OpaqueDataBuffer dataBuffer, @Cast("Nd4jLong") long elements);
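
// Usage sketch: a plausible host-only lifecycle for an OpaqueDataBuffer. "floatDtypeCode" is a
// placeholder for the sd::DataType code of FLOAT32; on the pure CPU backend the device-side
// ("special") calls are expected to be cheap no-ops or mirrors of the host buffer.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     OpaqueDataBuffer buf = nativeOps.dbAllocateDataBuffer(1024, floatDtypeCode, false);
//     Pointer host = nativeOps.dbPrimaryBuffer(buf);   // raw host pointer for 1024 elements
//     // ... fill the host buffer, then flag the write so the sync/tick machinery sees it:
//     nativeOps.dbTickHostWrite(buf);
//     // dbClose(buf) presumably releases the underlying storage early; deleting disposes the wrapper:
//     nativeOps.deleteDataBuffer(buf);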


public native int binaryLevel();
public native int optimalLevel();

public native @Cast("bool") boolean isMinimalRequirementsMet();
public native @Cast("bool") boolean isOptimalRequirementsMet();
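
// Usage sketch: a small capability check before running compute-heavy code. binaryLevel()
// reports the instruction-set level this binary was built for and optimalLevel() the level the
// current CPU could exploit, so a gap between them suggests a faster build is available.
//
//     Nd4jCpu nativeOps = new Nd4jCpu();
//     if (!nativeOps.isMinimalRequirementsMet()) {
//         throw new IllegalStateException("CPU lacks features required by this nd4j-native binary");
//     }
//     if (!nativeOps.isOptimalRequirementsMet()) {
//         System.out.println("binary level " + nativeOps.binaryLevel()
//                 + " < optimal level " + nativeOps.optimalLevel());
//     }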

// #endif //NATIVEOPERATIONS_NATIVEOPS_H


// Parsed from build_info.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

// #ifndef LIBND4J_BUILD_INFO_H
// #define LIBND4J_BUILD_INFO_H

// #ifdef  _WIN32
// #define ND4J_EXPORT   __declspec( dllexport )
// #else
// #define ND4J_EXPORT
// #endif

// #define STRINGIFY(x) #x
// #define TOSTRING(x) STRINGIFY(x)

// #ifdef __cplusplus
// #endif

public native @Cast("char*") String buildInfo();

// #ifdef __cplusplus
// #endif

// #endif


// Parsed from memory/ExternalWorkspace.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_EXTERNALWORKSPACE_H
// #define LIBND4J_EXTERNALWORKSPACE_H

// #include 
// #include 
        @Namespace("sd::memory") @NoOffset public static class ExternalWorkspace extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ExternalWorkspace(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ExternalWorkspace(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ExternalWorkspace position(long position) {
                return (ExternalWorkspace)super.position(position);
            }
            @Override public ExternalWorkspace getPointer(long i) {
                return new ExternalWorkspace((Pointer)this).position(position + i);
            }
        
            public ExternalWorkspace() { super((Pointer)null); allocate(); }
            private native void allocate();

            public ExternalWorkspace(@Cast("Nd4jPointer") Pointer ptrH, @Cast("Nd4jLong") long sizeH, @Cast("Nd4jPointer") Pointer ptrD, @Cast("Nd4jLong") long sizeD) { super((Pointer)null); allocate(ptrH, sizeH, ptrD, sizeD); }
            private native void allocate(@Cast("Nd4jPointer") Pointer ptrH, @Cast("Nd4jLong") long sizeH, @Cast("Nd4jPointer") Pointer ptrD, @Cast("Nd4jLong") long sizeD);
            
            public native Pointer pointerHost();
            public native Pointer pointerDevice();

            public native @Cast("Nd4jLong") long sizeHost();
            public native @Cast("Nd4jLong") long sizeDevice();
        }
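
        // Usage sketch: wrapping externally owned host and device regions in an ExternalWorkspace.
        // On the CPU backend a separate device region may not be meaningful, so passing the host
        // pointer/size twice is assumed to be acceptable; "hostPtr"/"hostBytes" are placeholders.
        //
        //     ExternalWorkspace ws = new ExternalWorkspace(hostPtr, hostBytes, hostPtr, hostBytes);
        //     Pointer h = ws.pointerHost();
        //     long    n = ws.sizeHost();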
    


// #endif

// Parsed from memory/Workspace.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// This class implements Workspace functionality in C++
//
//
// @author [email protected]
//

// #ifndef LIBND4J_WORKSPACE_H
// #define LIBND4J_WORKSPACE_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

        @Namespace("sd::memory") @NoOffset public static class Workspace extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Workspace(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Workspace(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Workspace position(long position) {
                return (Workspace)super.position(position);
            }
            @Override public Workspace getPointer(long i) {
                return new Workspace((Pointer)this).position(position + i);
            }
        
            public Workspace(ExternalWorkspace external) { super((Pointer)null); allocate(external); }
            private native void allocate(ExternalWorkspace external);
            public Workspace(@Cast("Nd4jLong") long initialSize/*=0L*/, @Cast("Nd4jLong") long secondaryBytes/*=0L*/) { super((Pointer)null); allocate(initialSize, secondaryBytes); }
            private native void allocate(@Cast("Nd4jLong") long initialSize/*=0L*/, @Cast("Nd4jLong") long secondaryBytes/*=0L*/);
            public Workspace() { super((Pointer)null); allocate(); }
            private native void allocate();

            public native @Cast("Nd4jLong") long getAllocatedSize();
            public native @Cast("Nd4jLong") long getCurrentSize();
            public native @Cast("Nd4jLong") long getCurrentOffset();
            public native @Cast("Nd4jLong") long getSpilledSize();
            public native @Cast("Nd4jLong") long getUsedSize();

            public native @Cast("Nd4jLong") long getAllocatedSecondarySize();
            public native @Cast("Nd4jLong") long getCurrentSecondarySize();
            public native @Cast("Nd4jLong") long getCurrentSecondaryOffset();
            public native @Cast("Nd4jLong") long getSpilledSecondarySize();
            public native @Cast("Nd4jLong") long getUsedSecondarySize();

            public native void expandBy(@Cast("Nd4jLong") long primaryBytes, @Cast("Nd4jLong") long secondaryBytes/*=0L*/);
            public native void expandBy(@Cast("Nd4jLong") long primaryBytes);
            public native void expandTo(@Cast("Nd4jLong") long primaryBytes, @Cast("Nd4jLong") long secondaryBytes/*=0L*/);
            public native void expandTo(@Cast("Nd4jLong") long primaryBytes);

//            bool resizeSupported();

            public native Pointer allocateBytes(@Cast("Nd4jLong") long numBytes);
            public native Pointer allocateBytes(@Cast("sd::memory::MemoryType") int type, @Cast("Nd4jLong") long numBytes);

            public native void scopeIn();
            public native void scopeOut();

            /*
             * This method creates a NEW workspace of the same memory size and returns a pointer to it
             */
            public native Workspace clone();
        }
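
        // Usage sketch: a scoped allocation pattern with the Workspace above. Allocations made
        // between scopeIn() and scopeOut() are assumed to be reclaimed together when the scope
        // closes rather than freed one by one; the sizes here are arbitrary placeholders.
        //
        //     Workspace ws = new Workspace(16L * 1024 * 1024, 0L);   // 16 MB primary, no secondary
        //     ws.scopeIn();
        //     Pointer scratch = ws.allocateBytes(1024L);             // raw, workspace-owned bytes
        //     long used = ws.getUsedSize();
        //     ws.scopeOut();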
    


// #endif //LIBND4J_WORKSPACE_H


// Parsed from indexing/NDIndex.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_NDINDEX_H
// #define LIBND4J_NDINDEX_H

// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class NDIndex extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDIndex(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public NDIndex(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public NDIndex position(long position) {
            return (NDIndex)super.position(position);
        }
        @Override public NDIndex getPointer(long i) {
            return new NDIndex((Pointer)this).position(position + i);
        }
    
        public NDIndex() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native @Cast("bool") boolean isAll();
        public native @Cast("bool") boolean isPoint();
        public native @Cast("bool") boolean isInterval();

        public native @Cast("Nd4jLong*") @StdVector LongPointer getIndices();
        public native @Cast("Nd4jLong") long stride();

        public native NDIndex all();
        public native NDIndex point(@Cast("Nd4jLong") long pt);
        public native NDIndex interval(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end, @Cast("Nd4jLong") long stride/*=1*/);
        public native NDIndex interval(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end);
    }

    @Namespace("sd") public static class NDIndexAll extends NDIndex {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDIndexAll(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public NDIndexAll(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public NDIndexAll position(long position) {
            return (NDIndexAll)super.position(position);
        }
        @Override public NDIndexAll getPointer(long i) {
            return new NDIndexAll((Pointer)this).position(position + i);
        }
    
        public NDIndexAll() { super((Pointer)null); allocate(); }
        private native void allocate();
        public native @Cast("bool") boolean isInterval();
    }


    @Namespace("sd") public static class NDIndexPoint extends NDIndex {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDIndexPoint(Pointer p) { super(p); }
    
        public NDIndexPoint(@Cast("Nd4jLong") long point) { super((Pointer)null); allocate(point); }
        private native void allocate(@Cast("Nd4jLong") long point);
        public native @Cast("bool") boolean isInterval();
    }

    @Namespace("sd") public static class NDIndexInterval extends NDIndex {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDIndexInterval(Pointer p) { super(p); }
    
        public NDIndexInterval(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end, @Cast("Nd4jLong") long stride/*=1*/) { super((Pointer)null); allocate(start, end, stride); }
        private native void allocate(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end, @Cast("Nd4jLong") long stride/*=1*/);
        public NDIndexInterval(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end) { super((Pointer)null); allocate(start, end); }
        private native void allocate(@Cast("Nd4jLong") long start, @Cast("Nd4jLong") long end);
        public native @Cast("bool") boolean isInterval();
    }
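
    // Usage sketch: the three NDIndex flavours correspond to a whole-dimension "all", a single
    // point, and a strided interval. They are typically collected (for example in an IndicesList)
    // and handed to NDArray indexing routines elsewhere; here they are only constructed and queried.
    //
    //     NDIndex all      = new NDIndexAll();                     // take the whole dimension
    //     NDIndex point    = new NDIndexPoint(3L);                 // take index 3 only
    //     NDIndex interval = new NDIndexInterval(0L, 10L, 2L);     // start 0, end 10, stride 2
    //     boolean strided  = interval.isInterval();
    //     long    stride   = interval.stride();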




// #endif //LIBND4J_NDINDEX_H


// Parsed from indexing/IndicesList.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_INDICESLIST_H
// #define LIBND4J_INDICESLIST_H

// #include 
// #include "NDIndex.h"
    @Namespace("sd") @NoOffset public static class IndicesList extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public IndicesList(Pointer p) { super(p); }
    

        public native int size();
        public native NDIndex at(int idx);
        public native void push_back(NDIndex idx);
        public native @Cast("bool") boolean isScalar();
    }

// #endif //LIBND4J_INDICESLIST_H


// Parsed from graph/VariableType.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef ND4J_VARIABLE_TYPE_H
// #define ND4J_VARIABLE_TYPE_H
        /** enum sd::graph::VariableType */
        public static final int
            NDARRAY = 0,
            ARRAY_LIST = 1,
            FLOW = 2,
            CONSTANT = 3,
            PLACEHOLDER = 4;
    


// #endif

// Parsed from graph/ArgumentsList.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 24.01.18.
//

// #ifndef LIBND4J_INPUTLIST_H
// #define LIBND4J_INPUTLIST_H

// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd::graph") @NoOffset public static class ArgumentsList extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ArgumentsList(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ArgumentsList(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ArgumentsList position(long position) {
            return (ArgumentsList)super.position(position);
        }
        @Override public ArgumentsList getPointer(long i) {
            return new ArgumentsList((Pointer)this).position(position + i);
        }
    
        public ArgumentsList() { super((Pointer)null); allocate(); }
        private native void allocate();

        /**
         * This method returns the number of argument pairs available
         *
         * @return the number of argument pairs
         */
        public native int size();

        /**
         * This method returns the Pair at the specified index
         *
         * @param index index of the pair to return
         * @return the Pair stored at that index
         */
        public native @ByRef Pair at(int index);
    }



// #endif //LIBND4J_INPUTLIST_H


// Parsed from types/pair.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 24.01.18.
//

// #ifndef LIBND4J_PAIR_H
// #define LIBND4J_PAIR_H

// #include 
    @Namespace("sd") @NoOffset public static class Pair extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Pair(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Pair(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public Pair position(long position) {
            return (Pair)super.position(position);
        }
        @Override public Pair getPointer(long i) {
            return new Pair((Pointer)this).position(position + i);
        }
    
        public Pair(int first/*=0*/, int second/*=0*/) { super((Pointer)null); allocate(first, second); }
        private native void allocate(int first/*=0*/, int second/*=0*/);
        public Pair() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native int first();
        public native int second();
    }
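
    // Usage sketch: Pair is a simple (first, second) integer holder and ArgumentsList exposes a
    // read-only view over such pairs. The bindings above only surface size() and at(int), so from
    // Java one typically just walks an instance obtained elsewhere, assumed here to be "args".
    //
    //     for (int i = 0; i < args.size(); i++) {
    //         Pair p = args.at(i);
    //         int node  = p.first();
    //         int index = p.second();
    //     }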



// #endif //LIBND4J_PAIR_H


// Parsed from array/NDArray.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

// #ifndef NDARRAY_H
// #define NDARRAY_H

// #include 
// #include 
// #include 
// #include 
// #include "legacy/NativeOpExecutioner.h"
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 




    @Namespace("sd") public native @ByVal NDArray mmul(@Const @ByRef NDArray arg0, @Const @ByRef NDArray arg1);

    @Namespace("sd") @NoOffset public static class NDArray extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDArray(Pointer p) { super(p); }
    
        public NDArray() { super((Pointer)null); allocate(); }
        private native void allocate();

        /**
        *  Does not allocate memory; the memory for the array is passed in from outside.
        */
// #ifndef __JAVACPP_HACK__

// #endif

        /**
        *  Does not allocate memory; the memory for the array is passed in from outside.
        */
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") LongPointer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, shapeInfo, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") LongPointer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") LongPointer shapeInfo) { super((Pointer)null); allocate(buffer, shapeInfo); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") LongPointer shapeInfo);
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") LongBuffer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, shapeInfo, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") LongBuffer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") LongBuffer shapeInfo) { super((Pointer)null); allocate(buffer, shapeInfo); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") LongBuffer shapeInfo);
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") long[] shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, shapeInfo, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") long[] shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, @Cast("Nd4jLong*") long[] shapeInfo) { super((Pointer)null); allocate(buffer, shapeInfo); }
        private native void allocate(Pointer buffer, @Cast("Nd4jLong*") long[] shapeInfo);

        /**
        *  Does not allocate memory; the memory for the array is passed in from outside.
        *  The content of both (device and host) buffers is assumed to be identical.
        */
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongPointer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo, context, isBuffAlloc, isBuffDAlloc); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongPointer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/);
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongPointer shapeInfo) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongPointer shapeInfo);
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongBuffer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo, context, isBuffAlloc, isBuffDAlloc); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongBuffer shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/);
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongBuffer shapeInfo) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") LongBuffer shapeInfo);
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") long[] shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo, context, isBuffAlloc, isBuffDAlloc); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") long[] shapeInfo, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isBuffAlloc/*=false*/, @Cast("bool") boolean isBuffDAlloc/*=false*/);
        public NDArray(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") long[] shapeInfo) { super((Pointer)null); allocate(buffer, bufferD, shapeInfo); }
        private native void allocate(Pointer buffer, Pointer bufferD, @Cast("const Nd4jLong*") long[] shapeInfo);

        /**
        *  copy constructor
        */
        public NDArray(@Const @ByRef NDArray other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef NDArray other);

        /**
        *  move constructor
        */

        /**
        *  Constructor, creates an array stored in the given workspace.
        */
        public NDArray(LaunchContext context) { super((Pointer)null); allocate(context); }
        private native void allocate(LaunchContext context);


        /**
        *  Constructor creates a new NDArray using shape information from "shapeInfo" and sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently.
        */
		public NDArray(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, copyStrides, context, nullify); }
		private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
		public NDArray(@Cast("const Nd4jLong*") LongPointer shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
		private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo);
		public NDArray(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, copyStrides, context, nullify); }
		private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
		public NDArray(@Cast("const Nd4jLong*") LongBuffer shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
		private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
		public NDArray(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, copyStrides, context, nullify); }
		private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
		public NDArray(@Cast("const Nd4jLong*") long[] shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
		private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo);

        /**
        *  Constructor creates a new NDArray using shape information from "shapeInfo" and sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently.
        *  The array type is set to dtype.
        */
        public NDArray(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, dtype, copyStrides, context, nullify); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
        public NDArray(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(shapeInfo, dtype); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("sd::DataType") int dtype);
        public NDArray(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, dtype, copyStrides, context, nullify); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
        public NDArray(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(shapeInfo, dtype); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("sd::DataType") int dtype);
        public NDArray(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/) { super((Pointer)null); allocate(shapeInfo, dtype, copyStrides, context, nullify); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("sd::DataType") int dtype, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean nullify/*=true*/);
        public NDArray(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(shapeInfo, dtype); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("sd::DataType") int dtype);

        /**
        *  This constructor creates a new array using the shape information contained in the vector argument.
        */
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape) { super((Pointer)null); allocate(order, shape); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape) { super((Pointer)null); allocate(order, shape); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector long[] shape) { super((Pointer)null); allocate(order, shape); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector long[] shape);

        /**
        * This constructor creates a new array with elements copied from data, using the shape information stored in shape; elements from data will be cast to dtype.
        */
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @StdVector DoublePointer data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, data, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @StdVector DoublePointer data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @StdVector DoublePointer data) { super((Pointer)null); allocate(order, shape, data); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @StdVector DoublePointer data);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @StdVector DoubleBuffer data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, data, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @StdVector DoubleBuffer data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @StdVector DoubleBuffer data) { super((Pointer)null); allocate(order, shape, data); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @StdVector DoubleBuffer data);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @StdVector double[] data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(order, shape, data, dtype, context); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @StdVector double[] data, @Cast("sd::DataType") int dtype/*=sd::DOUBLE*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);
        public NDArray(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @StdVector double[] data) { super((Pointer)null); allocate(order, shape, data); }
        private native void allocate(char order, @Cast("Nd4jLong*") @StdVector long[] shape, @StdVector double[] data);

        /**
        *  This constructor creates a new array using the given buffer (without memory allocation) and the shape information stored in shape.
        */
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongPointer shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, order, shape, dtype, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongPointer shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongPointer shape,  @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(buffer, order, shape, dtype); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongPointer shape,  @Cast("sd::DataType") int dtype);
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, order, shape, dtype, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape,  @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(buffer, order, shape, dtype); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector LongBuffer shape,  @Cast("sd::DataType") int dtype);
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector long[] shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/) { super((Pointer)null); allocate(buffer, order, shape, dtype, context, isBuffAlloc); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector long[] shape,  @Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("const bool") boolean isBuffAlloc/*=false*/);
        public NDArray(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector long[] shape,  @Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(buffer, order, shape, dtype); }
        private native void allocate(Pointer buffer, char order, @Cast("Nd4jLong*") @StdVector long[] shape,  @Cast("sd::DataType") int dtype);

        /**
        * This method returns new array with the same shape & data type
        * @return
        */
        public native @ByVal NDArray like();

        /**
         * This method returns new uninitialized array with the same shape & data type
         * @return
         */
        public native @ByVal NDArray ulike();


        /**
        *  this constructor creates a new NDArray whose shape matches the "other" array;
        *  it does NOT copy the elements of "other" into the new array
        */
        public NDArray(@Const NDArray other, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/) { super((Pointer)null); allocate(other, copyStrides, context); }
        private native void allocate(@Const NDArray other, @Cast("bool") boolean copyStrides/*=false*/, LaunchContext context/*=sd::LaunchContext::defaultContext()*/);

        /**
        *  this constructor creates a scalar (with its value set to 0) or an empty array, depending on the bool argument isScalar
        */
        public NDArray(@Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isScalar/*=true*/) { super((Pointer)null); allocate(dtype, context, isScalar); }
        private native void allocate(@Cast("sd::DataType") int dtype, LaunchContext context/*=sd::LaunchContext::defaultContext()*/, @Cast("bool") boolean isScalar/*=true*/);
        public NDArray(@Cast("sd::DataType") int dtype) { super((Pointer)null); allocate(dtype); }
        private native void allocate(@Cast("sd::DataType") int dtype);

        /**
         * This method blocks until asynchronous operation finishes
         */
        public native void synchronize(@Cast("char*") String msg);
        public native void synchronize(@Cast("char*") BytePointer msg);

        /**
        * This method allows setting the _isAttached flag
         * @param reallyAttached
         */
        public native void setAttached(@Cast("bool") boolean reallyAttached);

        public native void tickWriteHost();
        public native void tickWriteDevice();
        public native void tickReadHost();
        public native void tickReadDevice();
        public native void tickBothActual();
        public native @Cast("bool") boolean isActualOnHostSide();
        public native @Cast("bool") boolean isActualOnDeviceSide();
        public native void makeBothBuffersActual();

        public native void syncToHost();
        public native void syncToDevice();
        public native void syncShape();

        /**
         * This method can be used on architectures that use special buffers
         * @param writeList
         * @param readList
         */
        public native void registerSpecialUse(@Const @ByRef ConstNDArrayVector writeList, @Const @ByRef(nullValue = "std::vector({})") ConstNDArrayVector readList);
        public native void registerSpecialUse(@Const @ByRef ConstNDArrayVector writeList);
        public native void prepareSpecialUse(@Const @ByRef ConstNDArrayVector writeList, @Const @ByRef(nullValue = "std::vector({})") ConstNDArrayVector readList, @Cast("bool") boolean synchronizeWritables/*=false*/);
        public native void prepareSpecialUse(@Const @ByRef ConstNDArrayVector writeList);

        public native void registerPrimaryUse(@Const @ByRef ConstNDArrayVector writeList, @Const @ByRef(nullValue = "std::vector({})") ConstNDArrayVector readList);
        public native void registerPrimaryUse(@Const @ByRef ConstNDArrayVector writeList);
        public native void preparePrimaryUse(@Const @ByRef ConstNDArrayVector writeList, @Const @ByRef(nullValue = "std::vector({})") ConstNDArrayVector readList, @Cast("bool") boolean synchronizeWritables/*=false*/);
        public native void preparePrimaryUse(@Const @ByRef ConstNDArrayVector writeList);

        /**
        * This method returns the buffer pointer offset by the given number of elements, with respect to the array's own data type
         * @param offset
         * @return
         */
        public native Pointer bufferWithOffset(@Cast("Nd4jLong") long offset);
        public native Pointer specialBufferWithOffset(@Cast("Nd4jLong") long offset);
        /**
        *  copy assignment operator
        *  in particular, when _dataType != other._dataType and both shapes are the same, a new _buffer is allocated and _dataType takes the value of other._dataType
        */
        public native @ByRef @Name("operator =") NDArray put(@Const @ByRef NDArray other);

        /**
        *  move assignment operator
        */

        /**
        *  assignment operator, assigns the same scalar to all array elements
        */


        /**
        *   operators for memory allocation and deletion
        */
        public native @Name("operator new") Pointer _new(@Cast("size_t") long i);
        public native @Name("operator delete") void _delete(Pointer p);


        public native void setContext(LaunchContext context);

        /**
        *  creates a new array by replicating the current array along the given dimension the number of times given by repeats
        *  axis - axis along which to repeat elements
        *  repeats - number of repetitions
        */
        public native @ByVal NDArray repeat(int axis, @StdVector IntPointer repeats);
        public native @ByVal NDArray repeat(int axis, @StdVector IntBuffer repeats);
        public native @ByVal NDArray repeat(int axis, @StdVector int[] repeats);
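
        /**
        *  Editor's sketch (hedged): replication along an axis via the int[] overload of repeat()
        *  declared above, assuming arr is the 2x3 DOUBLE array from the constructor sketch near the
        *  top of this class.
        *
        *      NDArray repeated = arr.repeat(0, new int[]{2});   // each row repeated twice; shape [4, 3] is expected
        *      repeated.printShapeInfo("repeated shape");
        */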

        /**
         * This method fills this array with zeros
         */
        public native void nullify();

        /**
        * This method returns a quantized copy of the given array
         *
         * @param array
         * @return
         */
        public native @ByVal NDArray quantize(@Const @ByRef NDArray array);

        /**
        *  fills the target array by repeating the current array
        *  axis - axis along which to repeat elements
        *  repeats - vector containing numbers of repetition for elements at given axis
        */
        public native void repeat(int axis, @StdVector IntPointer repeats, @ByRef NDArray target);
        public native void repeat(int axis, @StdVector IntBuffer repeats, @ByRef NDArray target);
        public native void repeat(int axis, @StdVector int[] repeats, @ByRef NDArray target);

        /**
        *  creates an array which points to a certain sub-range of this array; the sub-range is defined by the given indices
        */
        
        
        

        /**
        *  cast array elements to given dtype
        */
        public native @ByVal NDArray cast(@Cast("sd::DataType") int dtype);

        public native void cast(@ByRef NDArray target, @Cast("sd::DataType") int dtype);

        /**
        *   returns _context
        */
        public native LaunchContext getContext();

// #ifndef __JAVACPP_HACK__
// #endif

        /**
        *   returns host buffer
        */
        public native Pointer buffer();


        /**
        *   returns buffer offset (offset is the same for host and device buffers)
        */
        public native @Cast("Nd4jLong") long bufferOffset();

        /**
         *  checks if array has padded buffer
         */
        public native @Cast("bool") boolean hasPaddedBuffer();

        /**
        *  if _bufferD==nullptr return _buffer, else return _bufferD
        */
        public native Pointer specialBuffer();

        /**
        *   returns the device buffer when compiled for CUDA, otherwise returns the host buffer
        */
        public native Pointer platformBuffer();

        /**
        *   returns _shapeInfo
        */
        public native @Cast("const Nd4jLong*") LongPointer shapeInfo();


        /**
         * Returns true if this is a legally empty NDArray, false otherwise
         * @return
         */
        public native @Cast("bool") boolean isEmpty();

        /**
        *  if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD
        */
        public native @Cast("const Nd4jLong*") LongPointer specialShapeInfo();

        public native @Cast("const Nd4jLong*") LongPointer platformShapeInfo();

        /**
        *  permutes (in-place) the dimensions in array according to "dimensions" array
        */
        public native @Cast("bool") boolean permutei(@StdVector IntPointer dimensions);
        public native @Cast("bool") boolean permutei(@StdVector IntBuffer dimensions);
        public native @Cast("bool") boolean permutei(@StdVector int[] dimensions);
        public native @Cast("bool") boolean permutei(@Const IntPointer dimensions, int rank);
        public native @Cast("bool") boolean permutei(@Const IntBuffer dimensions, int rank);
        public native @Cast("bool") boolean permutei(@Const int[] dimensions, int rank);
        public native @Cast("bool") boolean permutei(@Cast("Nd4jLong*") @StdVector LongPointer dimensions);
        public native @Cast("bool") boolean permutei(@Cast("Nd4jLong*") @StdVector LongBuffer dimensions);
        public native @Cast("bool") boolean permutei(@Cast("Nd4jLong*") @StdVector long[] dimensions);
        public native @Cast("bool") boolean permutei(@Cast("const Nd4jLong*") LongPointer dimensions, int rank);
        public native @Cast("bool") boolean permutei(@Cast("const Nd4jLong*") LongBuffer dimensions, int rank);
        public native @Cast("bool") boolean permutei(@Cast("const Nd4jLong*") long[] dimensions, int rank);

        public native @Cast("bool") boolean isFinite();
        public native @Cast("bool") boolean hasNaNs();
        public native @Cast("bool") boolean hasInfs();

        public native void copyBuffersContinuouslyFrom(@Const @ByRef NDArray other, @Cast("size_t") long sizeToCopyInBytes/*=0*/, @Cast("Nd4jLong") long offsetThis/*=0*/, @Cast("Nd4jLong") long offsetOther/*=0*/);
        public native void copyBuffersContinuouslyFrom(@Const @ByRef NDArray other);

        /**
        *  permutes the dimensions in the array according to the "dimensions" array; the new array points to the _buffer of this array
        */
        public native @ByVal NDArray permute(@StdVector IntPointer dimensions);
        public native @ByVal NDArray permute(@StdVector IntBuffer dimensions);
        public native @ByVal NDArray permute(@StdVector int[] dimensions);
        public native @ByVal NDArray permute(@Const IntPointer dimensions, int rank);
        public native @ByVal NDArray permute(@Const IntBuffer dimensions, int rank);
        public native @ByVal NDArray permute(@Const int[] dimensions, int rank);
        
        
        

        public native void permute(@Const IntPointer dimensions, int rank, @ByRef NDArray target);
        public native void permute(@Const IntBuffer dimensions, int rank, @ByRef NDArray target);
        public native void permute(@Const int[] dimensions, int rank, @ByRef NDArray target);
        public native void permute(@StdVector IntPointer dimensions, @ByRef NDArray target);
        public native void permute(@StdVector IntBuffer dimensions, @ByRef NDArray target);
        public native void permute(@StdVector int[] dimensions, @ByRef NDArray target);
        public native @ByVal NDArray permute(@Cast("Nd4jLong*") @StdVector LongPointer dimensions);
        public native @ByVal NDArray permute(@Cast("Nd4jLong*") @StdVector LongBuffer dimensions);
        public native @ByVal NDArray permute(@Cast("Nd4jLong*") @StdVector long[] dimensions);
        public native @ByVal NDArray permute(@Cast("const Nd4jLong*") LongPointer dimensions, int rank);
        public native @ByVal NDArray permute(@Cast("const Nd4jLong*") LongBuffer dimensions, int rank);
        public native @ByVal NDArray permute(@Cast("const Nd4jLong*") long[] dimensions, int rank);
        
        
        

        public native void permute(@Cast("const Nd4jLong*") LongPointer dimensions, int rank, @ByRef NDArray target);
        public native void permute(@Cast("const Nd4jLong*") LongBuffer dimensions, int rank, @ByRef NDArray target);
        public native void permute(@Cast("const Nd4jLong*") long[] dimensions, int rank, @ByRef NDArray target);
        public native void permute(@Cast("Nd4jLong*") @StdVector LongPointer dimensions, @ByRef NDArray target);
        public native void permute(@Cast("Nd4jLong*") @StdVector LongBuffer dimensions, @ByRef NDArray target);
        public native void permute(@Cast("Nd4jLong*") @StdVector long[] dimensions, @ByRef NDArray target);

        /**
         * This method streamlines the given view or permuted array and reallocates the buffer
         */
        public native void streamline(char order/*='a'*/);
        public native void streamline();

        /**
        *  prints information about array shape
        *  msg - message to print out
        */
        public native void printShapeInfo(@Cast("char*") String msg/*=nullptr*/);
        public native void printShapeInfo();
        public native void printShapeInfo(@Cast("char*") BytePointer msg/*=nullptr*/);

        /**
        *  prints buffer elements
        *  msg - message to print out
        *  limit - number of array elements to print out
        *  sync - if true, check whether the host buffer is up to date and, if it is not, synchronize it
        */
        public native void printBuffer(@Cast("char*") String msg/*=nullptr*/, @Cast("Nd4jLong") long _limit/*=-1*/, @Cast("const bool") boolean sync/*=true*/);
        public native void printBuffer();
        public native void printBuffer(@Cast("char*") BytePointer msg/*=nullptr*/, @Cast("Nd4jLong") long _limit/*=-1*/, @Cast("const bool") boolean sync/*=true*/);

        /**
        * prints elements one by one, in the order they are stored in physical memory
        */
        public native void printLinearBuffer();

        /**
        *  prints _buffer (if host = true) or _bufferD (if host = false) as is, that is, in its current state without checking the buffer status
        */

        /**
        *  prints buffer elements, takes into account offset between elements (element-wise-stride)
        *  msg - message to print out
        *  limit - number of array elements to print out
        */
        public native void printIndexedBuffer(@Cast("char*") String msg/*=nullptr*/, @Cast("Nd4jLong") long _limit/*=-1*/);
        public native void printIndexedBuffer();
        public native void printIndexedBuffer(@Cast("char*") BytePointer msg/*=nullptr*/, @Cast("Nd4jLong") long _limit/*=-1*/);

        public native @StdString BytePointer asIndexedString(@Cast("Nd4jLong") long _limit/*=-1*/);
        public native @StdString BytePointer asIndexedString();
        public native @StdString BytePointer asString(@Cast("Nd4jLong") long _limit/*=-1*/);
        public native @StdString BytePointer asString();

        /**
        *  this method assigns values of given array to this one
        */
        public native void assign(@Const NDArray other, @Cast("bool") boolean allowParallelism/*=true*/);
        public native void assign(@Const NDArray other);

        /**
        *  this method assigns values of given array to this one
        */

        /**
        *  this method assigns given value to all elements in array
        */

        /**
        *  returns a new copy of this array, optionally in a different order
        */
        public native @ByVal NDArray dup(byte newOrder/*='a'*/);
        public native @ByVal NDArray dup();
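
        /**
        *  Editor's sketch (hedged): dup() declared above returns an independent copy, so writes to
        *  the copy should not affect the original (arr as in the constructor sketch above).
        *
        *      NDArray copy = arr.dup();             // same shape, data type and values, new buffer
        *      copy.nullify();                       // zero the copy only
        *      arr.printIndexedBuffer("original");   // original values remain intact
        */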

        /**
        *  returns sum of all elements of array
        */
        public native @ByVal NDArray sumNumber();

        /**
        *  returns the mean of all array elements
        */
        public native @ByVal NDArray meanNumber();

// #ifndef __JAVACPP_HACK__

// #endif

        /**
        *   applies the transpose operation to a copy of this array; this array remains unaffected
        */
        public native @ByVal NDArray transpose();
        

        /**
        *  perform transpose operation and store result in target, this array remains unaffected
        *  target - where to store result
        */
        public native void transpose(@ByRef NDArray target);

        /**
        *  apply in-place transpose operation to this array, so this array becomes transposed
        */
        public native void transposei();

        /**
        *  returns the number of sub-arrays along the specified dimension(s)
        *  dimensions - array of dimensions to point on
        */
        public native @Cast("Nd4jLong") long tensorsAlongDimension(@StdVector IntPointer dimensions);
        public native @Cast("Nd4jLong") long tensorsAlongDimension(@StdVector IntBuffer dimensions);
        public native @Cast("Nd4jLong") long tensorsAlongDimension(@StdVector int[] dimensions);

        /**
        *  returns true if elements of two arrays are equal to within given epsilon value
        *  other - input array to compare
        *  eps - epsilon, this value defines the precision of elements comparison
        */
        public native @Cast("bool") boolean equalsTo(@Const NDArray other, double eps/*=1e-5*/);
        public native @Cast("bool") boolean equalsTo(@Const NDArray other);

        /**
        *  add given row vector to all rows of this array
        *  row - row vector to add
        */
        public native void addiRowVector(@Const @ByRef NDArray row);

        /**
        *  add given row vector to all rows of this array, store result in target
        *  row - row vector to add
        *  target - where to store result
        */
        public native void addRowVector(@Const @ByRef NDArray row, @ByRef NDArray target);
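
        /**
        *  Editor's sketch (hedged): add a row vector to every row of the 2x3 array arr from the
        *  constructor sketch above, writing the result into a target produced by ulike()
        *  (same shape and data type, uninitialized).
        *
        *      NDArray row    = new NDArray('c', new long[]{1, 3}, new double[]{10, 20, 30});
        *      NDArray target = arr.ulike();
        *      arr.addRowVector(row, target);        // target[i][j] = arr[i][j] + row[j]
        */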

        /**
        *  subtract given row vector from all rows of this array, store result in target
        *  row - row vector to subtract
        *  target - where to store result
        */
        public native void subRowVector(@Const @ByRef NDArray row, @ByRef NDArray target);

        /**
        *  multiply all rows of this array by the given row vector, store result in target
        *  row - row vector to multiply by
        *  target - where to store result
        */
        public native void mulRowVector(@Const @ByRef NDArray row, @ByRef NDArray target);

        /**
        *  divide all rows of this array by the given row vector, store result in target
        *  row - row vector to divide by
        *  target - where to store result
        */
        public native void divRowVector(@Const @ByRef NDArray row, @ByRef NDArray target);

        /**
        *  add given column vector to all columns of this array, store result in target
        *  column - column vector to add
        *  target - where to store result
        */
        public native void addColumnVector(@Const @ByRef NDArray column, @ByRef NDArray target);

        /**
        *  add given column vector to all columns of this array, this array becomes affected (in-place operation)
        *  column - column vector to add
        */
		public native void addiColumnVector(@Const @ByRef NDArray column);

        /**
        *  multiply all columns of this array by the given column vector, this array becomes affected (in-place operation)
        *  column - column vector to multiply by
        */
		public native void muliColumnVector(@Const @ByRef NDArray column);

        /**
        *  returns number of bytes used by _buffer & _shapeInfo
        */
        public native @Cast("Nd4jLong") long memoryFootprint();

        /**
        *  these methods are suited for FlatBuffers use
        */
        public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeAsVector();
        public native @StdVector IntPointer getShapeAsVectorInt();
        public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeInfoAsVector();
        public native @Cast("int64_t*") @StdVector LongPointer getShapeInfoAsFlatVector();
        public native @Cast("int64_t*") @StdVector LongPointer getShapeAsFlatVector();

        /**
        *  set new order and shape in case of suitable array length (in-place operation)
        *  order - order to set
        *  shape - shape to set
        *  copyToNewBuff - if true, the old buffer is copied into the new buffer whenever a new buffer has to be allocated after reshaping
        *  if a permute was applied before or the strides are irregular, a new buffer is allocated for the array
        */
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape);
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape);
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(byte order, @Cast("Nd4jLong*") @StdVector long[] shape);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector LongPointer shape);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector LongBuffer shape);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector long[] shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @Cast("bool") boolean reshapei(@Cast("Nd4jLong*") @StdVector long[] shape);

        /**
        *  creates a new array with the given order and shape; the new array will point to the _buffer of this array
        *  order - order to set
        *  shape - shape to set
        *
        * if a permute has been applied before or the strides are irregular, a new buffer is allocated for the new array
        */
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape);
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape);
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("const bool") boolean copyToNewBuff/*=true*/);
		public native @ByVal NDArray reshape(byte order, @Cast("Nd4jLong*") @StdVector long[] shape);
        

        /**
        *  calculate strides and set given order
        *  order - order to set
        */
		public native void updateStrides(byte order);

        /**
        *  changes the array by repeating it the number of times given by repeats (in-place operation)
        *  repeats - contains numbers of repetitions
        */
		public native void tilei(@Cast("Nd4jLong*") @StdVector LongPointer repeats);
		public native void tilei(@Cast("Nd4jLong*") @StdVector LongBuffer repeats);
		public native void tilei(@Cast("Nd4jLong*") @StdVector long[] repeats);

        /**
        *  returns a new array created by repeating this array the number of times given by repeats
        *  repeats - contains numbers of repetitions
        */
		public native @ByVal NDArray tile(@Cast("Nd4jLong*") @StdVector LongPointer repeats);
		public native @ByVal NDArray tile(@Cast("Nd4jLong*") @StdVector LongBuffer repeats);
		public native @ByVal NDArray tile(@Cast("Nd4jLong*") @StdVector long[] repeats);

        /**
        *  fills the target array by repeating this array the number of times given by repeats
        *  repeats - contains numbers of repetitions
        *  target - where to store result
        */
        public native void tile(@Cast("Nd4jLong*") @StdVector LongPointer repeats, @ByRef NDArray target);
        public native void tile(@Cast("Nd4jLong*") @StdVector LongBuffer repeats, @ByRef NDArray target);
        public native void tile(@Cast("Nd4jLong*") @StdVector long[] repeats, @ByRef NDArray target);

        /**
        *  fills the target array by repeating this array enough times to acquire a shape equal to the target's shape
        *  target - where to store result
        */
        public native void tile(@ByRef NDArray target);

        /**
        *  check whether array is identity matrix
        */
		public native @Cast("bool") boolean isIdentityMatrix();

        /**
        *  check whether array is unitary matrix
        */
		public native @Cast("bool") boolean isUnitary();

        /**
        *  operator returns subarray with buffer pointing at this->_buffer with offset defined by given intervals
        *  idx - intervals of indices which define the sub-array to point to; idx has the form {dim0Start,dim0End,  dim1Start,dim1End, ....} and length (2 * this->rankOf())
        *        when (dimStart == dimEnd), the whole range is used for that dimension
        *  keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
        *  isStrided - if true then idx has length (3 * this->rankOf()) and contains additional stride numbers which correspond to stride between dimStart and dimEnd,
        *              so structure of idx is like {dim0Start,dim0End,dim0Stride,    dim1Start,dim1End,dim1Stride, ....}
        */
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector LongPointer idx, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector LongPointer idx);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector LongBuffer idx, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector LongBuffer idx);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector long[] idx, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("Nd4jLong*") @StdVector long[] idx);

        /**
        *  evaluates subarray with buffer pointing at this->_buffer and offset defined by given sequential index subArrIdx and dimensions in dimsToExclude
        *  subArrIdx - index of current sub-array
        *  dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5], and subArrIdx must be in range [0,7]
        *                  if dimsToExclude is empty then idxRanges containing all zeros (means whole array) will be returned.
        *  keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
        */
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector IntPointer dimsToExclude, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector IntPointer dimsToExclude);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector IntBuffer dimsToExclude, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector IntBuffer dimsToExclude);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector int[] dimsToExclude, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native @ByVal @Name("operator ()") NDArray apply(@Cast("const Nd4jLong") long subArrIdx, @StdVector int[] dimsToExclude);

        /**
        * processes whole set of sub-arrays
        * evaluates shapeInfo of sub-arrays (all sub-arrays have the same shapeInfo) and their buffer offsets (each sub-array has its own unique offset from original this-buffer)
        * dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5]
        *                 if dimsToExclude.size() = array rank it means sub-array is whole array and copy of original_shapeInfo will be returned and one zero offset
        * subArrShapeInfo    - output argument, contains shapeInfo common for all sub-arrays
        * subArrOffsets      - output argument, contains successive sub-arrays offsets from original this-buffer
        * keepUnitiesInShape - if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b}
        */
        public native void getSubArrShapeAndOffsets(@StdVector IntPointer dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef LongPointer subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongPointer subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native void getSubArrShapeAndOffsets(@StdVector IntPointer dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef LongPointer subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongPointer subArrOffsets);
        public native void getSubArrShapeAndOffsets(@StdVector IntBuffer dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native void getSubArrShapeAndOffsets(@StdVector IntBuffer dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer subArrOffsets);
        public native void getSubArrShapeAndOffsets(@StdVector int[] dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef long[] subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef long[] subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
        public native void getSubArrShapeAndOffsets(@StdVector int[] dimsToExclude, @Cast("Nd4jLong*&") @ByPtrRef long[] subArrShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef long[] subArrOffsets);

        /**
        *  in-place addition operator: array += other
        *  other - input array to add
        */
        public native @Name("operator +=") void addPut(@Const @ByRef NDArray other);

        /**
        *  in-place subtraction operator: array -= other
        *  other - input array to subtract
        */
        public native @Name("operator -=") void subtractPut(@Const @ByRef NDArray other);

        /**
        *  unary negation operator; it flips the sign of all array elements
        */
        public native @ByVal @Name("operator -") NDArray subtract();
        

        /**
        *  in-place pairwise multiplication operator: array *= other
        *  other - input array to multiply by
        */
        public native @Name("operator *=") void multiplyPut(@Const @ByRef NDArray other);

        /**
        *  in-place multiplication operator: array *= scalar
        *  scalar - input scalar to multiply by
        */

        /**
        *  in-place pairwise division operator: array /= other
        *  other - input array to divide by
        */
        public native @Name("operator /=") void dividePut(@Const @ByRef NDArray other);

        /**
        *  in-place division operator: array /= scalar
        *  scalar - input scalar to divide by
        */

        /**
        *  friend function which implements mathematical multiplication of two arrays
        *  left - input array
        *  right - input array
        */
        

        /**
        *  returns a vector containing _buffer as a flat binary array
        */
        public native @StdVector BytePointer asByteVector();

        /**
        *  makes the array an identity matrix (not necessarily square): all diagonal elements are set to 1, the rest to 0
        */
        public native void setIdentity();

        /**
        *  swaps the contents of two arrays
        *  PLEASE NOTE: this method does not take the shapes of the arrays into account; the shapes may differ, with one condition: the array lengths must be the same
        */
        public native void swapUnsafe(@ByRef NDArray other);

        /**
        *  returns a vector whose buffer points to the corresponding diagonal elements of the array
        *  type - kind of vector to return: column ('c') or row ('r')
        */
        public native @ByVal NDArray diagonal(byte type );

        /**
        * fills the target matrix with the given value in one or two directions from the main diagonal:
        *   - down from the main diagonal starting at subdiagonal number "lower" if direction = 'l' (down) or 'b' (both)
        *   - up from the main diagonal starting at superdiagonal number "upper" if direction = 'u' (up) or 'b' (both)
        * direction - in what direction to fill the matrix. There are 3 possible directions:
        *   'u' - fill up; mathematically this corresponds to a lower triangular matrix, subdiagonal "lower" remains unaffected
        *   'l' - fill down; mathematically this corresponds to an upper triangular matrix, superdiagonal "upper" remains unaffected
        *   'b' - fill in both directions; both "lower" and "upper" are taken into account
        * the rest of the target elements are equal to the corresponding elements of this array
        * target and this array should have the same shape, except when this_rank = 1 (in that case target_rank should be 2)
        */

		/**
        *  changes the array by repeating it as many times as needed to acquire a new shape equal to the input shape
        *
        *  shape  - contains the new shape to broadcast the array to
        *  target - optional argument; if target != nullptr the resulting array is placed in target, otherwise the tile operation is done in place
        */
        public native @ByVal NDArray tileToShape(@Cast("const Nd4jLong*") LongPointer shapeInfo);
        public native @ByVal NDArray tileToShape(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
        public native @ByVal NDArray tileToShape(@Cast("const Nd4jLong*") long[] shapeInfo);
        public native void tileToShape(@Cast("Nd4jLong*") @StdVector LongPointer shape, @ByRef NDArray target);
        public native void tileToShape(@Cast("Nd4jLong*") @StdVector LongBuffer shape, @ByRef NDArray target);
        public native void tileToShape(@Cast("Nd4jLong*") @StdVector long[] shape, @ByRef NDArray target);
// #ifndef __JAVACPP_HACK__
// #endif

        public native @ByVal NDArray asT(@Cast("sd::DataType") int dtype);


        public native void linspace(double start);

        public native void linspace(double start, double step);

        /**
        *  calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...]
        */
        public native double getTrace();

        public native @ByVal ResultSet multipleTensorsAlongDimension(@StdVector IntPointer indices, @StdVector IntPointer dimensions);
        public native @ByVal ResultSet multipleTensorsAlongDimension(@StdVector IntBuffer indices, @StdVector IntBuffer dimensions);
        public native @ByVal ResultSet multipleTensorsAlongDimension(@StdVector int[] indices, @StdVector int[] dimensions);

        public native @ByVal ResultSet allTensorsAlongDimension(@StdVector IntPointer dimensions);
        public native @ByVal ResultSet allTensorsAlongDimension(@StdVector IntBuffer dimensions);
        public native @ByVal ResultSet allTensorsAlongDimension(@StdVector int[] dimensions);

        public native @ByVal ResultSet allExamples();

        /**
        *  set _shapeInfo
        */
        public native void setShapeInfo(@Cast("const Nd4jLong*") LongPointer shapeInfo);
        public native void setShapeInfo(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
        public native void setShapeInfo(@Cast("const Nd4jLong*") long[] shapeInfo);
        public native void setShapeInfo(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const sd::DataType") int dtype);
        public native void setShapeInfo(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const sd::DataType") int dtype);
        public native void setShapeInfo(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const sd::DataType") int dtype);
        public native void setShapeInfo(@Const @ByRef ShapeDescriptor descriptor);
        public native void setShapeInfo(@Const @ByRef ConstantShapeBuffer shapeBuffer);

        /**
        *  returns absolute offset which corresponds to given sequential index
        */
        public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong") long i);

        /**
        *  returns a reference to the array element with the given index
        */


        /**
        *  returns array element with given index
        *  i - element index in array
        */


        /**
        *  default destructor
        */

        /**
        *  set _shapeInfo
        */

        /**
        *  returns the value of "dim" dimension
        */
        public native @Cast("Nd4jLong") long sizeAt(int dim);

        /**
        *  returns stride of "dim" dimension
        */
        public native @Cast("Nd4jLong") long strideAt(int dim);

        /**
        *  returns order of array
        */
        public native char ordering();

        /**
        *  return _isView
        */
        public native @Cast("bool") boolean isView();

        /**
        *  returns shape portion of shapeInfo
        */
        public native @Cast("Nd4jLong*") LongPointer shapeOf();

        /**
        *  returns strides portion of shapeInfo
        */
        public native @Cast("Nd4jLong*") LongPointer stridesOf();

        /**
        *  returns rank of array
        */
        public native int rankOf();

        /**
        *  returns length of array
        */
        public native @Cast("Nd4jLong") long lengthOf();

        /**
        *  returns number of rows in array
        */
        public native @Cast("Nd4jLong") long rows();

        /**
        *  returns number of columns in array
        */
        public native @Cast("Nd4jLong") long columns();

        /**
        *  returns size of array elements type
        */
        public native @Cast("size_t") long sizeOfT();

        /**
        *  returns element-wise-stride
        */
        public native @Cast("Nd4jLong") long ews();

        // returns true if arrays have same shape
        public native @Cast("bool") boolean isSameShape(@Const NDArray other);
        public native @Cast("bool") boolean isSameShape(@Cast("Nd4jLong*") @StdVector LongPointer shape);
        public native @Cast("bool") boolean isSameShape(@Cast("Nd4jLong*") @StdVector LongBuffer shape);
        public native @Cast("bool") boolean isSameShape(@Cast("Nd4jLong*") @StdVector long[] shape);
        public native @Cast("bool") boolean areSameShapeAndType(@Const @ByRef NDArray other);

        /**
        *  returns true if these two NDArrays have same rank, dimensions, strides, ews and order
        */
        public native @Cast("bool") boolean isSameShapeStrict(@Const @ByRef NDArray other);

        /**
        *  returns true if buffer && shapeInfo were defined (non nullptr)
        */
        public native @Cast("bool") boolean nonNull();

        /**
        *  returns array element with given index from linear buffer
        *  i - element index in array
        */

        /**
        *  returns element with given indexes from 2D array
        *  i - number of row
        *  j - number of column
        */

        /**
        *  returns element with given indexes from 3D array
        *  i - height
        *  j - width
        *  k - depth
        */

        /**
        *  returns element with given indexes from DD array
        */

        /**
        *  returns array-scalar containing element of this array with given index
        *  i - element index in array
        */
        public native @ByVal NDArray e(@Cast("const Nd4jLong") long i);

        /**
        *  assigns given scalar to array element by given index, regards array buffer as linear
        *  i - element index in array
        *  value - scalar value to assign
        */

        public native void p(@Cast("const Nd4jLong") long i, @Const @ByRef NDArray value);

        /**
        *  assigns given scalar to 2D array element by given indexes
        *  i - number of row
        *  j - number of column
        *  value - scalar value to assign
        */

        /**
        *  assigns given scalar to 3D array element by given indexes
        *  i - height
        *  j - width
        *  k - depth
        *  value - scalar value to assign
        */
        public native void p(@Cast("const Nd4jLong") long i, @Cast("const Nd4jLong") long j, @Cast("const Nd4jLong") long k, @Cast("const Nd4jLong") long l, @Const @ByRef NDArray value);

        /**
        *  returns true if array is 2D
        */
        public native @Cast("bool") boolean isMatrix();

        /**
        *  returns true if array is vector
        */
        public native @Cast("bool") boolean isVector();

        /**
        *  returns true if array is column vector
        */
        public native @Cast("bool") boolean isColumnVector();

        /**
        *  returns true if array is row vector
        */
        public native @Cast("bool") boolean isRowVector();

        /**
        *  returns true if all dimensions of array except one are unities, for example: [1,1,n,1], [n,1,1], [n], ...
        *  posOfNonUnityDim - receives the position of the single dimension whose value is > 1
        */
        public native @Cast("bool") boolean isCommonVector(@ByRef IntPointer posOfNonUnityDim);
        public native @Cast("bool") boolean isCommonVector(@ByRef IntBuffer posOfNonUnityDim);
        public native @Cast("bool") boolean isCommonVector(@ByRef int[] posOfNonUnityDim);


        /**
        *  returns true if array is scalar
        */
        public native @Cast("bool") boolean isScalar();

        /**
        * Returns data type of this array
        * @return
        */
        public native @Cast("sd::DataType") int dataType();

        /**
         * This method returns true if the array is from Integer space
         * @return
         */
        public native @Cast("bool") boolean isZ();

        /**
         * This method returns true if array is from Real space
         * @return
         */
        public native @Cast("bool") boolean isR();

        /**
         * This method returns true if array is from Boolean space
         * @return
         */
        public native @Cast("bool") boolean isB();

        /**
         * This method returns true if array contains Complex numbers
         * @return
         */
        public native @Cast("bool") boolean isC();

        /**
         * This method returns true if array contains String
         * @return
         */
        public native @Cast("bool") boolean isS();

        public native @Cast("bool") boolean isAttached();

        public native NDArray detach();

        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NDArray other);

        public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NDArray other);
    }




//////////////////////////////////////////////////////////////////////////
///// IMPLEMENTATION OF INLINE METHODS /////
//////////////////////////////////////////////////////////////////////////




//////////////////////////////////////////////////////////////////////////
// still the definition of inline function must be in header file


//////////////////////////////////////////////////////////////////////////
// returns true if these two NDArrays have same _shapeInfo
// still the definition of inline function must be in header file



// #ifndef __JAVACPP_HACK__
// #endif

// #if defined(__CUDACC__) //&& defined(BUILD_TESTS)
// for CUDA we still need this stuff inline
// #include 
// #endif



// #endif


// Parsed from array/NDArrayList.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// This class describes a collection of NDArrays
//
// @author raver119!gmail.com
//

// #ifndef NDARRAY_LIST_H
// #define NDARRAY_LIST_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class NDArrayList extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public NDArrayList(Pointer p) { super(p); }
    
        public NDArrayList(int height, @Cast("bool") boolean expandable/*=false*/) { super((Pointer)null); allocate(height, expandable); }
        private native void allocate(int height, @Cast("bool") boolean expandable/*=false*/);
        public NDArrayList(int height) { super((Pointer)null); allocate(height); }
        private native void allocate(int height);

        public native @Cast("sd::DataType") int dataType();

        public native NDArray read(int idx);
        public native NDArray readRaw(int idx);
        public native @Cast("Nd4jStatus") int write(int idx, NDArray array);
        public native NDArray pick(@StdVector IntPointer indices);
        public native NDArray pick(@StdVector IntBuffer indices);
        public native NDArray pick(@StdVector int[] indices);
        public native @Cast("bool") boolean isWritten(int index);

        public native @Cast("Nd4jLong*") @StdVector LongPointer shape();

        public native NDArray stack();
        public native void unstack(NDArray array, int axis);

        public native @ByRef IntIntPair id();
        public native @StdString @ByRef @Cast({"char*", "std::string*"}) BytePointer name();
        //sd::memory::Workspace* workspace();
        public native LaunchContext context();
        public native NDArrayList clone();

        public native @Cast("bool") boolean equals(@ByRef NDArrayList other);

        public native int elements();
        public native int height();

        public native int counter();
    }
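
    /**
    *  Editor's sketch (hedged, not part of the generated bindings): a minimal use of NDArrayList as
    *  declared above - write a few arrays, read one back, and stack the collection into a single
    *  NDArray. The NDArray construction follows the long[]/double[] constructor shown earlier;
    *  index, height and expandable values are illustrative only.
    *
    *      NDArrayList list = new NDArrayList(2, false);                            // fixed height of 2
    *      list.write(0, new NDArray('c', new long[]{3}, new double[]{1, 2, 3}));
    *      list.write(1, new NDArray('c', new long[]{3}, new double[]{4, 5, 6}));
    *      NDArray first   = list.read(0);
    *      NDArray stacked = list.stack();                                          // expected shape [2, 3]
    */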


// #endif

// Parsed from array/ResultSet.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// This class is suited for representing execution results.
//
// PLEASE NOTE: it will delete all stored NDArrays upon destructor call
//
// @author [email protected]
//

// #ifndef LIBND4J_RESULTSET_H
// #define LIBND4J_RESULTSET_H

// #include 
// #include 
// #include 
// #include  // forward declaration of template class NDArray

    @Namespace("sd") @NoOffset public static class ResultSet extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ResultSet(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ResultSet(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ResultSet position(long position) {
            return (ResultSet)super.position(position);
        }
        @Override public ResultSet getPointer(long i) {
            return new ResultSet((Pointer)this).position(position + i);
        }
    
        public ResultSet() { super((Pointer)null); allocate(); }
        private native void allocate();

// #ifndef __JAVACPP_HACK__
// #endif

        public ResultSet(@Const @ByRef ResultSet other) { super((Pointer)null); allocate(other); }
        @NoException private native void allocate(@Const @ByRef ResultSet other);

        public native @ByRef @Name("operator =") @NoException ResultSet put(@Const @ByRef ResultSet other);

        // move constructor

        // move assignment operator

        public native int size();
        public native NDArray at(@Cast("const unsigned long") long idx);
        public native @Name("operator []") NDArray get(@Cast("const unsigned long") long idx);
        public native void push_back(NDArray array);

        public native @Cast("Nd4jStatus") int status();
        public native void setStatus(@Cast("Nd4jStatus") int status);
        public native void purge();
        public native void setNonRemovable();
    }
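
    /**
    *  Editor's sketch (hedged): ResultSet as declared above is the container returned by
    *  NDArray.allTensorsAlongDimension shown earlier; note the class comment - it deletes the stored
    *  NDArrays when destroyed. Here "matrix" is an assumed rank-2 NDArray, used for illustration only.
    *
    *      ResultSet rows = matrix.allTensorsAlongDimension(new int[]{1});   // one sub-array per row
    *      for (int i = 0; i < rows.size(); i++) {
    *          rows.at(i).printIndexedBuffer("row " + i);
    *      }
    */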


// #endif //LIBND4J_RESULTSET_H


// Parsed from graph/RandomGenerator.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_GRAPH_RNG_H
// #define LIBND4J_GRAPH_RNG_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// #ifdef __CUDACC__
// #endif
// #ifdef __CUDACC__
// #else
        @Namespace("sd::graph") @NoOffset public static class RandomGenerator extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public RandomGenerator(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public RandomGenerator(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public RandomGenerator position(long position) {
                return (RandomGenerator)super.position(position);
            }
            @Override public RandomGenerator getPointer(long i) {
                return new RandomGenerator((Pointer)this).position(position + i);
            }
        
            public native @Cast("uint32_t") int xoroshiro32(@Cast("uint64_t") long index);
            public native @Cast("uint64_t") long xoroshiro64(@Cast("uint64_t") long index);
            public RandomGenerator(@Cast("Nd4jLong") long rootSeed/*=0*/, @Cast("Nd4jLong") long nodeSeed/*=0*/) { super((Pointer)null); allocate(rootSeed, nodeSeed); }
            private native void allocate(@Cast("Nd4jLong") long rootSeed/*=0*/, @Cast("Nd4jLong") long nodeSeed/*=0*/);
            public RandomGenerator() { super((Pointer)null); allocate(); }
            private native void allocate();

            /**
             * This method allows the graph-level state to be changed at runtime.
             * PLEASE NOTE: this method changes the node state as well.
             */
            public native void setStates(@Cast("Nd4jLong") long rootSeed, @Cast("Nd4jLong") long nodeState/*=0*/);
            public native void setStates(@Cast("Nd4jLong") long rootSeed);

            

            /**
             * This method returns a T value between from and to
             */

            /**
             * This method returns a T value between 0 and MAX_T
             */

            /**
             * These two methods are provided for the JVM
             * @param index
             * @return
             */
            public native int relativeInt(@Cast("Nd4jLong") long index);
            public native @Cast("Nd4jLong") long relativeLong(@Cast("Nd4jLong") long index);

            public native void rewindH(@Cast("uint64_t") long steps);

            /**
             * These methods set only the node state, leaving the root state unchanged
             */
            public native void setSeed(int seed);

            public native void setSeed(@Cast("uint64_t") long seed);

            public native @Cast("Nd4jLong") long rootState();

            public native @Cast("Nd4jLong") long nodeState();
        }
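
            // Hypothetical usage sketch for the RandomGenerator bindings above (not part of
            // the generated API); the seed values are arbitrary.
            //
            //     RandomGenerator rng = new RandomGenerator(119L, 17L);   // rootSeed, nodeSeed
            //     int i = rng.relativeInt(0L);         // deterministic for a given (state, index)
            //     long l = rng.relativeLong(1L);
            //     rng.rewindH(2L);                     // advance the generator state
            //     rng.setStates(119L, 18L);            // re-seed root and node states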


        //////
        @Namespace("sd::graph") public native @Cast("uint32_t") int rotl(@Cast("const uint32_t") int x, int k);

        @Namespace("sd::graph") public native @Cast("uint64_t") long rotl(@Cast("const uint64_t") long x, int k);

        @Namespace("sd::graph") public native @Cast("uint32_t") int next(@Cast("uint32_t") int s0, @Cast("uint32_t") int s1, @Cast("uint32_t") int s2, @Cast("uint32_t") int s3);



// #endif


// Parsed from graph/Variable.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_VARIABLE_H
// #define LIBND4J_VARIABLE_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// #ifndef __JAVACPP_HACK__

// #endif
        @Namespace("sd::graph") @NoOffset public static class Variable extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Variable(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Variable(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Variable position(long position) {
                return (Variable)super.position(position);
            }
            @Override public Variable getPointer(long i) {
                return new Variable((Pointer)this).position(position + i);
            }
        
            public Variable(@Cast("bool") boolean placeHolder) { super((Pointer)null); allocate(placeHolder); }
            private native void allocate(@Cast("bool") boolean placeHolder);
            public Variable(NDArray arrayw, @Cast("char*") String name, int id, int idx/*=0*/) { super((Pointer)null); allocate(arrayw, name, id, idx); }
            private native void allocate(NDArray arrayw, @Cast("char*") String name, int id, int idx/*=0*/);
            public Variable(NDArray arrayw, @Cast("char*") String name, int id) { super((Pointer)null); allocate(arrayw, name, id); }
            private native void allocate(NDArray arrayw, @Cast("char*") String name, int id);
            public Variable(NDArray arrayw, @Cast("char*") BytePointer name, int id, int idx/*=0*/) { super((Pointer)null); allocate(arrayw, name, id, idx); }
            private native void allocate(NDArray arrayw, @Cast("char*") BytePointer name, int id, int idx/*=0*/);
            public Variable(NDArray arrayw, @Cast("char*") BytePointer name, int id) { super((Pointer)null); allocate(arrayw, name, id); }
            private native void allocate(NDArray arrayw, @Cast("char*") BytePointer name, int id);
            public Variable(NDArray array/*=nullptr*/, @Cast("char*") String name/*=nullptr*/) { super((Pointer)null); allocate(array, name); }
            private native void allocate(NDArray array/*=nullptr*/, @Cast("char*") String name/*=nullptr*/);
            public Variable() { super((Pointer)null); allocate(); }
            private native void allocate();
            public Variable(NDArray array/*=nullptr*/, @Cast("char*") BytePointer name/*=nullptr*/) { super((Pointer)null); allocate(array, name); }
            private native void allocate(NDArray array/*=nullptr*/, @Cast("char*") BytePointer name/*=nullptr*/);

// #ifndef __JAVACPP_HACK__
// #endif

            public native Variable clone();

            public native @Cast("bool") boolean hasNDArray();
            public native NDArray getNDArray();
            public native void setNDArray(NDArray array);

            public native @Cast("bool") boolean hasNDArrayList();
            public native NDArrayList getNDArrayList();
            public native void setNDArrayList(NDArrayList list);

            public native @Cast("bool") boolean isExternal();
            public native @Cast("bool") boolean isReadOnly();
            public native @Cast("bool") boolean isEmpty();
            public native @Cast("bool") boolean isRemovable();

            public native @Cast("bool") boolean isPlaceholder();

            public native @Cast("sd::graph::VariableType") int variableType();
            public native void setVariableType(@Cast("sd::graph::VariableType") int variableType);

            /**
             * This method returns the InputType of this variable
             */
            //InputType variableType() {
            //    return _variableType;
            //}

            public native void markExternal(@Cast("bool") boolean reallyExternal);
            public native void markReadOnly(@Cast("bool") boolean reallyReadOnly);
            public native void markRemovable(@Cast("bool") boolean reallyRemovable);

            public native int id();
            public native int index();
            public native void setIndex(int index);
            public native void setId(int id);
            public native void setId(int id, int idx);

            public native @StdString @Cast({"char*", "std::string*"}) BytePointer getName();
            public native void setName(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

            public native @Cast("Nd4jLong*") @StdVector LongPointer shape();

// #ifndef __JAVACPP_HACK__
// #endif
        }
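
            // Hypothetical usage sketch for the Variable bindings above (not part of the
            // generated API); `array` stands for an NDArray obtained elsewhere.
            //
            //     Variable var = new Variable(array, "my_var", 1, 0);   // array, name, id, index
            //     if (var.hasNDArray()) {
            //         NDArray value = var.getNDArray();
            //     }
            //     var.markReadOnly(true);
            //     int id = var.id();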
    



// #endif //LIBND4J_VARIABLE_H


// Parsed from graph/VariablesSet.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 15/11/17.
//

// #ifndef LIBND4J_VARIABLESSET_H
// #define LIBND4J_VARIABLESSET_H

// #include 
// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class VariablesSet extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public VariablesSet(Pointer p) { super(p); }
        
            public VariablesSet(@Cast("Nd4jStatus") int status/*=ND4J_STATUS_OK*/) { super((Pointer)null); allocate(status); }
            private native void allocate(@Cast("Nd4jStatus") int status/*=ND4J_STATUS_OK*/);
            public VariablesSet() { super((Pointer)null); allocate(); }
            private native void allocate();

            public native @Cast("Nd4jStatus") int status();

            public native int size();

            public native void push_back(Variable variable);

            public native Variable at(int index);

        }
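
            // Hypothetical usage sketch for the VariablesSet bindings above (not part of the
            // generated API); `var` stands for a Variable created elsewhere.
            //
            //     VariablesSet set = new VariablesSet();
            //     set.push_back(var);
            //     Variable first = set.at(0);
            //     int status = set.status();           // Nd4jStatus code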
    




// #endif //LIBND4J_VARIABLESSET_H


// Parsed from graph/FlowPath.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 16/11/17.
//

// #ifndef LIBND4J_FLOWPATH_H
// #define LIBND4J_FLOWPATH_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class FlowPath extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public FlowPath(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public FlowPath(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public FlowPath position(long position) {
                return (FlowPath)super.position(position);
            }
            @Override public FlowPath getPointer(long i) {
                return new FlowPath((Pointer)this).position(position + i);
            }
        
            public FlowPath() { super((Pointer)null); allocate(); }
            private native void allocate();

            public native void setInnerTime(int nodeId, @Cast("Nd4jLong") long time);
            public native void setOuterTime(int nodeId, @Cast("Nd4jLong") long time);

            public native @Cast("Nd4jLong") long innerTime(int nodeId);
            public native @Cast("Nd4jLong") long outerTime(int nodeId);

            public native @Cast("bool") boolean isNodeActive(int nodeId);
            public native void markNodeActive(int nodeId, @Cast("bool") boolean isActive);

            public native @Cast("bool") boolean wasExecuted(int nodeId);
            public native void markExecuted(int nodeId, @Cast("bool") boolean wasExecuted);

            public native int branch(int nodeId);
            public native void markBranch(int nodeId, int index);

            // Frame-related methods

            public native void registerFrame(@Cast("Nd4jLong") long frameId);
            public native void forgetFrame(@Cast("Nd4jLong") long frameId);

            public native @Cast("bool") boolean isFrameActive(@Cast("Nd4jLong") long frameId);
            public native void markFrameActive(@Cast("Nd4jLong") long frameId, @Cast("bool") boolean isActive);

            public native @Cast("bool") boolean isRewindPlanned(@Cast("Nd4jLong") long frameId);
            public native void planRewind(@Cast("Nd4jLong") long frameId, @Cast("bool") boolean reallyRewind);

            public native int getRewindPosition(@Cast("Nd4jLong") long frameId);
            public native void setRewindPosition(@Cast("Nd4jLong") long frameId, int _position);
            public native void setRewindPositionOnce(@Cast("Nd4jLong") long frameId, int _position);

            public native void incrementNumberOfCycles(@Cast("Nd4jLong") long frameId);
            public native @Cast("Nd4jLong") long getNumberOfCycles(@Cast("Nd4jLong") long frameId);

            public native GraphProfile profile();
        }
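
            // Hypothetical usage sketch for the FlowPath bindings above (not part of the
            // generated API); node and frame ids are arbitrary, and the time value uses
            // whatever unit the native profiler expects.
            //
            //     FlowPath path = new FlowPath();
            //     path.setOuterTime(1, 1000L);         // nodeId, time
            //     path.markNodeActive(1, true);
            //     boolean active = path.isNodeActive(1);
            //     path.registerFrame(42L);
            //     path.markFrameActive(42L, true);
            //     long cycles = path.getNumberOfCycles(42L);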
    



// #endif //LIBND4J_FLOWPATH_H


// Parsed from graph/Intervals.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by [email protected] on 24.10.2017.
//

// #ifndef LIBND4J_INTERVALS_H
// #define LIBND4J_INTERVALS_H

// #include 
// #include 
// #include 
// #include 

    @Namespace("sd") @NoOffset public static class Intervals extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Intervals(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Intervals(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public Intervals position(long position) {
            return (Intervals)super.position(position);
        }
        @Override public Intervals getPointer(long i) {
            return new Intervals((Pointer)this).position(position + i);
        }
    

        // default constructor
        public Intervals() { super((Pointer)null); allocate(); }
        private native void allocate();
        
        // constructor
        public Intervals(@Const @ByRef LongVectorVector content ) { super((Pointer)null); allocate(content); }
        private native void allocate(@Const @ByRef LongVectorVector content );
        
        // accessing operator
        public native @Cast("Nd4jLong*") @StdVector @Name("operator []") LongPointer get(@Cast("const Nd4jLong") long i);

        // returns size of _content
        public native int size();

    }
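
    // Hypothetical usage sketch for the Intervals bindings above (not part of the
    // generated API); each inner long[] is assumed to describe one interval.
    //
    //     LongVectorVector content = new LongVectorVector(new long[]{0L, 2L}, new long[]{1L, 3L});
    //     Intervals intervals = new Intervals(content);
    //     LongPointer first = intervals.get(0);        // contents of the first interval
    //     int count = intervals.size();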




// #endif //LIBND4J_INTERVALS_H


// Parsed from graph/Stash.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_STASH_H
// #define LIBND4J_STASH_H

//#include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class KeyPair extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public KeyPair(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public KeyPair(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public KeyPair position(long position) {
                return (KeyPair)super.position(position);
            }
            @Override public KeyPair getPointer(long i) {
                return new KeyPair((Pointer)this).position(position + i);
            }
        
            public KeyPair(int node/*=0*/, @Cast("char*") String name/*=nullptr*/) { super((Pointer)null); allocate(node, name); }
            private native void allocate(int node/*=0*/, @Cast("char*") String name/*=nullptr*/);
            public KeyPair() { super((Pointer)null); allocate(); }
            private native void allocate();
            public KeyPair(int node/*=0*/, @Cast("char*") BytePointer name/*=nullptr*/) { super((Pointer)null); allocate(node, name); }
            private native void allocate(int node/*=0*/, @Cast("char*") BytePointer name/*=nullptr*/);

            public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef KeyPair other);

            public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef KeyPair other);

            public native int key();
            public native @StdString BytePointer name();
        }
    


// #ifndef __JAVACPP_HACK__

// #endif
        @Namespace("sd::graph") @NoOffset public static class Stash extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Stash(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Stash(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Stash position(long position) {
                return (Stash)super.position(position);
            }
            @Override public Stash getPointer(long i) {
                return new Stash((Pointer)this).position(position + i);
            }
        
            public Stash() { super((Pointer)null); allocate(); }
            private native void allocate();

            //void storeArray(sd::graph::Block& block, const char *name, sd::NDArray *array);
            public native void storeArray(int nodeId, @Cast("char*") String name, NDArray array);
            public native void storeArray(int nodeId, @Cast("char*") BytePointer name, NDArray array);

            //bool checkStash(sd::graph::Block& block, const char *name);
            public native @Cast("bool") boolean checkStash(int nodeId, @Cast("char*") String name);
            public native @Cast("bool") boolean checkStash(int nodeId, @Cast("char*") BytePointer name);

            //sd::NDArray* extractArray(sd::graph::Block& block, const char *name);
            public native NDArray extractArray(int nodeId, @Cast("char*") String name);
            public native NDArray extractArray(int nodeId, @Cast("char*") BytePointer name);

            public native void clear();
        }
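
            // Hypothetical usage sketch for the Stash bindings above (not part of the
            // generated API); `array` stands for an NDArray obtained elsewhere, and the
            // node id and name are arbitrary.
            //
            //     Stash stash = new Stash();
            //     stash.storeArray(1, "hidden_state", array);     // nodeId, name, array
            //     if (stash.checkStash(1, "hidden_state")) {
            //         NDArray restored = stash.extractArray(1, "hidden_state");
            //     }
            //     stash.clear();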
    






// #endif //LIBND4J_STASH_H


// Parsed from graph/GraphState.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 23.01.18.
//

// #ifndef LIBND4J_GRAPHSTATE_H
// #define LIBND4J_GRAPHSTATE_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

    @Namespace("sd::graph") @NoOffset public static class GraphState extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public GraphState(Pointer p) { super(p); }
    
        public GraphState(@Cast("Nd4jLong") long id) { super((Pointer)null); allocate(id); }
        private native void allocate(@Cast("Nd4jLong") long id);

        /**
         *
         * @return
         */
        public native @Cast("Nd4jLong") long id();

        /**
         * This method adds a scope to this state tracker
         *
         * @param scopeId
         * @return
         */
        public native @Cast("Nd4jStatus") int registerScope(int scopeId);

        /**
         * This method checks whether a scope with the given ID exists
         * 
         * @param scopeId - ID of the scope
         * @return - TRUE if scope exists, FALSE otherwise
         */
        public native @Cast("bool") boolean hasScope(int scopeId);

        /**
         * This method removes the specified scope from this state tracker
         *
         * @param scopeId
         * @return
         */
        public native @Cast("Nd4jStatus") int forgetScope(int scopeId);

// #ifndef __JAVACPP_HACK__
// #endif
        /**
         * This method adds the given op to the end of the specified scope
         *
         * @param scopeId
         * @param opNum
         * @param type
         * @return
         */
        public native @Cast("Nd4jStatus") int attachOpToScope(int scopeId, @Cast("Nd4jLong") long opNum, int type, @ByVal ArgumentsList inputs);

        /**
         * This method adds a return statement to the specified scope
         *
         * PLEASE NOTE: it should be used only in body scopes
         *
         * @param scopeId
         * @param nodeId
         * @param args
         * @return
         */
        public native @Cast("Nd4jStatus") int defineReturn(int scopeId, int nodeId, @ByVal ArgumentsList args);

        /**
         * This method returns the current variable space of this state holder
         *
         * @return
         */
        public native VariableSpace variableSpace();
    }
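
    // Hypothetical usage sketch for the GraphState bindings above (not part of the
    // generated API); the state id and scope id are arbitrary.
    //
    //     GraphState state = new GraphState(119L);
    //     int status = state.registerScope(1);         // Nd4jStatus code
    //     boolean exists = state.hasScope(1);
    //     VariableSpace vs = state.variableSpace();
    //     state.forgetScope(1);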





// #endif //LIBND4J_GRAPHSTATE_H


// Parsed from graph/VariableSpace.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_VARIABLESPACE_H
// #define LIBND4J_VARIABLESPACE_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class VariableSpace extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public VariableSpace(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public VariableSpace(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public VariableSpace position(long position) {
                return (VariableSpace)super.position(position);
            }
            @Override public VariableSpace getPointer(long i) {
                return new VariableSpace((Pointer)this).position(position + i);
            }
        
            public VariableSpace() { super((Pointer)null); allocate(); }
            private native void allocate();

            public native @ByRef @Name("operator =") VariableSpace put(@Const @ByRef VariableSpace other);

            public native int numberOfPlaceholders();
            public native @Cast("sd::graph::Variable**") @StdVector PointerPointer getPlaceholders();
            public native void setWorkspace(Workspace workspace);

            public native LaunchContext launchContext();

            public native @Cast("bool") boolean hasExternalVariable(int it);
            public native @Cast("bool") boolean hasExternalVariable(@ByRef IntIntPair pair);
            public native @Cast("bool") boolean hasExternalVariable(@StdString @Cast({"char*", "std::string*"}) BytePointer symbol);

            public native @Cast("bool") boolean hasVariable(int id);
            public native @Cast("bool") boolean hasVariable(int id, int idx);
            public native @Cast("bool") boolean hasVariable(@ByRef IntIntPair pair);
            public native @Cast("bool") boolean hasVariable(@StdString @Cast({"char*", "std::string*"}) BytePointer symbol);

            public native Variable getVariable(int id);
            public native Variable getVariable(int id, int idx);
            public native Variable getVariable(@ByRef IntIntPair pair);
            public native Variable getVariable(@StdString @Cast({"char*", "std::string*"}) BytePointer symbol);

            public native @Cast("sd::graph::Variable**") @StdVector PointerPointer getVariables();

            public native Variable putVariable(@ByRef IntIntPair pair, NDArray array);
            public native void putVariable(@ByRef IntIntPair pair, Variable variable);
            public native void putVariable(int id, Variable variable);
            public native void putVariable(int id, NDArray array);
            public native Variable putVariable(int id, int idx, NDArray array);
            public native void putVariable(int id, int idx, Variable array);

            public native void dropVariable(@ByRef IntIntPair pair);
            public native void dropVariable(int id, int idx);

            public native void trackList(NDArrayList list);

            public native void putOutputVariable(Variable variable);

            public native void replaceVariable(Variable variable);

            // memory-related statistics
            public native @Cast("Nd4jLong") long externalMemory();
            public native @Cast("Nd4jLong") long internalMemory();
            public native @Cast("Nd4jLong") long totalMemory();

            public native int externalEntries();
            public native int internalEntries();
            public native int totalEntries();

            public native VariableSpace clone();

            public native @Cast("sd::graph::Variable**") @StdVector PointerPointer handles();


            public native VariableSpace asT();
            public native void injectVariable(@ByRef IntIntPair pair, Variable variable);

            public native Stash getStash();

            public native @Cast("sd::graph::Variable**") @StdVector PointerPointer getExternalVariables();

            public native void setFlowPath(FlowPath timers);
            public native FlowPath flowPath();
        }
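
            // Hypothetical usage sketch for the VariableSpace bindings above (not part of the
            // generated API); `array` stands for an NDArray obtained elsewhere.
            //
            //     VariableSpace space = new VariableSpace();
            //     space.putVariable(1, array);
            //     if (space.hasVariable(1)) {
            //         Variable v = space.getVariable(1);
            //     }
            //     long bytes = space.totalMemory();
            //     int entries = space.totalEntries();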
    



// #endif //LIBND4J_VARIABLESPACE_H


// Parsed from helpers/helper_generator.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_HELPER_GENERATOR_H
// #define LIBND4J_HELPER_GENERATOR_H

// #include 
// #include 
// #include 
// #include 

// #ifdef _MSC_VER
// include for uint64_t on MSVC
// #include 
// #elif ANDROID
// #include 

// #ifndef UINT64_C
// #if defined(__LP64__)
// #define UINT64_C(c)     c ## UL
// #else
// #define UINT64_C(c)     c ## ULL
// #endif //LP64
// #endif // UINT64

// #endif // MSVC/ANDROID


// #ifdef __GNUC__
// #include 
// #endif

// #ifdef __CUDACC__
// #else
        @Namespace("sd::random") @NoOffset public static class RandomBuffer extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public RandomBuffer(Pointer p) { super(p); }
        
            /**
             * This method allocates a buffer of size * sizeof(Nd4jLong)
             *
             * @param size
             * @return
             */
// #ifdef __CUDACC__
// #endif
            public RandomBuffer(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") LongPointer buffer) { super((Pointer)null); allocate(seed, size, buffer); }
            private native void allocate(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") LongPointer buffer);
            public RandomBuffer(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") LongBuffer buffer) { super((Pointer)null); allocate(seed, size, buffer); }
            private native void allocate(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") LongBuffer buffer);
            public RandomBuffer(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") long[] buffer) { super((Pointer)null); allocate(seed, size, buffer); }
            private native void allocate(@Cast("Nd4jLong") long seed, @Cast("Nd4jLong") long size, @Cast("uint64_t*") long[] buffer);

            public native @Cast("uint64_t*") LongPointer getBuffer();

            public native @Cast("uint64_t*") LongPointer getDeviceBuffer();

// #ifdef __CUDACC__
// #endif

            public native @Cast("Nd4jLong") long getSize();

            public native @Cast("Nd4jLong") long getSeed();

            public native void setSeed(@Cast("Nd4jLong") long seed);

            public native @Cast("Nd4jLong") long getAllocatedSize();

            public native @Cast("Nd4jLong") long getOffset();

            public native void setOffset(@Cast("Nd4jLong") long offset);

            public native void reSeed(@Cast("Nd4jLong") long amplifier);

            public native @Cast("uint64_t") long getElement(@Cast("Nd4jLong") long _position);

            public native @Cast("uint64_t") long next64(@Cast("uint64_t") long shiftedSeed);

            public native @Cast("uint64_t") long rotl(@Cast("const uint64_t") long x, @Cast("uint64_t") long k);

            public native @Cast("uint64_t") long safeShift(@Cast("uint64_t") long x, @Cast("uint64_t") long y);

            public native @Cast("uint64_t") long seedConv(@Cast("Nd4jLong") long seed);

            public native void incrementGeneration();

            public native @Cast("Nd4jLong") long getNextIndex();

            public native @Cast("uint64_t") long getNextElement();


            /**
             * This method skips the given number of elements in the buffer
             *
             * @param numberOfElements number of elements to skip
             */
// #ifdef __CUDACC__
// #endif
            public native void rewindH(@Cast("Nd4jLong") long numberOfElements);

            /**
             * This method returns random int in range [0..MAX_INT]
             * @return
             */
            public native int nextInt();

            public native @Cast("uint64_t") long nextUInt64();

            /**
             * This method returns random int in range [0..to]
             * @param to
             * @return
             */
            public native int nextInt(int to);

            /**
             * This method returns random int in range [from..to]
             * @param from
             * @param to
             * @return
             */
            public native int nextInt(int from, int to);


            /**
             * This method returns random T in range of [0..1]
             * @return
             */

            /**
             * This method returns random T in range of [0..to]
             * @param to
             * @return
             */

            /**
             * This method returns random T in range [from..to]
             * @param from
             * @param to
             * @return
             */

            public native @Cast("uint64_t") long relativeUInt64(@Cast("Nd4jLong") long index);

            /**
             *  relative methods are provided as a workaround for lock-free concurrent execution
             */
            public native int relativeInt(@Cast("Nd4jLong") long index);

            /**
             * This method returns random int within [0..to]
             *
             * @param index
             * @param to
             * @return
             */
            public native int relativeInt(@Cast("Nd4jLong") long index, int to);

            /**
             * This method returns random int within [from..to]
             *
             * @param index
             * @param to
             * @param from
             * @return
             */
            public native int relativeInt(@Cast("Nd4jLong") long index, int from, int to);

            /**
             * This method returns random T within [0..1]
             *
             * @param index
             * @return
             */

/**
 * This method returns random T within [0..to]
 *
 * @param index
 * @param to
 * @return
 */

/**
 * This method returns random T within [from..to]
 *
 * @param index
 * @param from
 * @param to
 * @return
 */

        }

        @Namespace("sd::random") @NoOffset public static class IGenerator extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public IGenerator(Pointer p) { super(p); }
        


            public native RandomBuffer getBuffer();

            public native void setOffset(@Cast("Nd4jLong") long offset);

            public native @Cast("Nd4jLong") long getElementAbsolute(@Cast("Nd4jLong") long _position);

            public native @Cast("Nd4jLong") long getElementRelative(@Cast("Nd4jLong") long _position);

            public native void refreshBuffer();
        }



        @Namespace("sd::random") @NoOffset public static class Xoroshiro128 extends IGenerator {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Xoroshiro128(Pointer p) { super(p); }
        
            public Xoroshiro128(RandomBuffer buffer) { super((Pointer)null); allocate(buffer); }
            private native void allocate(RandomBuffer buffer);

            public native void refreshBuffer();
        }
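
            // Hypothetical usage sketch for the RandomBuffer / Xoroshiro128 bindings above
            // (not part of the generated API); the buffer size of 1024 64-bit words is an
            // arbitrary assumption.
            //
            //     long[] state = new long[1024];
            //     RandomBuffer buffer = new RandomBuffer(119L, state.length, state);  // seed, size, buffer
            //     Xoroshiro128 generator = new Xoroshiro128(buffer);
            //     generator.refreshBuffer();
            //     int bounded = buffer.nextInt(10);    // random int in [0..10]
            //     long raw = buffer.nextUInt64();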
    

// #endif //LIBND4J_HELPER_GENERATOR_H


// Parsed from graph/profiling/GraphProfile.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef ND4J_GRAPH_PROFILE_H
// #define ND4J_GRAPH_PROFILE_H

// #include "NodeProfile.h"
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class GraphProfile extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public GraphProfile(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public GraphProfile(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public GraphProfile position(long position) {
                return (GraphProfile)super.position(position);
            }
            @Override public GraphProfile getPointer(long i) {
                return new GraphProfile((Pointer)this).position(position + i);
            }
        
            public GraphProfile() { super((Pointer)null); allocate(); }
            private native void allocate();

            /**
             * These methods add the given number of bytes to the respective counters
             */
            public native void addToTotal(@Cast("Nd4jLong") long bytes);
            public native void addToActivations(@Cast("Nd4jLong") long bytes);
            public native void addToTemporary(@Cast("Nd4jLong") long bytes);
            public native void addToObjects(@Cast("Nd4jLong") long bytes);

            /**
             * This method sets the graph construction (i.e. deserialization) time in nanoseconds
             */
            public native void setBuildTime(@Cast("Nd4jLong") long nanos);

            /**
             * This method sets graph execution time in nanoseconds.
             */
            public native void setExecutionTime(@Cast("Nd4jLong") long nanos);

            public native void startEvent(@Cast("char*") String name);
            public native void startEvent(@Cast("char*") BytePointer name);
            public native void recordEvent(@Cast("char*") String name);
            public native void recordEvent(@Cast("char*") BytePointer name);
            public native void deleteEvent(@Cast("char*") String name);
            public native void deleteEvent(@Cast("char*") BytePointer name);

            /**
             * This method records the time as a delta from the last saved time
             */
            public native void spotEvent(@Cast("char*") String name);
            public native void spotEvent(@Cast("char*") BytePointer name);

            /**
             * This method returns a pointer to the NodeProfile with the given ID
             * PLEASE NOTE: this method will create a new NodeProfile if none exists
             */
            public native NodeProfile nodeById(int id, @Cast("char*") String name/*=nullptr*/);
            public native NodeProfile nodeById(int id);
            public native NodeProfile nodeById(int id, @Cast("char*") BytePointer name/*=nullptr*/);
            public native @Cast("bool") boolean nodeExists(int id);

            /**
             * This method merges values from another profile report
             * @param other
             */
            public native void merge(GraphProfile other);
            public native void assign(GraphProfile other);

            /**
             * These are utility methods for time measurement
             */
            public native @Cast("Nd4jLong") long currentTime();
            public native @Cast("Nd4jLong") long relativeTime(@Cast("Nd4jLong") long time);

            public native void printOut();
        }
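
            // Hypothetical usage sketch for the GraphProfile bindings above (not part of the
            // generated API); event names, node id and byte counts are arbitrary.
            //
            //     GraphProfile profile = new GraphProfile();
            //     profile.startEvent("forward");
            //     // ... run the graph ...
            //     profile.recordEvent("forward");
            //     profile.addToTotal(1024L);                       // bytes
            //     NodeProfile node = profile.nodeById(1, "conv2d");
            //     profile.printOut();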
    


// #endif

// Parsed from graph/profiling/NodeProfile.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_NODE_PROFILE_H
// #define LIBND4J_NODE_PROFILE_H

// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class NodeProfile extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public NodeProfile(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public NodeProfile(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public NodeProfile position(long position) {
                return (NodeProfile)super.position(position);
            }
            @Override public NodeProfile getPointer(long i) {
                return new NodeProfile((Pointer)this).position(position + i);
            }
        
            public NodeProfile() { super((Pointer)null); allocate(); }
            private native void allocate();

            public NodeProfile(int id, @Cast("char*") String name) { super((Pointer)null); allocate(id, name); }
            private native void allocate(int id, @Cast("char*") String name);
            public NodeProfile(int id, @Cast("char*") BytePointer name) { super((Pointer)null); allocate(id, name); }
            private native void allocate(int id, @Cast("char*") BytePointer name);

            public native void setBuildTime(@Cast("Nd4jLong") long time);
            public native void setPreparationTime(@Cast("Nd4jLong") long time);
            public native void setExecutionTime(@Cast("Nd4jLong") long time);
            public native void setTotalTime(@Cast("Nd4jLong") long time);
            public native void setShapeFunctionTime(@Cast("Nd4jLong") long time);
            public native void setArrayTime(@Cast("Nd4jLong") long time);
            public native void setInputTime(@Cast("Nd4jLong") long time);

            public native void setActivationsSize(@Cast("Nd4jLong") long bytes);
            public native void setTemporarySize(@Cast("Nd4jLong") long bytes);
            public native void setObjectsSize(@Cast("Nd4jLong") long bytes);
            public native void setTotalSize(@Cast("Nd4jLong") long bytes);

            public native void addInputShape(@Cast("const Nd4jLong*") LongPointer shapeInfo);
            public native void addInputShape(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
            public native void addInputShape(@Cast("const Nd4jLong*") long[] shapeInfo);
            public native void addOutputShape(@Cast("const Nd4jLong*") LongPointer shapeInfo);
            public native void addOutputShape(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
            public native void addOutputShape(@Cast("const Nd4jLong*") long[] shapeInfo);

            public native @Cast("Nd4jLong") long getActivationsSize();
            public native @Cast("Nd4jLong") long getTemporarySize();
            public native @Cast("Nd4jLong") long getObjectsSize();
            public native @Cast("Nd4jLong") long getTotalSize();

            public native @Cast("Nd4jLong") long getExecutionTime();

            public native @StdString @ByRef @Cast({"char*", "std::string*"}) BytePointer name();

            public native void merge(NodeProfile other);
            public native void assign(NodeProfile other);

            public native void printOut();
        }
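
            // Hypothetical usage sketch for the NodeProfile bindings above (not part of the
            // generated API); the id, name and values are arbitrary.
            //
            //     NodeProfile node = new NodeProfile(1, "conv2d");
            //     node.setExecutionTime(1000000L);     // same time unit as the rest of the profiler
            //     node.setTotalSize(4096L);            // bytes
            //     long executionTime = node.getExecutionTime();
            //     node.printOut();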
    


// #endif

// Parsed from graph/Context.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  *  See the NOTICE file distributed with this work for additional
 *  *  information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
// @author [email protected]
//

// #ifndef LIBND4J_CONTEXT_H
// #define LIBND4J_CONTEXT_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// CUDA-specific includes
// #ifdef __CUDACC__
// #endif
        /**
         * This class defines the input required by any given node/operation within a graph
         */
        @Namespace("sd::graph") @NoOffset public static class Context extends ContextPrototype {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Context(Pointer p) { super(p); }
        
            public Context(ContextPrototype prototype, VariableSpace variableSpace) { super((Pointer)null); allocate(prototype, variableSpace); }
            private native void allocate(ContextPrototype prototype, VariableSpace variableSpace);

            public Context(int nodeId, VariableSpace variableSpace/*=nullptr*/) { super((Pointer)null); allocate(nodeId, variableSpace); }
            private native void allocate(int nodeId, VariableSpace variableSpace/*=nullptr*/);
            public Context(int nodeId) { super((Pointer)null); allocate(nodeId); }
            private native void allocate(int nodeId);
            public Context(int nodeId, VariableSpace variableSpace, @Cast("bool") boolean isInplace) { super((Pointer)null); allocate(nodeId, variableSpace, isInplace); }
            private native void allocate(int nodeId, VariableSpace variableSpace, @Cast("bool") boolean isInplace);

            // default destructor

            // these methods are for execution timing
            public native void setOuterTime(@Cast("Nd4jLong") long time);
            public native void setInnerTime(@Cast("Nd4jLong") long time);
            public native @Cast("Nd4jLong") long getOuterTime();
            public native @Cast("Nd4jLong") long getInnerTime();

            public native @Cast("sd::DataType") int dataType();

            public native @Cast("sd::DataType") int dataType(int index);
            public native void setDataType(int index, @Cast("sd::DataType") int type);
            // these methods are related to Workspace abstraction
            public native @Cast("bool") boolean hasWorkspaceProvided();
            public native void attachWorkspace(Workspace workspace);
            public native void forgetWorkspace();

            // these methods return full-time workspace
            public native Workspace getWorkspace();
            public native Workspace workspace();
            public native Workspace fWorkspace();

            // this method returns workspace for temporary allocations
            public native Workspace tWorkspace();

            // this method returns workspace for object allocations
            public native Workspace oWorkspace();

            public native void setVariableSpace(VariableSpace variableSpace);

            public native RandomBuffer getRNG();
            public native void setRNG(RandomBuffer rng);

            public native void setTargetEngine(@Cast("samediff::Engine") int engine);

            public native VariableSpace getVariableSpace();

            public native LaunchContext launchContext();

            // these fields define whether we can execute a specific node in-place, without generating a new array


            // these variables are only for Divergent Nodes
            public native int getBranch();
            public native void setBranch(int branch);

            /**
             *
             * @return
             */
            public native Stash getStash();

            /**
             *
             */
            public native void trackList(NDArrayList list);


            /**
             * This method returns the variable for a given input index of this block
             * @param idx
             * @return
             */
            public native Variable getVariable(int idx);
            public native Variable variable(int idx);

            /**
             * This method is a shortcut for getVariable(int idx);
             *
             * it also checks the fastpath for array availability (preferred)
             * @return
             */
            public native NDArray getNDArray(int idx);
            public native NDArray array(int idx);


            /**
             * This method fetches a variable from the VariableSpace DIRECTLY
             * @param p
             * @return
             */
            public native Variable variable(int node, int index);
            public native Variable variable(@ByRef IntIntPair p);


            public native void pushNDArrayToVariableSpace(int nodeId, int index, NDArray array, @Cast("bool") boolean removable/*=true*/);
            public native void pushNDArrayToVariableSpace(int nodeId, int index, NDArray array);
            public native void pushNDArrayToVariableSpace(@ByRef IntIntPair pair, NDArray array, @Cast("bool") boolean removable/*=true*/);
            public native void pushNDArrayToVariableSpace(@ByRef IntIntPair pair, NDArray array);

            public native void pushNDArrayListToVariableSpace(int nodeId, int index, NDArrayList list, @Cast("bool") boolean track/*=true*/);
            public native void pushNDArrayListToVariableSpace(int nodeId, int index, NDArrayList list);
            public native void pushNDArrayListToVariableSpace(@ByRef IntIntPair pair, NDArrayList list, @Cast("bool") boolean track/*=true*/);
            public native void pushNDArrayListToVariableSpace(@ByRef IntIntPair pair, NDArrayList list);

            public native @Cast("bool") boolean isValueAvailable(int idx/*=0*/);
            public native @Cast("bool") boolean isValueAvailable();

            public native Variable ensureVariable(int idx/*=0*/);
            public native Variable ensureVariable();

            public native @Cast("unsigned long") long width();

            // methods used in java interop
            /**
             * This method checks if Context uses fastpath variable access
             * @return
             */
            public native @Cast("bool") boolean isFastPath();

            /**
             * This method allows FastPath execution to be forbidden
             * @param reallyForbid
             */
            public native void forbidFastPath(@Cast("bool") boolean reallyForbid);

// #ifndef __JAVACPP_HACK__
// #endif

            public native void setInputArray(int index, NDArray array, @Cast("bool") boolean removable/*=false*/);
            public native void setInputArray(int index, NDArray array);
            public native void setInputArray(int index, Pointer buffer, @Const Pointer shapeInfo, Pointer specialBuffer, @Const Pointer specialShapeInfo);
            public native void setInputArray(int index, Pointer databuffer, @Const Pointer shapeInfo, @Const Pointer specialShapeInfo);

            public native void setOutputArray(int index, NDArray array, @Cast("bool") boolean removable/*=false*/);
            public native void setOutputArray(int index, NDArray array);
            public native void setOutputArray(int index, Pointer buffer, @Const Pointer shapeInfo, Pointer specialBuffer, @Const Pointer specialShapeInfo);
            public native void setOutputArray(int index, Pointer databuffer, @Const Pointer shapeInfo, @Const Pointer specialShapeInfo);

            public native void setTArguments(DoublePointer arguments, int numberOfArguments);
            public native void setTArguments(DoubleBuffer arguments, int numberOfArguments);
            public native void setTArguments(double[] arguments, int numberOfArguments);
            public native void setIArguments(@Cast("Nd4jLong*") LongPointer arguments, int numberOfArguments);
            public native void setIArguments(@Cast("Nd4jLong*") LongBuffer arguments, int numberOfArguments);
            public native void setIArguments(@Cast("Nd4jLong*") long[] arguments, int numberOfArguments);
            public native void setBArguments(@Cast("bool*") BooleanPointer arguments, int numberOfArguments);
            public native void setBArguments(@Cast("bool*") boolean[] arguments, int numberOfArguments);
            public native void setDArguments(@Cast("sd::DataType*") IntPointer arguments, int numberOfArguments);
            public native void setDArguments(@Cast("sd::DataType*") IntBuffer arguments, int numberOfArguments);
            public native void setDArguments(@Cast("sd::DataType*") int[] arguments, int numberOfArguments);

            public native void setTArguments(@StdVector DoublePointer tArgs);
            public native void setTArguments(@StdVector DoubleBuffer tArgs);
            public native void setTArguments(@StdVector double[] tArgs);
            public native void setIArguments(@Cast("Nd4jLong*") @StdVector LongPointer tArgs);
            public native void setIArguments(@Cast("Nd4jLong*") @StdVector LongBuffer tArgs);
            public native void setIArguments(@Cast("Nd4jLong*") @StdVector long[] tArgs);
            public native void setBArguments(@Cast("bool*") @StdVector BooleanPointer tArgs);
            public native void setBArguments(@Cast("bool*") @StdVector boolean[] tArgs);
            public native void setDArguments(@Cast("sd::DataType*") @StdVector IntPointer dArgs);
            public native void setDArguments(@Cast("sd::DataType*") @StdVector IntBuffer dArgs);
            public native void setDArguments(@Cast("sd::DataType*") @StdVector int[] dArgs);

            /**
             * This method purges fastpath in/out contents and releases all the handles.
             *
             * PLEASE NOTE: I/T/B/D args will stay intact
             */
            public native void clearFastPath();
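            // Fast-path usage sketch (illustrative only; assumes a Context "ctx" and NDArray
            // instances "input"/"output" obtained elsewhere, none of which are defined here):
            //   ctx.setInputArray(0, input);
            //   ctx.setOutputArray(0, output);
            //   ctx.setIArguments(new long[] {1L}, 1);
            //   boolean fast = ctx.isFastPath();
            //   ctx.clearFastPath();   // releases the fast-path in/out handles; I/T/B/D args stay intact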

            public native void setCudaContext(@Cast("Nd4jPointer") Pointer cudaStream, @Cast("Nd4jPointer") Pointer reductionPointer, @Cast("Nd4jPointer") Pointer allocationPointer);

            public native void allowHelpers(@Cast("bool") boolean reallyAllow);
            public native @Cast("bool") boolean helpersAllowed();

            public native void setShapeFunctionOverride(@Cast("bool") boolean reallyOverride);
            public native @Cast("bool") boolean shapeFunctionOverride();

            public native @Cast("samediff::ExecutionMode") int executionMode();
            public native void setExecutionMode(@Cast("samediff::ExecutionMode") int executionMode);

            public native @Cast("bool") boolean isTraining();
            public native @Cast("bool") boolean isInference();
        }
    



// #endif //LIBND4J_BLOCK_H


// Parsed from graph/ContextPrototype.h

/* ******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 * Copyright (c) 2019-2020 Konduit K.K.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef ND4J_CONTEXT_PROTOTYPE_H
// #define ND4J_CONTEXT_PROTOTYPE_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// #ifndef __STANDALONE_BUILD__
// #include 
// #endif

        @Namespace("sd::graph") @NoOffset public static class ContextPrototype extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ContextPrototype(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ContextPrototype(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ContextPrototype position(long position) {
                return (ContextPrototype)super.position(position);
            }
            @Override public ContextPrototype getPointer(long i) {
                return new ContextPrototype((Pointer)this).position(position + i);
            }
        
            public ContextPrototype(OpDescriptor opDescriptor/*=nullptr*/, int nodeId/*=1*/, @Cast("bool") boolean inPlace/*=false*/) { super((Pointer)null); allocate(opDescriptor, nodeId, inPlace); }
            private native void allocate(OpDescriptor opDescriptor/*=nullptr*/, int nodeId/*=1*/, @Cast("bool") boolean inPlace/*=false*/);
            public ContextPrototype() { super((Pointer)null); allocate(); }
            private native void allocate();

            public native int getNodeId();
            public native int nodeId();

            // this method returns true if inputs are defined
            public native @Cast("bool") boolean hasVariablesFilled();

            public native void setOpDescriptor(OpDescriptor opDescriptor);

            public native @Cast("sd::DataType") int dataType();
            public native @Cast("sd::DataType") int dataType(int index);
            public native void setDataType(int index, @Cast("sd::DataType") int type);

            public native @Cast("bool") boolean isInplace();
            public native void markInplace(@Cast("bool") boolean reallyInplace);

            public native void pickInput(int input);
            public native void pickInput(int input, int index);
            public native void pickInput(@ByRef IntIntPair p);
            public native void fillInputs(@StdVector IntPointer inputs);
            public native void fillInputs(@StdVector IntBuffer inputs);
            public native void fillInputs(@StdVector int[] inputs);
            public native @StdVector IntIntPair inputs();

            public native @StdVector DoublePointer getTArguments();
            public native @StdVector IntPointer getIArguments();
            public native @Cast("bool*") @StdVector BooleanPointer getBArguments();
            public native @Cast("sd::DataType*") @StdVector IntPointer getDArguments();
            public native @StdVector IntPointer getAxis();

            public native @Cast("samediff::Engine") int engine();

            public native @Cast("size_t") long numT();
            public native @Cast("size_t") long numI();
            public native @Cast("size_t") long numB();
            public native @Cast("size_t") long numD();

            public native IntIntPair input(int idx);

            public native int opNum();
            public native void setOpNum(int opNum);

            public native @Cast("bool") boolean isUseMKLDNN();
            public native void setUseMKLDNN(@Cast("bool") boolean useMKLDNN);

            /**
             * This method returns the number of inputs available in this block
             * @return the number of inputs
             */
            public native @Cast("unsigned long") long width();

            // just a clone
            public native ContextPrototype clone();

            public native @ByRef RandomGenerator randomGenerator();
            public native @Const @ByRef RandomGenerator getRng();
            public native void setRng(@Const @ByRef RandomGenerator anotherRng);
            public native void setRandomGenerator(@Const @ByRef RandomGenerator anotherRng);
            public native @Cast("uint64_t") long randomSeed();
            public native void setRandomSeed(@Cast("uint64_t") long seed);
        }
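        // Construction sketch (illustrative only; the op number and input pair below are placeholders):
        //   ContextPrototype proto = new ContextPrototype();   // or new ContextPrototype(descriptor, nodeId, false)
        //   proto.setOpNum(0);
        //   proto.pickInput(2, 0);        // register the input pair (2, 0)
        //   proto.markInplace(false);
        //   long numInputs = proto.width();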
    


// #endif //ND4J_CONTEXT_PROTOTYPE_H


// Parsed from graph/ResultWrapper.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 11/06/18.
//

// #ifndef LIBND4J_RESULTWRAPPER_H
// #define LIBND4J_RESULTWRAPPER_H

// #include 
// #include 
// #include 
        @Namespace("sd::graph") @NoOffset public static class ResultWrapper extends org.nd4j.nativeblas.ResultWrapperAbstraction {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ResultWrapper(Pointer p) { super(p); }
        
            public ResultWrapper(@Cast("Nd4jLong") long size, @Cast("Nd4jPointer") Pointer ptr) { super((Pointer)null); allocate(size, ptr); }
            private native void allocate(@Cast("Nd4jLong") long size, @Cast("Nd4jPointer") Pointer ptr);

            public native @Cast("Nd4jLong") long size();

            public native @Cast("Nd4jPointer") Pointer pointer();
        }
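        // Usage sketch (illustrative only; "flatBufferSize" and "flatBufferPointer" stand in for
        // values returned by a graph-execution call, which is not shown here):
        //   ResultWrapper result = new ResultWrapper(flatBufferSize, flatBufferPointer);
        //   long resultSize = result.size();
        //   Pointer flatBuffer = result.pointer();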
    



// #endif //LIBND4J_RESULTWRAPPER_H


// Parsed from helpers/shape.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

/*
 * shape.h
 *
 *  Created on: Dec 28, 2015
 *      Author: agibsonccc
 */

// #ifndef SHAPE_H_
// #define SHAPE_H_

// #include 
// #include 
// #include "system/dll.h"
// #include "system/nd4jmalloc.h"
// #include "math/templatemath.h"
// #include "../helpers/logger.h"
// #include "system/pointercast.h"
// #include "../cnpy/cnpy.h"
// #include 

public static final int MAX_DIMENSION = 0x7fffffff;
public static final int MAX_NUM_THREADS =  1024;
public static final int MAX_RANK = 32;
public static final int MAX_SHAPEINFOLENGTH = 2*MAX_RANK+4;
public static final int MAX_COORD = 3;
public static final int PREALLOC_SIZE = 33554432;
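// Worked example: with MAX_RANK = 32, a full shape-info buffer holds at most
// MAX_SHAPEINFOLENGTH = 2*32 + 4 = 68 Nd4jLong values.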
// #ifdef __CUDACC__
// #endif


// #ifdef __CUDACC__
// #else
// #define INLINEDEF inline
// #endif

// #include "system/pairwise_util.h"
// #include 
// #include 

/**
 * Shape information approximating
 * the information on an ndarray
 */
    @Namespace("shape") @NoOffset public static class ShapeInformation extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ShapeInformation(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ShapeInformation(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ShapeInformation position(long position) {
            return (ShapeInformation)super.position(position);
        }
        @Override public ShapeInformation getPointer(long i) {
            return new ShapeInformation((Pointer)this).position(position + i);
        }
    
        public ShapeInformation(@Cast("Nd4jLong*") LongPointer shape_/*=nullptr*/, @Cast("Nd4jLong*") LongPointer stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/) { super((Pointer)null); allocate(shape_, stride_, order_, rank_, offset_, elementWiseStride_); }
        private native void allocate(@Cast("Nd4jLong*") LongPointer shape_/*=nullptr*/, @Cast("Nd4jLong*") LongPointer stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/);
        public ShapeInformation() { super((Pointer)null); allocate(); }
        private native void allocate();
        public ShapeInformation(@Cast("Nd4jLong*") LongBuffer shape_/*=nullptr*/, @Cast("Nd4jLong*") LongBuffer stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/) { super((Pointer)null); allocate(shape_, stride_, order_, rank_, offset_, elementWiseStride_); }
        private native void allocate(@Cast("Nd4jLong*") LongBuffer shape_/*=nullptr*/, @Cast("Nd4jLong*") LongBuffer stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/);
        public ShapeInformation(@Cast("Nd4jLong*") long[] shape_/*=nullptr*/, @Cast("Nd4jLong*") long[] stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/) { super((Pointer)null); allocate(shape_, stride_, order_, rank_, offset_, elementWiseStride_); }
        private native void allocate(@Cast("Nd4jLong*") long[] shape_/*=nullptr*/, @Cast("Nd4jLong*") long[] stride_/*=nullptr*/, char order_/*=0*/, int rank_/*=0*/, int offset_/*=0*/, int elementWiseStride_/*=0*/);

        public native @Cast("Nd4jLong*") LongPointer shape(); public native ShapeInformation shape(LongPointer setter);
        public native @Cast("Nd4jLong*") LongPointer stride(); public native ShapeInformation stride(LongPointer setter);
        public native char order(); public native ShapeInformation order(char setter);
        public native int rank(); public native ShapeInformation rank(int setter);
        public native int offset(); public native ShapeInformation offset(int setter);
        public native int elementWiseStride(); public native ShapeInformation elementWiseStride(int setter);
    }
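    // Construction sketch (illustrative only): a rank-2, C-ordered 3x4 array with packed strides {4, 1}.
    //   ShapeInformation info = new ShapeInformation(new long[] {3, 4}, new long[] {4, 1}, 'c', 2, 0, 1);
    //   int r = info.rank();          // 2
    //   char order = info.order();    // 'c'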

/**
 * Indexing information
 * for bounds checking
 */
    @Namespace("shape") public static class CurrentIndexing extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public CurrentIndexing() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public CurrentIndexing(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public CurrentIndexing(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public CurrentIndexing position(long position) {
            return (CurrentIndexing)super.position(position);
        }
        @Override public CurrentIndexing getPointer(long i) {
            return new CurrentIndexing((Pointer)this).position(position + i);
        }
    
        public native int numElementsPerThread(); public native CurrentIndexing numElementsPerThread(int setter);
        public native int blockStartingIndex(); public native CurrentIndexing blockStartingIndex(int setter);
        public native int startingThreadIndex(); public native CurrentIndexing startingThreadIndex(int setter);
        public native int endingThreadIndex(); public native CurrentIndexing endingThreadIndex(int setter);

    }



    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(int shape1Rank, @Cast("const Nd4jLong*") LongPointer shape1, int shape2Rank, @Cast("const Nd4jLong*") LongPointer shape2);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(int shape1Rank, @Cast("const Nd4jLong*") LongBuffer shape1, int shape2Rank, @Cast("const Nd4jLong*") LongBuffer shape2);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(int shape1Rank, @Cast("const Nd4jLong*") long[] shape1, int shape2Rank, @Cast("const Nd4jLong*") long[] shape2);

    @Namespace("shape") public native @Cast("const Nd4jLong*") LongPointer detachShape(@Cast("const Nd4jLong*") LongPointer originalShape);
    @Namespace("shape") public native @Cast("const Nd4jLong*") LongBuffer detachShape(@Cast("const Nd4jLong*") LongBuffer originalShape);
    @Namespace("shape") public native @Cast("const Nd4jLong*") long[] detachShape(@Cast("const Nd4jLong*") long[] originalShape);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer copyShape(@Cast("const Nd4jLong*") LongPointer originalShape);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer copyShape(@Cast("const Nd4jLong*") LongBuffer originalShape);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] copyShape(@Cast("const Nd4jLong*") long[] originalShape);

    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2);

    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2, @Cast("const Nd4jLong*") LongPointer shapeInfo3);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2, @Cast("const Nd4jLong*") LongBuffer shapeInfo3);
    @Namespace("shape") public native @Cast("bool") boolean shapeEquals(@Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2, @Cast("const Nd4jLong*") long[] shapeInfo3);

    @Namespace("shape") public native @Cast("bool") boolean strideEquals(int shape1Rank,@Cast("const Nd4jLong*") LongPointer shape1,int shape2Rank, @Cast("const Nd4jLong*") LongPointer shape2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(int shape1Rank,@Cast("const Nd4jLong*") LongBuffer shape1,int shape2Rank, @Cast("const Nd4jLong*") LongBuffer shape2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(int shape1Rank,@Cast("const Nd4jLong*") long[] shape1,int shape2Rank, @Cast("const Nd4jLong*") long[] shape2);

    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2);

    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") LongPointer stride1,int rank1, @Cast("const Nd4jLong*") LongPointer stride2, int rank2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") LongBuffer stride1,int rank1, @Cast("const Nd4jLong*") LongBuffer stride2, int rank2);
    @Namespace("shape") public native @Cast("bool") boolean strideEquals(@Cast("const Nd4jLong*") long[] stride1,int rank1, @Cast("const Nd4jLong*") long[] stride2, int rank2);

    @Namespace("shape") public native @Cast("bool") boolean equalsSoft(@Cast("const Nd4jLong*") LongPointer shapeA, @Cast("const Nd4jLong*") LongPointer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsSoft(@Cast("const Nd4jLong*") LongBuffer shapeA, @Cast("const Nd4jLong*") LongBuffer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsSoft(@Cast("const Nd4jLong*") long[] shapeA, @Cast("const Nd4jLong*") long[] shapeB);

    @Namespace("shape") public native @Cast("bool") boolean equalsTypesAndShapesSoft(@Cast("const Nd4jLong*") LongPointer shapeA, @Cast("const Nd4jLong*") LongPointer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsTypesAndShapesSoft(@Cast("const Nd4jLong*") LongBuffer shapeA, @Cast("const Nd4jLong*") LongBuffer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsTypesAndShapesSoft(@Cast("const Nd4jLong*") long[] shapeA, @Cast("const Nd4jLong*") long[] shapeB);

    @Namespace("shape") public native @Cast("bool") boolean equalsStrict(@Cast("const Nd4jLong*") LongPointer shapeA, @Cast("const Nd4jLong*") LongPointer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsStrict(@Cast("const Nd4jLong*") LongBuffer shapeA, @Cast("const Nd4jLong*") LongBuffer shapeB);
    @Namespace("shape") public native @Cast("bool") boolean equalsStrict(@Cast("const Nd4jLong*") long[] shapeA, @Cast("const Nd4jLong*") long[] shapeB);

    // returns true if ranks, shapes and strides are the same
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2);
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2, @Cast("const Nd4jLong*") LongPointer shapeInfo3);
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2, @Cast("const Nd4jLong*") LongBuffer shapeInfo3);
    @Namespace("shape") public native @Cast("bool") boolean haveSameShapeAndStrides(@Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2, @Cast("const Nd4jLong*") long[] shapeInfo3);

    @Namespace("shape") public native int sizeAt(@Cast("const Nd4jLong*") LongPointer shapeInfo, int dim);
    @Namespace("shape") public native int sizeAt(@Cast("const Nd4jLong*") LongBuffer shapeInfo, int dim);
    @Namespace("shape") public native int sizeAt(@Cast("const Nd4jLong*") long[] shapeInfo, int dim);
    @Namespace("shape") public native @Cast("Nd4jLong") long strideAt(@Cast("const Nd4jLong*") LongPointer shapeInfo, int dim);
    @Namespace("shape") public native @Cast("Nd4jLong") long strideAt(@Cast("const Nd4jLong*") LongBuffer shapeInfo, int dim);
    @Namespace("shape") public native @Cast("Nd4jLong") long strideAt(@Cast("const Nd4jLong*") long[] shapeInfo, int dim);

    @Namespace("shape") public native void traceNew(int id);


    @Namespace("shape") public native int tadIndexForLinear(int linearIndex, int tadLength);

    @Namespace("shape") public native @Cast("Nd4jLong") long tadLength(@Cast("const Nd4jLong*") LongPointer shapeInfo, IntPointer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tadLength(@Cast("const Nd4jLong*") LongBuffer shapeInfo, IntBuffer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tadLength(@Cast("const Nd4jLong*") long[] shapeInfo, int[] dimension, int dimensionLength);

    @Namespace("shape") public native @Cast("bool") boolean canReshape(int oldRank, @Cast("Nd4jLong*") LongPointer oldShape, int newRank, @Cast("Nd4jLong*") LongPointer newShape, @Cast("bool") boolean isFOrder);
    @Namespace("shape") public native @Cast("bool") boolean canReshape(int oldRank, @Cast("Nd4jLong*") LongBuffer oldShape, int newRank, @Cast("Nd4jLong*") LongBuffer newShape, @Cast("bool") boolean isFOrder);
    @Namespace("shape") public native @Cast("bool") boolean canReshape(int oldRank, @Cast("Nd4jLong*") long[] oldShape, int newRank, @Cast("Nd4jLong*") long[] newShape, @Cast("bool") boolean isFOrder);

    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") LongPointer oldShapeInfo, byte newOrder, int newRank, @Cast("const Nd4jLong*") LongPointer newShape, @Cast("Nd4jLong*") LongPointer newShapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") LongBuffer oldShapeInfo, byte newOrder, int newRank, @Cast("const Nd4jLong*") LongBuffer newShape, @Cast("Nd4jLong*") LongBuffer newShapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") long[] oldShapeInfo, byte newOrder, int newRank, @Cast("const Nd4jLong*") long[] newShape, @Cast("Nd4jLong*") long[] newShapeInfo);
    /**
    * newShapeInfo contains rank, shape and order only, no strides/ews/type
    */
    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") LongPointer oldShapeInfo, @Cast("Nd4jLong*") LongPointer newShapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") LongBuffer oldShapeInfo, @Cast("Nd4jLong*") LongBuffer newShapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean reshapeC(@Cast("const Nd4jLong*") long[] oldShapeInfo, @Cast("Nd4jLong*") long[] newShapeInfo);

    /**
    * Get the shape info buffer
    * for the given rank and shape.
    */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongPointer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongBuffer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") long[] shape);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeBuffer(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] buffer);

    /**
    * Get the shape info buffer
    * for the given rank and shape, in Fortran (column-major) order.
    */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongPointer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongBuffer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") long[] shape);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer output);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer output);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeBufferFortran(int rank, @Cast("sd::DataType") int dtype, @Cast("const Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] output);

// #ifdef __CUDACC__
// #endif



/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStridesFortran(@Cast("const Nd4jLong*") LongPointer shape, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStridesFortran(@Cast("const Nd4jLong*") LongBuffer shape, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStridesFortran(@Cast("const Nd4jLong*") long[] shape, int rank);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStridesFortran(@Cast("const Nd4jLong*") LongPointer shape, int rank, @Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStridesFortran(@Cast("const Nd4jLong*") LongBuffer shape, int rank, @Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStridesFortran(@Cast("const Nd4jLong*") long[] shape, int rank, @Cast("Nd4jLong*") long[] ret);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStrides(@Cast("const Nd4jLong*") LongPointer shape, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStrides(@Cast("const Nd4jLong*") LongBuffer shape, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStrides(@Cast("const Nd4jLong*") long[] shape, int rank);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStrides(@Cast("const Nd4jLong*") LongPointer shape, int rank, @Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStrides(@Cast("const Nd4jLong*") LongBuffer shape, int rank, @Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStrides(@Cast("const Nd4jLong*") long[] shape, int rank, @Cast("Nd4jLong*") long[] ret);

    @Namespace("shape") public native void updateStrides(@Cast("Nd4jLong*") LongPointer shape, byte order);
    @Namespace("shape") public native void updateStrides(@Cast("Nd4jLong*") LongBuffer shape, byte order);
    @Namespace("shape") public native void updateStrides(@Cast("Nd4jLong*") long[] shape, byte order);
    @Namespace("shape") public native void updateStrides(int rank, @Cast("const Nd4jLong*") LongPointer shapeOnly, @Cast("Nd4jLong*") LongPointer stridesOnly, byte order);
    @Namespace("shape") public native void updateStrides(int rank, @Cast("const Nd4jLong*") LongBuffer shapeOnly, @Cast("Nd4jLong*") LongBuffer stridesOnly, byte order);
    @Namespace("shape") public native void updateStrides(int rank, @Cast("const Nd4jLong*") long[] shapeOnly, @Cast("Nd4jLong*") long[] stridesOnly, byte order);


// check whether input dimensions are permuted; the non-permuted dimension order has to be 0,...,rank-1

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStridesFortran(@Cast("const Nd4jLong*") LongPointer shape, int rank, int startNum);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStridesFortran(@Cast("const Nd4jLong*") LongBuffer shape, int rank, int startNum);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStridesFortran(@Cast("const Nd4jLong*") long[] shape, int rank, int startNum);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStridesFortran(@Cast("const Nd4jLong*") LongPointer shape, int rank, int startNum, @Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStridesFortran(@Cast("const Nd4jLong*") LongBuffer shape, int rank, int startNum, @Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStridesFortran(@Cast("const Nd4jLong*") long[] shape, int rank, int startNum, @Cast("Nd4jLong*") long[] ret);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStrides(@Cast("const Nd4jLong*") LongPointer shape, int rank, int startNum);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStrides(@Cast("const Nd4jLong*") LongBuffer shape, int rank, int startNum);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStrides(@Cast("const Nd4jLong*") long[] shape, int rank, int startNum);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer calcStrides(@Cast("const Nd4jLong*") LongPointer shape, int rank, int startNum, @Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer calcStrides(@Cast("const Nd4jLong*") LongBuffer shape, int rank, int startNum, @Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] calcStrides(@Cast("const Nd4jLong*") long[] shape, int rank, int startNum, @Cast("Nd4jLong*") long[] ret);

/**
 * @param toCopy the shape to copy
 * @return a copy of the original struct
 */
    @Namespace("shape") public native ShapeInformation shapeCopy( ShapeInformation toCopy);


    @Namespace("shape") public native @Cast("bool") boolean strideDescendingCAscendingF(@Cast("const Nd4jLong*") LongPointer shapeBuffer);
    @Namespace("shape") public native @Cast("bool") boolean strideDescendingCAscendingF(@Cast("const Nd4jLong*") LongBuffer shapeBuffer);
    @Namespace("shape") public native @Cast("bool") boolean strideDescendingCAscendingF(@Cast("const Nd4jLong*") long[] shapeBuffer);

    @Namespace("shape") public native @Cast("bool") boolean isContiguous(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isContiguous(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isContiguous(@Cast("const Nd4jLong*") long[] shapeInfo);


/**
 * copy-paste of the Java hasDefaultStridesForShape function:
 * check whether the array is not permuted and has contiguous elements in memory
 */
    @Namespace("shape") public native @Cast("bool") boolean areStridesDefault(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean areStridesDefault(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean areStridesDefault(@Cast("const Nd4jLong*") long[] shapeInfo);


/**
 * Compute the element wise stride
 * for a given shape/stride configuration
 * @param rank the rank of the shape/stride
 * @param shape the shape
 * @param stride the stride
 * @param isFOrder 0 or 1 for whether the array is f
 * ordered or not
 * @return 0 if there is no element wise stride; otherwise the
 * element wise stride of reshape(1,length)
 */
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer stride, int isFOrder);
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer stride, int isFOrder);
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] stride, int isFOrder);

/**
 * Compute the element wise stride
 * for a given shape/stride configuration
 * @param rank the rank of the shape/stride
 * @param shape the shape
 * @param stride the stride
 * @param isFOrder 0 or 1 for whether the array is f
 * ordered or not
 * @return 0 if there is no element wise stride; otherwise the
 * element wise stride of reshape(1,length)
 */
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer stride, int isFOrder, @Cast("const Nd4jLong*") LongPointer dimension, int dimensionLength);
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer stride, int isFOrder, @Cast("const Nd4jLong*") LongBuffer dimension, int dimensionLength);
    @Namespace("shape") public native int computeElementWiseStride(int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] stride, int isFOrder, @Cast("const Nd4jLong*") long[] dimension, int dimensionLength);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride, @Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride, @Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeInfoOnlyShapeAndStride(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] dimension, int dimensionLength,@Cast("bool") boolean reverseCopyStride, @Cast("Nd4jLong*") long[] buffer);
/**
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer doPermuteSwap(int length, @Cast("Nd4jLong*") LongPointer shape, IntPointer rearrange);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer doPermuteSwap(int length, @Cast("Nd4jLong*") LongBuffer shape, IntBuffer rearrange);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] doPermuteSwap(int length, @Cast("Nd4jLong*") long[] shape, int[] rearrange);



/**
 * In place permute swap
 * @param length
 * @param shape
 * @param rearrange
 */
    @Namespace("shape") public native void doPermuteSwap(int length, @Cast("Nd4jLong**") PointerPointer shape, IntPointer rearrange);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer permuteShapeBuffer(@Cast("const Nd4jLong*") LongPointer shapeBuffer, IntPointer rearrange);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer permuteShapeBuffer(@Cast("const Nd4jLong*") LongBuffer shapeBuffer, IntBuffer rearrange);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] permuteShapeBuffer(@Cast("const Nd4jLong*") long[] shapeBuffer, int[] rearrange);

    @Namespace("shape") public native void permuteShapeBufferInPlace(@Cast("Nd4jLong*") LongPointer shapeBuffer, IntPointer rearrange, @Cast("Nd4jLong*") LongPointer out);
    @Namespace("shape") public native void permuteShapeBufferInPlace(@Cast("Nd4jLong*") LongBuffer shapeBuffer, IntBuffer rearrange, @Cast("Nd4jLong*") LongBuffer out);
    @Namespace("shape") public native void permuteShapeBufferInPlace(@Cast("Nd4jLong*") long[] shapeBuffer, int[] rearrange, @Cast("Nd4jLong*") long[] out);

    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") LongPointer shapeBuffer, @Const IntPointer rearrange, @Cast("Nd4jLong") long len/*=-1*/);
    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") LongPointer shapeBuffer, @Const IntPointer rearrange);
    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") LongBuffer shapeBuffer, @Const IntBuffer rearrange, @Cast("Nd4jLong") long len/*=-1*/);
    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") LongBuffer shapeBuffer, @Const IntBuffer rearrange);
    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") long[] shapeBuffer, @Const int[] rearrange, @Cast("Nd4jLong") long len/*=-1*/);
    @Namespace("shape") public native void doPermuteShapeInfo(@Cast("Nd4jLong*") long[] shapeBuffer, @Const int[] rearrange);

    /**
     * Rearrange the permute indexes
     * according to which dimensions are specified.
     *
     * For example, dimension is implicitly:
     * 0,1,2
     *
     * If you want to do a reduce along dimensions 0 and 1,
     * you need to permute the indexes to be:
     * 2,0,1
     *
     * which will give us the ability to iterate along an element
     * wise stride.
     */

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer createPermuteIndexes(int originalRank, IntPointer dimension,int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer createPermuteIndexes(int originalRank, IntBuffer dimension,int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] createPermuteIndexes(int originalRank, int[] dimension,int dimensionLength);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer computeResultShape(@Cast("const Nd4jLong*") LongPointer originalShapeBuffer, IntPointer dimension,int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer computeResultShape(@Cast("const Nd4jLong*") LongBuffer originalShapeBuffer, IntBuffer dimension,int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] computeResultShape(@Cast("const Nd4jLong*") long[] originalShapeBuffer, int[] dimension,int dimensionLength);

    /**
     * This method does inplace transpose of given shapeBuffer
     *
     * @param shapeBuffer
     */
    @Namespace("shape") public native void transposeInplace(@Cast("Nd4jLong*") LongPointer shapeBuffer);
    @Namespace("shape") public native void transposeInplace(@Cast("Nd4jLong*") LongBuffer shapeBuffer);
    @Namespace("shape") public native void transposeInplace(@Cast("Nd4jLong*") long[] shapeBuffer);


/**
 * Get the ordering for the device
 * @param length
 * @param shape
 * @param stride
 * @param elementStride
 * @return
 */
    @Namespace("shape") public native char getOrder(int length, @Cast("Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer stride, int elementStride);
    @Namespace("shape") public native char getOrder(int length, @Cast("Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer stride, int elementStride);
    @Namespace("shape") public native char getOrder(int length, @Cast("Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] stride, int elementStride);

/**
 * Ensure that every value in the rearrange
 * array is unique
 * @param arr
 * @param shape
 * @param arrLength
 * @param shapeLength
 * @return
 */

/**
 * Permute the shape information
 * @param info the shape information to permute
 * @param rearrange the order to rearrange
 * @param rank the rank of the rearrange array
 */
    @Namespace("shape") public native void permute(@Cast("shape::ShapeInformation**") PointerPointer info, IntPointer rearrange, int rank);
    @Namespace("shape") public native void permute(@ByPtrPtr ShapeInformation info, IntPointer rearrange, int rank);
    @Namespace("shape") public native void permute(@ByPtrPtr ShapeInformation info, IntBuffer rearrange, int rank);
    @Namespace("shape") public native void permute(@ByPtrPtr ShapeInformation info, int[] rearrange, int rank);

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") LongPointer shape, int rank);
    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") LongBuffer shape, int rank);
    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") long[] shape, int rank);


    /**
     * When 1 dimension is the whole length of the
     * array
     */
    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") LongPointer shape, int rank);
    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") LongBuffer shape, int rank);
    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") long[] shape, int rank);

    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native int oneDimEqualToLength(@Cast("Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native int isVector(@Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native @Cast("bool") boolean isLikeVector(@Cast("const Nd4jLong*") LongPointer shapeInfo, @ByRef IntPointer posOfNonUnityDim);
    @Namespace("shape") public native @Cast("bool") boolean isLikeVector(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @ByRef IntBuffer posOfNonUnityDim);
    @Namespace("shape") public native @Cast("bool") boolean isLikeVector(@Cast("const Nd4jLong*") long[] shapeInfo, @ByRef int[] posOfNonUnityDim);

    @Namespace("shape") public native @Cast("bool") boolean isCommonVector(@Cast("const Nd4jLong*") LongPointer shapeInfo, @ByRef IntPointer posOfNonUnityDim);
    @Namespace("shape") public native @Cast("bool") boolean isCommonVector(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @ByRef IntBuffer posOfNonUnityDim);
    @Namespace("shape") public native @Cast("bool") boolean isCommonVector(@Cast("const Nd4jLong*") long[] shapeInfo, @ByRef int[] posOfNonUnityDim);

    @Namespace("shape") public native @Cast("bool") boolean isRowVector(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isRowVector(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isRowVector(@Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native @Cast("bool") boolean isColumnVector(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isColumnVector(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isColumnVector(@Cast("const Nd4jLong*") long[] shapeInfo);

    /**
    * shape - input inShape is shape only, not shapeInfo
    * returns number of non-unity dimensions in inShape
    */
    @Namespace("shape") public native int numOfNonUnitDims(int rank, @Cast("const Nd4jLong*") LongPointer inShape);
    @Namespace("shape") public native int numOfNonUnitDims(int rank, @Cast("const Nd4jLong*") LongBuffer inShape);
    @Namespace("shape") public native int numOfNonUnitDims(int rank, @Cast("const Nd4jLong*") long[] inShape);

/**
 * Returns whether the
 * given shape is a matrix or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */

    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") LongPointer shape, int rank);
    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") LongBuffer shape, int rank);
    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") long[] shape, int rank);

    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native int isMatrix(@Cast("const Nd4jLong*") long[] shapeInfo);
/**
 * Returns the shape portion of an information
 * buffer
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeOf(@Cast("Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeOf(@Cast("Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeOf(@Cast("Nd4jLong*") long[] shapeInfo);

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */

    @Namespace("shape") public native void copyTo(int length, @Cast("const Nd4jLong*") LongPointer from, @Cast("Nd4jLong*") LongPointer to, @Cast("Nd4jLong*") LongPointer indexes);
    @Namespace("shape") public native void copyTo(int length, @Cast("const Nd4jLong*") LongBuffer from, @Cast("Nd4jLong*") LongBuffer to, @Cast("Nd4jLong*") LongBuffer indexes);
    @Namespace("shape") public native void copyTo(int length, @Cast("const Nd4jLong*") long[] from, @Cast("Nd4jLong*") long[] to, @Cast("Nd4jLong*") long[] indexes);

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
    //ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer slice(@Cast("Nd4jLong*") LongPointer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer slice(@Cast("Nd4jLong*") LongBuffer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] slice(@Cast("Nd4jLong*") long[] shape);

    @Namespace("shape") public native int slices(@Cast("Nd4jLong*") LongPointer shapeBuffer);
    @Namespace("shape") public native int slices(@Cast("Nd4jLong*") LongBuffer shapeBuffer);
    @Namespace("shape") public native int slices(@Cast("Nd4jLong*") long[] shapeBuffer);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer sliceOfShapeBuffer(@Cast("Nd4jLong") long sliceIdx, @Cast("Nd4jLong*") LongPointer shapeBuffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer sliceOfShapeBuffer(@Cast("Nd4jLong") long sliceIdx, @Cast("Nd4jLong*") LongBuffer shapeBuffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] sliceOfShapeBuffer(@Cast("Nd4jLong") long sliceIdx, @Cast("Nd4jLong*") long[] shapeBuffer);
/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
    @Namespace("shape") public native int shapeInfoLength(int rank);

    @Namespace("shape") public native int shapeInfoLength(@Cast("Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native int shapeInfoLength(@Cast("Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native int shapeInfoLength(@Cast("Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native @Cast("size_t") long shapeInfoByteLength(int rank);

    @Namespace("shape") public native @Cast("size_t") long shapeInfoByteLength(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("size_t") long shapeInfoByteLength(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("size_t") long shapeInfoByteLength(@Cast("const Nd4jLong*") long[] shapeInfo);

/**
 * Returns the rank portion of
 * an information buffer
 */
    @Namespace("shape") public native int rank(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native int rank(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native int rank(@Cast("const Nd4jLong*") long[] shapeInfo);
    @Namespace("shape") public native int rank(@Const IntPointer shapeInfo);
    @Namespace("shape") public native int rank(@Const IntBuffer shapeInfo);
    @Namespace("shape") public native int rank(@Const int[] shapeInfo);

    /**
    *  returns a pointer to the elementWiseStride
    */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer ews(@Cast("Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer ews(@Cast("Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] ews(@Cast("Nd4jLong*") long[] shapeInfo);

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
    @Namespace("shape") public native ShapeInformation infoFromBuffer(@Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native ShapeInformation infoFromBuffer(@Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native ShapeInformation infoFromBuffer(@Cast("Nd4jLong*") long[] buffer);

/**
 * Returns the stride portion of an information
 * buffer
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer stride(@Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer stride(@Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] stride(@Cast("Nd4jLong*") long[] buffer);

/**
 * Compute the length of the given shape
 */
    @Namespace("shape") public native @Cast("bool") boolean isEmpty(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isEmpty(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("bool") boolean isEmpty(@Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native @Cast("Nd4jLong") long length(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long length(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long length(@Cast("const Nd4jLong*") long[] shapeInfo);

/***
 * Returns the offset portion of an information buffer
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long offset(@Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong") long offset(@Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong") long offset(@Cast("Nd4jLong*") long[] buffer);

    @Namespace("shape") public native @Cast("Nd4jLong*") @ByRef LongPointer extra(@Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") @ByRef LongBuffer extra(@Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") @ByRef long[] extra(@Cast("Nd4jLong*") long[] buffer);

/**
 * Returns the ordering
 * for this shape information buffer
 */
    @Namespace("shape") public native char order(@Cast("const Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native char order(@Cast("const Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native char order(@Cast("const Nd4jLong*") long[] buffer);

/**
 * Returns the type
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long type(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long type(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long type(@Cast("const Nd4jLong*") long[] shapeInfo);

/**
 * Returns the element wise stride for this information
 * buffer
 */
   @Namespace("shape") public native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongPointer shapeInfo);
   @Namespace("shape") public native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
   @Namespace("shape") public native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") long[] shapeInfo);


/**
 * Returns the element wise stride for this information
 * buffer, relative to a dimension and ordering for a reduction index
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long reductionIndexElementWiseStride(@Cast("Nd4jLong*") LongPointer buffer, IntPointer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long reductionIndexElementWiseStride(@Cast("Nd4jLong*") LongBuffer buffer, IntBuffer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long reductionIndexElementWiseStride(@Cast("Nd4jLong*") long[] buffer, int[] dimension, int dimensionLength);

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */
    @Namespace("shape") public native int isScalar(@Cast("const Nd4jLong*") LongPointer info);
    @Namespace("shape") public native int isScalar(@Cast("const Nd4jLong*") LongBuffer info);
    @Namespace("shape") public native int isScalar(@Cast("const Nd4jLong*") long[] info);

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
    @Namespace("shape") public native int isScalar(ShapeInformation info);

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted item
 */

    /**
     * Iterate over a given set of indexes.
     * The begin and end indexes are 0 based; a padding of 1 is automatically
     * assumed for the ending, so iterating from 0 to 4 goes up to 4 rather than 3.
     *
     * indexes should be the indexes to exclude,
     * and indexesLength should be the length of indexes.
     */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer everyIndexBut(@Cast("const Nd4jLong*") LongPointer indexes,int indexesLength,int begin,int end);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer everyIndexBut(@Cast("const Nd4jLong*") LongBuffer indexes,int indexesLength,int begin,int end);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] everyIndexBut(@Cast("const Nd4jLong*") long[] indexes,int indexesLength,int begin,int end);

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);

/**
 * Returns a shape buffer,
 * forcing the given shape to have rank 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer ensureVectorShape(@Cast("Nd4jLong*") LongPointer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer ensureVectorShape(@Cast("Nd4jLong*") LongBuffer shape);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] ensureVectorShape(@Cast("Nd4jLong*") long[] shape);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer createScalarShapeInfo();

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer createScalarShapeInfo(@Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer createScalarShapeInfo(@Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] createScalarShapeInfo(@Cast("Nd4jLong*") long[] ret);

/**
 * Generate an int buffer
 * up to the given length
 * at the specified increment
 *
 */

/**
 * Range between from and to with an
 * increment of 1
 */

/**
 * Keep the given indexes
 * in the data
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer keep(@Cast("Nd4jLong*") LongPointer data, @Const IntPointer index, int indexLength, int dataLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer keep(@Cast("Nd4jLong*") LongBuffer data, @Const IntBuffer index, int indexLength, int dataLength);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] keep(@Cast("Nd4jLong*") long[] data, @Const int[] index, int indexLength, int dataLength);

/**
 * Generate reverse copy of the data
 * @param data
 * @param length
 * @return
 */
/**
 *
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */

/**
 *
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return the length per slice of the given shape
 * along the given dimension
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long lengthPerSlice(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Const IntPointer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long lengthPerSlice(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Const IntBuffer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long lengthPerSlice(int rank, @Cast("const Nd4jLong*") long[] shape, @Const int[] dimension, int dimensionLength);

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long sliceOffsetForTensor(int rank,
                                           int index,
                                           @Cast("const Nd4jLong*") LongPointer shape,
                                           @Cast("const Nd4jLong*") LongPointer tensorShape,
                                           int tensorShapeLength,
                                           @Const IntPointer dimension,
                                           int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long sliceOffsetForTensor(int rank,
                                           int index,
                                           @Cast("const Nd4jLong*") LongBuffer shape,
                                           @Cast("const Nd4jLong*") LongBuffer tensorShape,
                                           int tensorShapeLength,
                                           @Const IntBuffer dimension,
                                           int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long sliceOffsetForTensor(int rank,
                                           int index,
                                           @Cast("const Nd4jLong*") long[] shape,
                                           @Cast("const Nd4jLong*") long[] tensorShape,
                                           int tensorShapeLength,
                                           @Const int[] dimension,
                                           int dimensionLength);

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
 * Computes the tensor along dimension
 * offset
 * @param index the index to get the offset for the tad for
 * @param rank the rank of the shapes and strides
 * @param info the shape information to use for tad
 * @param dimension the dimensions to use for computing the tensor along dimensions
 */
//    ND4J_EXPORT _CUDA_HD int offset(int index,
//                         int rank,
//                         shape::ShapeInformation *info,
//                         Nd4jLong *dimension,
//                         int dimensionLength);


/**
 * Computes the number
 * of tensors along
 * a given dimension
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(int rank,
                                            int length,
                                            @Cast("Nd4jLong*") LongPointer shape,
                                            IntPointer dimension,
                                            int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(int rank,
                                            int length,
                                            @Cast("Nd4jLong*") LongBuffer shape,
                                            IntBuffer dimension,
                                            int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(int rank,
                                            int length,
                                            @Cast("Nd4jLong*") long[] shape,
                                            int[] dimension,
                                            int dimensionLength);

/**
 * Computes the number
 * of tensors along
 * a given dimension
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(@Cast("Nd4jLong*") LongPointer shapeInfo, IntPointer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(@Cast("Nd4jLong*") LongBuffer shapeInfo, IntBuffer dimension, int dimensionLength);
    @Namespace("shape") public native @Cast("Nd4jLong") long tensorsAlongDimension(@Cast("Nd4jLong*") long[] shapeInfo, int[] dimension, int dimensionLength);



/**
 * Returns the tensor along dimension
 * for the given block index
 * @param blockSize
 * @param blockIdx
 * @param i
 * @return
 */
    @Namespace("shape") public native int tadForBlockIndex(int blockSize, int blockIdx, int i);

/**
 * Computes the number of tads per block
 *
 */
    @Namespace("shape") public native int tadsPerBlock(int blockSize, int tads);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
//                                int dimensionLength);

/**
 * Returns a shape buffer
 * for the shape information metadata.
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer toShapeBuffer( ShapeInformation info);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer toShapeBuffer( ShapeInformation info, @Cast("Nd4jLong*") LongPointer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer toShapeBuffer( ShapeInformation info, @Cast("Nd4jLong*") LongBuffer ret);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] toShapeBuffer( ShapeInformation info, @Cast("Nd4jLong*") long[] ret);

/**
 * Returns the number of elements per thread
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int numElementsPerThread(int N);

/**
 * Returns the block starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int blockStartingIndex(int N);

/**
 * Returns the thread starting index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadStartingIndex(int N, int stride, int offset);

/**
 * Returns the thread ending index
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    int threadEndingIndex(int N, int stride, int offset);

/**
 * Returns indexing information
 * for the current kernel invocation
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    CurrentIndexing *currentIndex(int N, int offset, int stride);

/** Given a linear index, element wise stride
 * and the length of each tad,
 * map a linear index to a tad
 * @param i the index to map
 * @param elementWiseStride the element wise stride for the tads
 * @param numElementsPerTad the number of elements
 * per tad
 */
    @Namespace("shape") public native int tadIndex(int i, int elementWiseStride, int numElementsPerTad);

/**
 * Map a tad to a
 * reduction index.
 * @param tadIndexForOriginal the original tad index for the
 * split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
 * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the original problem (eg: 3)
 */
    @Namespace("shape") public native int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                 int tadsForOriginal);

/**
 * Computes the number of tads
 * per reduce index for the
 * reduction tad.
 */
    @Namespace("shape") public native int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);

/**
 * Maps a linear index to a reduction index
 * @param i the linear index to map
 * @param elementWiseStride the element wise stride
 * for the multiple problem
 * @param numElementsPerTad the number of elements per tad
 * @param tadNum the number of tads for the shrunken problem
 * @param originalTadNum the number of tads for the original version of the problem
 */
    @Namespace("shape") public native int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                    int tadNum, int originalTadNum);

/**
 * Returns the prod of the data
 * up to the given length
 */
    @Namespace("shape") public native @Cast("Nd4jLong") long prodLong(@Cast("const Nd4jLong*") LongPointer data, int length);
    @Namespace("shape") public native @Cast("Nd4jLong") long prodLong(@Cast("const Nd4jLong*") LongBuffer data, int length);
    @Namespace("shape") public native @Cast("Nd4jLong") long prodLong(@Cast("const Nd4jLong*") long[] data, int length);

    /**
     * Returns the rear most left over item not present in
     * the dimension array. This assumes that the dimension array is sorted.
     *
     * For example, given a dimension array of:
     * 0,2
     *
     * and
     *
     * 12,4,2,1 in data
     *
     * You end up with 1 (data[3])
     * since the first item won't match
     * the last item of the dimension array
     */

//    ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);

    /**
     * Get an offset for retrieval
     * from a data buffer
     * based on the given
     * shape, stride and indices
     * @param baseOffset the offset to start from
     * @param shape the shape of the array
     * @param stride the stride of the array
     * @param indices the indices to iterate over
     * @return the offset at the specified indices
     */

    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] coords, @Cast("Nd4jLong") long baseOffset/*=0*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer coords, @Const IntPointer dims);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer coords, @Const IntBuffer dims);
    @Namespace("shape") public native @Cast("Nd4jLong") long getOffset(@Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] coords, @Const int[] dims);     // length of dims is equal to rank of shapeInfo

    // all three arrays should have the same rank
    // all three arrays should have the same dimensions, or some of them equal to 1 (i.e. satisfy the broadcasting principle); strides may differ
    // shapeInfo1 - the first array should have the max length compared to the other two arrays
    @Namespace("shape") public native void getOffsetBroadcast(@Cast("const Nd4jLong") long startInd, @Cast("const Nd4jLong") long ind,
                                                    @Cast("const Nd4jLong*") LongPointer shapeInfo1, @Cast("const Nd4jLong*") LongPointer shapeInfo2, @Cast("const Nd4jLong*") LongPointer shapeInfo3,
                                                    @Cast("const bool") boolean sameOffsets12, @Cast("const bool") boolean sameOffsets13,
                                                    IntPointer coords,
                                                    @Cast("Nd4jLong*") @ByRef LongPointer offset1, @Cast("Nd4jLong*") @ByRef LongPointer offset2, @Cast("Nd4jLong*") @ByRef LongPointer offset3);
    @Namespace("shape") public native void getOffsetBroadcast(@Cast("const Nd4jLong") long startInd, @Cast("const Nd4jLong") long ind,
                                                    @Cast("const Nd4jLong*") LongBuffer shapeInfo1, @Cast("const Nd4jLong*") LongBuffer shapeInfo2, @Cast("const Nd4jLong*") LongBuffer shapeInfo3,
                                                    @Cast("const bool") boolean sameOffsets12, @Cast("const bool") boolean sameOffsets13,
                                                    IntBuffer coords,
                                                    @Cast("Nd4jLong*") @ByRef LongBuffer offset1, @Cast("Nd4jLong*") @ByRef LongBuffer offset2, @Cast("Nd4jLong*") @ByRef LongBuffer offset3);
    @Namespace("shape") public native void getOffsetBroadcast(@Cast("const Nd4jLong") long startInd, @Cast("const Nd4jLong") long ind,
                                                    @Cast("const Nd4jLong*") long[] shapeInfo1, @Cast("const Nd4jLong*") long[] shapeInfo2, @Cast("const Nd4jLong*") long[] shapeInfo3,
                                                    @Cast("const bool") boolean sameOffsets12, @Cast("const bool") boolean sameOffsets13,
                                                    int[] coords,
                                                    @Cast("Nd4jLong*") @ByRef long[] offset1, @Cast("Nd4jLong*") @ByRef long[] offset2, @Cast("Nd4jLong*") @ByRef long[] offset3);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer createShapeInfo(@Cast("Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer stride, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer createShapeInfo(@Cast("Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer stride, int rank);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] createShapeInfo(@Cast("Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] stride, int rank);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer createShapeInfo(@Cast("Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer stride, int rank, @Cast("Nd4jLong*") LongPointer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer createShapeInfo(@Cast("Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer stride, int rank, @Cast("Nd4jLong*") LongBuffer buffer);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] createShapeInfo(@Cast("Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] stride, int rank, @Cast("Nd4jLong*") long[] buffer);

    /**
    * Convert a linear index to the corresponding coordinates
    * for example if shape is {2, 4}, then index 5 corresponds to coordinates [1, 1]
    */
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo,  @Cast("Nd4jLong*") LongPointer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo,  @Cast("Nd4jLong*") LongBuffer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo,  @Cast("Nd4jLong*") long[] coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo,  IntPointer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo,  IntBuffer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo,  int[] coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongPointer shape, IntPointer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongBuffer shape, IntBuffer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") long[] shape, int[] coords);

    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer coords);
    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer coords);
    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] coords);
    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo, IntPointer coords);
    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo, IntBuffer coords);
    @Namespace("shape") public native void index2coordsCPU(@Cast("const Nd4jLong") long startIndex, @Cast("const Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo, int[] coords);
    // ND4J_EXPORT _CUDA_HD void index2coordsCPU(const Nd4jLong& startIndex, const Nd4jLong& index, const Nd4jLong *shapeInfo, const int* dims, Nd4jLong *coords);

    /**
    * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
    */
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer dims, int dimsLen, IntPointer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer dims, int dimsLen, IntBuffer coords);
    @Namespace("shape") public native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] dims, int dimsLen, int[] coords);

    /**
    * Convert coordinates to the corresponding linear index (sequence number in other words)
    * for example if shape is {2, 4} and coordinates [1, 1] then index 5 is returned
    */
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Const IntPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Const IntBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") long[] shape, @Const int[] coords);
    /**
    * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
    */
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Const IntPointer dims, int dimsSize, @Const IntPointer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Const IntBuffer dims, int dimsSize, @Const IntBuffer coords);
    @Namespace("shape") public native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") long[] shapeInfo, @Const int[] dims, int dimsSize, @Const int[] coords);

   /**
   * increment n-dimensional array by one iteration by changing coord appropriately
   * for example we have array with shape {2, 3}:
   * - if input coord = {0,1}, then output coord = {0,2}
   * - if input coord = {0,2}, then output coord = {1,0}
   * so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2}
   */
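
    /**
     * Minimal pure-Java sketch of the coordinate increment described above for a
     * C-ordered array: the last dimension spins fastest and carries over, producing
     * {0,0}, {0,1}, {0,2}, {1,0}, ... for shape {2, 3}. Illustrative only.
     */
    private static void exampleIncrementCoords(long[] shape, long[] coords) {
        for (int i = shape.length - 1; i >= 0; i--) {
            if (++coords[i] < shape[i]) {
                return;                            // no carry needed, done
            }
            coords[i] = 0;                         // carry over into the next (slower) dimension
        }
    }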

    /* calculates an array buffer offset for the given "index" using the following formula:
     * offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1}
     */
    @Namespace("shape") public native @Cast("uint") int getIndexOffset(@Cast("uint") int index, @Cast("const uint*") IntPointer shapeInfo);
    @Namespace("shape") public native @Cast("uint") int getIndexOffset(@Cast("uint") int index, @Cast("const uint*") IntBuffer shapeInfo);
    @Namespace("shape") public native @Cast("uint") int getIndexOffset(@Cast("uint") int index, @Cast("const uint*") int[] shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long getIndexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long getIndexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long getIndexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long indexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer lShapeInfo, @Cast("const uint*") IntPointer uShapeInfo, @Cast("const bool") boolean useUnsigned);
    @Namespace("shape") public native @Cast("Nd4jLong") long indexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer lShapeInfo, @Cast("const uint*") IntBuffer uShapeInfo, @Cast("const bool") boolean useUnsigned);
    @Namespace("shape") public native @Cast("Nd4jLong") long indexOffset(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] lShapeInfo, @Cast("const uint*") int[] uShapeInfo, @Cast("const bool") boolean useUnsigned);

    @Namespace("shape") public native void printShapeInfo(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native void printShapeInfo(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native void printShapeInfo(@Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native void printShapeInfoLinear(@Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, @Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, @Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, @Cast("const Nd4jLong*") long[] shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, @Cast("const Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, @Cast("const Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, @Cast("const Nd4jLong*") long[] shapeInfo);

    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") String msg, int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides);
    @Namespace("shape") public native void printShapeInfoLinear(@Cast("char*") BytePointer msg, int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides);

    @Namespace("shape") public native void printIntArray(@Cast("const Nd4jLong*") LongPointer arr, int length);
    @Namespace("shape") public native void printIntArray(@Cast("const Nd4jLong*") LongBuffer arr, int length);
    @Namespace("shape") public native void printIntArray(@Cast("const Nd4jLong*") long[] arr, int length);
    @Namespace("shape") public native void printIntArray(@Const IntPointer arr, int length);
    @Namespace("shape") public native void printIntArray(@Const IntBuffer arr, int length);
    @Namespace("shape") public native void printIntArray(@Const int[] arr, int length);

    @Namespace("shape") public native void printArray(FloatPointer arr,int length);
    @Namespace("shape") public native void printArray(FloatBuffer arr,int length);
    @Namespace("shape") public native void printArray(float[] arr,int length);

    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer shapeBufferOfNpy(int rank, @Cast("unsigned int*") IntPointer shape,@Cast("bool") boolean fortranOrder);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer shapeBufferOfNpy(int rank, @Cast("unsigned int*") IntBuffer shape,@Cast("bool") boolean fortranOrder);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] shapeBufferOfNpy(int rank, @Cast("unsigned int*") int[] shape,@Cast("bool") boolean fortranOrder);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);


    // this function checks the consistency of dimensions with the array rank (negative dimensions, too large dimensions, too big a number of dimensions)
    // it also sorts the input array of dimensions; this operation is also necessary for creating a TAD object
    @Namespace("shape") public native void checkDimensions(int rank, @StdVector IntPointer dimensions);
    @Namespace("shape") public native void checkDimensions(int rank, @StdVector IntBuffer dimensions);
    @Namespace("shape") public native void checkDimensions(int rank, @StdVector int[] dimensions);

    // function calculates the linear index of the min array; min is a sub-array of max, and the returned index is the min-array's index corresponding to maxIdx of the max array
    // dimsToExclude - should be sorted in increasing order
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, @Const IntPointer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, @Const IntBuffer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, @Const int[] dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayIndex(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo);

    // function calculates the absolute offset of the min array; min is a sub-array of max, and the returned offset corresponds to maxIdx of the max array
    // dimsToExclude - should be sorted in increasing order
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, @Const IntPointer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, @Const IntBuffer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, @Const int[] dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native @Cast("Nd4jLong") long subArrayOffset(@Cast("const Nd4jLong") long maxIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo);

    // max array is outer for min array, min array is sub-array of max array
    // function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
    // dimsToExclude - should be sorted in increasing order
    // dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank
    @Namespace("shape") public native void maxIndToMinInd(IntPointer maxIdxs, IntPointer minIdxs, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, @Const IntPointer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native void maxIndToMinInd(IntPointer maxIdxs, IntPointer minIdxs, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo);
    @Namespace("shape") public native void maxIndToMinInd(IntBuffer maxIdxs, IntBuffer minIdxs, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, @Const IntBuffer dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native void maxIndToMinInd(IntBuffer maxIdxs, IntBuffer minIdxs, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo);
    @Namespace("shape") public native void maxIndToMinInd(int[] maxIdxs, int[] minIdxs, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, @Const int[] dimsToExclude/*=nullptr*/, int dimsLen/*=-1*/);
    @Namespace("shape") public native void maxIndToMinInd(int[] maxIdxs, int[] minIdxs, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo);

    // calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array
    // dimsToExclude - should be sorted in increasing order
    @Namespace("shape") public native int outerArrayIndexes(IntPointer maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, @Const IntPointer dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayIndexes(IntPointer maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo);
    @Namespace("shape") public native int outerArrayIndexes(IntBuffer maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, @Const IntBuffer dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayIndexes(IntBuffer maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo);
    @Namespace("shape") public native int outerArrayIndexes(int[] maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, @Const int[] dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayIndexes(int[] maxIdxs, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo);

    // calculate offsets of max-array, these offsets correspond to one minIdx index of min-array which is sub-array of max-array
    // maxOffsets - will contain calculated offsets of max-array, buffer for maxOffsets should be allocated beforehand
    // dimsToExclude - should be sorted in increasing order
    // memBuff - auxiliary memory buffer (size = 2 * max_rank) for coordinates and increments storing, should be allocated beforehand
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") LongPointer maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, IntPointer memBuff, @Const IntPointer dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") LongPointer maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("const Nd4jLong*") LongPointer minShapeInfo, IntPointer memBuff);
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") LongBuffer maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, IntBuffer memBuff, @Const IntBuffer dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") LongBuffer maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("const Nd4jLong*") LongBuffer minShapeInfo, IntBuffer memBuff);
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") long[] maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, int[] memBuff, @Const int[] dimsToExclude/*=nullptr*/);
    @Namespace("shape") public native int outerArrayOffsets(@Cast("Nd4jLong*") long[] maxOffsets, @Cast("const Nd4jLong") long minIdx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("const Nd4jLong*") long[] minShapeInfo, int[] memBuff);

    // calculates offsets for entities (elements or sub-arrays), shape in context of sub-array means dimensions excluded from outer array
    // rank is equal to size of shape
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides, @Cast("Nd4jLong*") LongPointer offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides, @Cast("Nd4jLong*") LongPointer offsets);
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides, @Cast("Nd4jLong*") LongBuffer offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides, @Cast("Nd4jLong*") LongBuffer offsets);
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides, @Cast("Nd4jLong*") long[] offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides, @Cast("Nd4jLong*") long[] offsets);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer offsets);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer offsets);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] offsets, byte order/*='c'*/);
    @Namespace("shape") public native void calcOffsets(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] offsets);
    // ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const char order = 'c');
    // ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order = 'c');
    @Namespace("shape") public native void shapeOldScalar(@Cast("sd::DataType") int dtype, @Cast("Nd4jLong*const") LongPointer buffer, byte order);
    @Namespace("shape") public native void shapeOldScalar(@Cast("sd::DataType") int dtype, @Cast("Nd4jLong*const") LongBuffer buffer, byte order);
    @Namespace("shape") public native void shapeOldScalar(@Cast("sd::DataType") int dtype, @Cast("Nd4jLong*const") long[] buffer, byte order);

    // deduce order and element-wise stride
    // if array is scalar or unit length vector then ews = 1 and order is preserved
    // if array is common vector then ews = stride of non-unity dimension and order is preserved
    // if strides are normal/contiguous then ews = 1 and corresponding order is set, otherwise ews = 0 and order is preserved
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") LongPointer shapeInfo, byte proposedOrder, int numOfNonUnitDims, @Cast("const Nd4jLong*") LongPointer shapeNoUnities, @Cast("const Nd4jLong*") LongPointer stridesNoUnities);
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") LongBuffer shapeInfo, byte proposedOrder, int numOfNonUnitDims, @Cast("const Nd4jLong*") LongBuffer shapeNoUnities, @Cast("const Nd4jLong*") LongBuffer stridesNoUnities);
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") long[] shapeInfo, byte proposedOrder, int numOfNonUnitDims, @Cast("const Nd4jLong*") long[] shapeNoUnities, @Cast("const Nd4jLong*") long[] stridesNoUnities);
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") LongPointer shapeInfo);
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") LongBuffer shapeInfo);
    @Namespace("shape") public native void checkStridesEwsAndOrder(@Cast("Nd4jLong*") long[] shapeInfo);

    /**
    * processes whole set of sub-arrays
    * evaluates shapeInfo of sub-arrays (all sub-arrays have the same shapeInfo) and their buffer offsets (each sub-array has its own unique offset from the original buffer)
    * arguments:
    * wholeShapeInfo - original shapeInfo of whole array
    * numOfSubArrs - number of sub-arrays, size of subArrOffsets is equal to numOfSubArrs
    * dimsSize - size of dimsToExclude, if dimsSize = array rank or dimsSize = 0 it means sub-array is whole array, copy of wholeShapeInfo and one zero offset will be returned
    * dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5]
    * subArrShapeInfo    - output argument, contains shapeInfo (same for all sub-arrays)
    * subArrOffsets      - output argument, contains successive sub-array offsets from the original buffer
    * keepUnitiesInShape - if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b}
    */
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") LongPointer wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const IntPointer dimsToExclude, @Cast("Nd4jLong*") LongPointer subArrShapeInfo, @Cast("Nd4jLong*") LongPointer subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") LongPointer wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const IntPointer dimsToExclude, @Cast("Nd4jLong*") LongPointer subArrShapeInfo, @Cast("Nd4jLong*") LongPointer subArrOffsets);
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") LongBuffer wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const IntBuffer dimsToExclude, @Cast("Nd4jLong*") LongBuffer subArrShapeInfo, @Cast("Nd4jLong*") LongBuffer subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") LongBuffer wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const IntBuffer dimsToExclude, @Cast("Nd4jLong*") LongBuffer subArrShapeInfo, @Cast("Nd4jLong*") LongBuffer subArrOffsets);
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") long[] wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const int[] dimsToExclude, @Cast("Nd4jLong*") long[] subArrShapeInfo, @Cast("Nd4jLong*") long[] subArrOffsets, @Cast("bool") boolean keepUnitiesInShape/*=false*/);
    @Namespace("shape") public native void calcSubArrsShapeInfoAndOffsets(@Cast("const Nd4jLong*") long[] wholeShapeInfo, @Cast("const Nd4jLong") long numOfSubArrs, int dimsSize, @Const int[] dimsToExclude, @Cast("Nd4jLong*") long[] subArrShapeInfo, @Cast("Nd4jLong*") long[] subArrOffsets);

    /**
    * processes only one sub-array, evaluates shapeInfo of sub-array and its buffer offset from original array
    * arguments:
    * idx - input argument, intervals of indexes which define the sub-array to point on,
    *        when isStrided = false then idx has form {dim0Start,dim0End,  dim1Start,dim1End, ....} and length (2 * maxRank)
    *        when isStrided = true  then idx has form {dim0Start,dim0End,dim0Stride,  dim1Start,dim1End,dim1Stride, ....} and length (3 * maxRank)
    *        when (dimStart == dimEnd) then whole range will be used for current dimension
    * maxShapeInfo - input argument, shapeInfo of original array
    * minShapeInfo - output argument, shapeInfo of sub-array to be deduced
    * minOffset - output argument, offset of the sub-array buffer from the original buffer
    * keepUnitiesInShape - input argument, if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b}
    * isStrided - input argument, if true then idx has length (3 * this->rankOf()) and contains additional stride numbers which correspond to stride between dimStart and dimEnd,
    * numOfUntiesInMinShape - input argument, number of occurrences in idx when (dimEnd - dimStart) = 1
    */
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") LongPointer idx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("Nd4jLong*") LongPointer minShapeInfo, @Cast("Nd4jLong*") @ByRef LongPointer minOffset, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/, int numOfUntiesInMinShape/*=0*/);
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") LongPointer idx, @Cast("const Nd4jLong*") LongPointer maxShapeInfo, @Cast("Nd4jLong*") LongPointer minShapeInfo, @Cast("Nd4jLong*") @ByRef LongPointer minOffset);
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") LongBuffer idx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("Nd4jLong*") LongBuffer minShapeInfo, @Cast("Nd4jLong*") @ByRef LongBuffer minOffset, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/, int numOfUntiesInMinShape/*=0*/);
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") LongBuffer idx, @Cast("const Nd4jLong*") LongBuffer maxShapeInfo, @Cast("Nd4jLong*") LongBuffer minShapeInfo, @Cast("Nd4jLong*") @ByRef LongBuffer minOffset);
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") long[] idx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("Nd4jLong*") long[] minShapeInfo, @Cast("Nd4jLong*") @ByRef long[] minOffset, @Cast("const bool") boolean keepUnitiesInShape/*=false*/, @Cast("const bool") boolean isStrided/*=false*/, int numOfUntiesInMinShape/*=0*/);
    @Namespace("shape") public native void calcSubArrShapeInfoAndOffset(@Cast("const Nd4jLong*") long[] idx, @Cast("const Nd4jLong*") long[] maxShapeInfo, @Cast("Nd4jLong*") long[] minShapeInfo, @Cast("Nd4jLong*") @ByRef long[] minOffset);

    /**
    * for example if inShapeInfo is {3, 2,1,4, 4,4,1, 16384,1,99}
    * then the output shapeNoUnities will contain {2,4, 4,1} - that is only shape and strides, no rank/type/ews/order
    * and stridesNoUnities will point to the strides within shapeNoUnities, that is to {4,1}
    * returns the number of non-unity dimensions in inShapeInfo
    * if there are no unities in inShapeInfo, then no copy is performed and shapeNoUnities/stridesNoUnities will point to the corresponding places in inShapeInfo
    */
    @Namespace("shape") public native int excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") LongPointer inShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongPointer shapeNoUnities, @Cast("Nd4jLong*&") @ByPtrRef LongPointer stridesNoUnities);
    @Namespace("shape") public native int excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") LongBuffer inShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer shapeNoUnities, @Cast("Nd4jLong*&") @ByPtrRef LongBuffer stridesNoUnities);
    @Namespace("shape") public native int excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") long[] inShapeInfo, @Cast("Nd4jLong*&") @ByPtrRef long[] shapeNoUnities, @Cast("Nd4jLong*&") @ByPtrRef long[] stridesNoUnities);

    /**
    * for example if inShapeInfo is {5, 2,1,3,1,4,  12,12,4,4,1, 16384,1,99}, dimsToExclude (points to the unity dimensions) = {1,3}, dimsSize = 2
    * then outShapeInfo will contain {3, 2,3,4, 12,4,1, 16384,1,99}
    */
    @Namespace("shape") public native void excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") LongPointer inShapeInfo, @Const IntPointer dimsToExclude, int dimsSize, @Cast("Nd4jLong*") LongPointer outShapeInfo);
    @Namespace("shape") public native void excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") LongBuffer inShapeInfo, @Const IntBuffer dimsToExclude, int dimsSize, @Cast("Nd4jLong*") LongBuffer outShapeInfo);
    @Namespace("shape") public native void excludeUnitiesFromShapeInfo(@Cast("const Nd4jLong*") long[] inShapeInfo, @Const int[] dimsToExclude, int dimsSize, @Cast("Nd4jLong*") long[] outShapeInfo);

    /**
    * get the stride over the contiguous axis (the contiguous axis must have stride = 1)
    * for example when inShapeInfo is {4, 2,5,4,3,  60,1,5,20, 16384,0,99} then the output is 5 (that is the smallest stride in inShapeInfo other than those equal to 1)
    */
    // INLINEDEF _CUDA_HD Nd4jLong strideOverContigAxis(const int axis, const Nd4jLong* inShapeInfo);






//END HEADERS


    //BEGIN IMPLEMENTATIONS



// #ifdef __CUDACC__
// #endif

/**
* Length of a tad given
* the shape information
*/



/**
 * Tad element wise stride:
 * given the inner most dimension (the last dimension after sorting),
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for dimension 3 will be 1.
 * For dimension zero it will be 12.
 *
 * For dimensions 2,3 it's 1.
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve the ordering of the tad,
 * but may be used for reductions.
 */
    @Namespace("shape") public native int tadElementWiseStride(@Cast("Nd4jLong*") LongPointer shapeInfo, IntPointer dimension,int dimensionLength);
    @Namespace("shape") public native int tadElementWiseStride(@Cast("Nd4jLong*") LongBuffer shapeInfo, IntBuffer dimension,int dimensionLength);
    @Namespace("shape") public native int tadElementWiseStride(@Cast("Nd4jLong*") long[] shapeInfo, int[] dimension,int dimensionLength);

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */

/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape    the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
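
    // Minimal sketch of "standard packed array strides" in c order, assuming startNum
    // is the innermost stride (normally 1): stride[i] = stride[i+1] * shape[i+1].
    // E.g. shape {2,3,2} with startNum 1 gives strides {6,2,1}.
    public static long[] calcStridesSketch(long[] shape, long startNum) {
        long[] strides = new long[shape.length];
        long current = startNum;
        for (int i = shape.length - 1; i >= 0; i--) {
            strides[i] = current;
            current *= shape[i];
        }
        return strides;
    }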

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////


// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1


/**
 * @param toCopy the shape to copy
 * @return a copy of the original struct
 */

/**
 * Get the shape info buffer
 * for the given rank and shape.
 */

    /**
     * This is a special method: it returns ONLY a 2D shape buffer.
     *
     * This method is used only for SoftMax
     */

/**
* Get the shape info buffer
* for the given rank and shape.
*/

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////


// //////////////////////////////////////////////////////////////////////
//     INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {

//         const Nd4jLong ews = shapeInfo[shapeInfo[0] + shapeInfo[0] + 2];

//         if(ews > 0 && order(shapeInfo) == 'c')
//            if (ews == 1)
//                return index;
//            else
//                return ews * index;

//         Nd4jLong offset = 0;
//         Nd4jLong rank = shapeInfo[0];
//         for(int i = 1; i <= shapeInfo[0]; ++i) {
//             arrLen /= shapeInfo[i];
//             if(arrLen > 0 && shapeInfo[i] > 1) {
//                 offset += (index / arrLen) * shapeInfo[i + rank];
//                 index %= arrLen;
//             }
//         }
//         return offset;
//     }

//     INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {

//         const uint rank = shapeInfo[0];
//         const uint ews = shapeInfo[rank + rank + 2];

//         if(ews > 0 && shapeInfo[rank + rank + 3] == 99)
//            if (ews == 1)
//                return index;
//            else
//                return ews * index;

//         uint offset = 0;

//         for(uint i = 1; i <= rank; ++i) {
//             arrLen /= shapeInfo[i];
//             if(arrLen > 0 && shapeInfo[i] > 1) {
//                 offset += (index / arrLen) * shapeInfo[i + rank];
//                 index %= arrLen;
//             }
//         }
//         return offset;
//     }

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////


//////////////////////////////////////////////////////////////////////

/**
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */

/**
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */

/**
 * Get the ordering for the device
 * @param length
 * @param shape
 * @param stride
 * @param elementStride
 * @return
 */





/**
 * Ensure that every value in the rearrange
 * array is unique
 * @param arr
 * @param shape
 * @param arrLength
 * @param shapeLength
 * @return
 */

/**
 * Permute the shape information
 * @param info the shape information to permute
 * @param rearrange the order to rearrange
 * @param rank the rank of the rearrange array
 */

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */

//////////////////////////////////////////////////////////////////////

/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/

/**
 * Returns the shape portion of an information
 * buffer
 */

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */

/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/

/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
 /*
    INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
        Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
        checkArrangeArray(rearrange, shapeRank, shapeRank);
        Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
        delete[] strideCopy;
        return newStride;
    }
    */

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4 (e.g. a rank-3 buffer such as {3, 2,1,4, 4,4,1, 16384,1,99} has length 10)
 */

/**
 * Returns the rank portion of
 * an information buffer
 */

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */

/**
 * Returns the stride portion of an information
 * buffer
 */


/**
 * Compute the length of the given shape
 */

/**
 * Returns the offset
 * portion of an information buffer
 */


/**
 * Returns the ordering
 * for this shape information buffer
 */

/**
 * Returns type
 */

/**
 * Returns the element wise stride for this information
 * buffer
 */

/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted
 *
 * item
 */

    /**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted
 *
 * item
 */

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
// #ifdef __CUDACC__
// #endif

/**
 * Returns a shape
 * whose length is forced to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
    @Namespace("shape") public native @Cast("Nd4jLong*") LongPointer ensureVectorShape(@Cast("Nd4jLong*") LongPointer shape, int dimension);
    @Namespace("shape") public native @Cast("Nd4jLong*") LongBuffer ensureVectorShape(@Cast("Nd4jLong*") LongBuffer shape, int dimension);
    @Namespace("shape") public native @Cast("Nd4jLong*") long[] ensureVectorShape(@Cast("Nd4jLong*") long[] shape, int dimension);

/**
 * Returns a shape
 * whose length is forced to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */

    /**
     * This method does STRICT comparison for two shape buffers
     *
     * @param shape
     * @return
     */

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

    /**
     * This method does SOFT comparison for two shape buffers, we compare only rank & shapes
     *
     * @param shape
     * @return
     */

/**
 * Generate an int buffer
 * up to the given length
 * at the specified increment
 *
 */

/**
 * Generate a range
 * beginning at from and ending at to
 * incrementing by 1
 * @param from the start
 * @param to the end
 * @return the int array starting at from and ending at to
 */

/**
 * Keep the given indexes in the data
 * @param data
 * @param index
 * @param indexLength
 * @param dataLength
 * @return
 */

/**
 * Generate a reverse
 * copy of the data
 */

/**
 *
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */

/**
 *
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return the length per slice of the given shape
 * along the given dimension
 */
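
    // Hedged sketch of the description above: the length per slice is assumed to be the
    // product of the shape entries that are NOT part of the given dimension list.
    public static long lengthPerSliceSketch(long[] shape, int[] dimension) {
        java.util.Set<Integer> dims = new java.util.HashSet<>();
        for (int d : dimension) dims.add(d);
        long length = 1;
        for (int i = 0; i < shape.length; i++)
            if (!dims.contains(i)) length *= shape[i];
        return length;
    }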

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */

    /**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */


// #ifdef __CUDACC__
// #endif





/**
 * Computes the number
 * of tensors along
 * a given dimension
 */

/**
 * Computes the number
 * of tensors along
 * a given dimension
 */
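
    // Hedged sketch of the count described above, assuming
    // number of tensors along a dimension = total length / tad length,
    // where the tad length is the product of the shape along the chosen dimensions.
    public static long tensorsAlongDimensionSketch(long[] shape, int[] dimension) {
        long total = 1, tadLength = 1;
        for (long s : shape) total *= s;
        for (int d : dimension) tadLength *= shape[d];
        return total / tadLength;
    }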




/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape, stride and indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the offset of the element at the specified indices
*/
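
    // Hedged sketch of the offset rule described above:
    // offset = baseOffset + sum_i indices[i] * stride[i].
    public static long getOffsetSketch(long baseOffset, long[] stride, long[] indices) {
        long offset = baseOffset;
        for (int i = 0; i < stride.length; i++)
            offset += indices[i] * stride[i];
        return offset;
    }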

//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

/**
 * Returns the tensor along dimension
 * for the given block index
 * @param blockSize
 * @param blockIdx
 * @param i
 * @return
 */

/**
 * Computes the number of tads per block
 *
 */

/**
 * Returns a shape buffer
 * for the shape information metadata.
 */
/**
 * Given a linear index, the element wise stride
 * and the length of each tad,
 * map the linear index to a tad
 * @param i the index to map
 * @param elementWiseStride the element wise stride for the tads
 * @param numElementsPerTad the number of elements
 * per tad
 */
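
    // Hedged sketch, assuming the mapping is a plain integer division:
    // every (elementWiseStride * numElementsPerTad) linear positions belong to one tad.
    public static int linearIndexToTadSketch(int i, int elementWiseStride, int numElementsPerTad) {
        return i / (elementWiseStride * numElementsPerTad);
    }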

/**
 * Map a tad to a
 * reduction index.
 * @param tadIndexForOriginal the original tad index for the
 * split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
 * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
 */

/**
 * Tad index for linear
 * @param linearIndex
 * @param tadLength
 * @return
 */

/**
 * Computes the number of tads
 * per reduce index for the
 * reduction tad.
 */

/**
 * Maps a linear index to a reduction index
 * @param i the linear index to map
 * @param elementWiseStride the element wise stride
 * for the multiple problem
 * @param tadNum the number of tads for the shrunken problem
 * @param originalTadNum the tad number for the reduced version of the problem
 */


/**
 * Returns the prod of the data
 * up to the given length
 */

    @Namespace("shape") public native int rearMostLeftOverItem(@Cast("Nd4jLong*") LongPointer data, @Cast("Nd4jLong*") LongPointer dimension,int dimensionLength);
    @Namespace("shape") public native int rearMostLeftOverItem(@Cast("Nd4jLong*") LongBuffer data, @Cast("Nd4jLong*") LongBuffer dimension,int dimensionLength);
    @Namespace("shape") public native int rearMostLeftOverItem(@Cast("Nd4jLong*") long[] data, @Cast("Nd4jLong*") long[] dimension,int dimensionLength);

// #ifdef __CUDACC__
// #endif






//    INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
//        unsigned Nd4jLong *shape;
//        unsigned int ndims, wordSize;
//        bool fortranOrder;
//        cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
//        Nd4jLong * ret =  shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
//        delete[] shape;
//        return ret;
//    }

//////////////////////////////////////////////////////////////////////////
// copy-paste of the java hasDefaultStridesForShape function

// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
//         int oldnd;
//         Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
//         Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
//         int np, op, last_stride;
//         int oi, oj, ok, ni, nj, nk;
//         Nd4jLong* newStrides = new Nd4jLong[newRank];
//         oldnd = 0;

//         /*
//          * Remove axes with dimension 1 from the old array. They have no effect
//          * but would need special cases since their strides do not matter.
//          */
//         for (oi = 0; oi < oldRank; oi++) {
//             if (shape::shapeOf(oldShape)[oi] != 1) {
//                 olddims[oldnd] = shape::shapeOf(oldShape)[oi];
//                 oldstrides[oldnd] = shape::stride(oldShape)[oi];
//                 oldnd++;
//             }
//         }

//         np = 1;
//         for (ni = 0; ni < newRank; ni++) {
//             np *= newShapeOf[ni];
//         }
//         op = 1;
//         for (oi = 0; oi < oldnd; oi++) {
//             op *= olddims[oi];
//         }
//         if (np != op) {
//             /* different total sizes; no hope */
//             delete[] olddims;
//             delete[] oldstrides;
//             delete[] newStrides;

//             return false;
//         }

//         if (np == 0) {
//             /* the current code does not handle 0-sized arrays, so give up */
//             delete[] olddims;
//             delete[] oldstrides;
//             delete[] newStrides;

//             return false;
//         }

//         /* oi to oj and ni to nj give the axis ranges currently worked with */
//         oi = 0;
//         oj = 1;
//         ni = 0;
//         nj = 1;

//         while (ni < newRank && oi < oldnd) {
//             np = newShapeOf[ni];
//             op = olddims[oi];

//             while (np != op) {
//                 if (np < op) {
//                     /* Misses trailing 1s, these are handled later */
//                     np *= newShapeOf[nj++];
//                 } else {
//                     op *= olddims[oj++];
//                 }
//             }

//             /* Check whether the original axes can be combined */
//             for (ok = oi; ok < oj - 1; ok++) {
//                 if (isFOrder) {
//                     if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
//                         /* not contiguous enough */
//                         delete[] olddims;
//                         delete[] oldstrides;
//                         delete[] newStrides;

//                         return false;
//                     }
//                 } else {
//                     /* C order */
//                     if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
//                         /* not contiguous enough */
//                         delete[] olddims;
//                         delete[] oldstrides;
//                         delete[] newStrides;

//                         return false;
//                     }
//                 }
//             }

//             /* Calculate new strides for all axes currently worked with */
//             if (isFOrder) {
//                 newStrides[ni] = oldstrides[oi];
//                 for (nk = ni + 1; nk < nj; nk++) {
//                     newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
//                 }
//             } else {
//                 /* C order */
//                 newStrides[nj - 1] = oldstrides[oj - 1];
//                 for (nk = nj - 1; nk > ni; nk--) {
//                     newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
//                 }
//             }
//             ni = nj++;
//             oi = oj++;
//         }

//         if (ni >= 1) {
//             last_stride = newStrides[ni - 1];
//         } else {
//             last_stride = shape::elementWiseStride(oldShape);
//         }
//         if (isFOrder && ni >= 1) {
//             last_stride *= newShapeOf[ni - 1];
//         }
//         for (nk = ni; nk < newRank; nk++) {
//             newStrides[nk] = last_stride;
//         }

//         target[0] = newRank;
//         int cnt = 1;
//         for (int e = 0; e < newRank; e++)
//             target[cnt++] = newShapeOf[e];

//         for (int e = 0; e < newRank; e++)
//             target[cnt++] = newStrides[e];

//         target[shape::shapeInfoLength(newRank) - 3] = 0;
//         target[shape::shapeInfoLength(newRank) - 2] = 0;
//         target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
//         sd::ArrayOptions::setDataType(target, sd::ArrayOptions::dataType(oldShape));

//         delete[] olddims;
//         delete[] oldstrides;
//         delete[] newStrides;

//         return true;
//     }

//////////////////////////////////////////////////////////////////////
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) {

//         // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
//         // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo

//         newShapeInfo[0] = newRank;
//         memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));

//         Nd4jLong* newStrides       = shape::stride(newShapeInfo);
//         const Nd4jLong* oldShape   = shape::shapeOf(const_cast(oldShapeInfo));
//         const Nd4jLong* oldStrides = shape::stride(const_cast(oldShapeInfo));
//         Nd4jLong oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;

//         while (newStart < newRank && oldStart < oldRank) {

//             newDim = newShape[newStart];
//             oldDim = oldShape[oldStart];

//             while (newDim != oldDim && newDim > 0 && oldDim > 0)
//                 if (newDim < oldDim) newDim *= newShape[newStop++];
//                 else                 oldDim *= oldShape[oldStop++];

//             // ------ Check whether the original axes can be combined ------ //
//             for (int step = 1, i = oldStart; i < oldStop - 1; ++i) {
//                 if(oldShape[i] == 1)                // skip unity-dimension and its stride
//                     continue;
//                 while((i + step) < oldRank && oldShape[i + step] == 1)
//                     ++step;                         // skip following unity-dimensions and its strides if such are present
//                 if((i + step) < oldRank && oldStrides[i] != oldShape[i + step] * oldStrides[i + step])
//                     return false;                   // not contiguous enough
//             }

//             newStrides[newStop - 1] = oldStrides[oldStop - 1];
//             for (int i = newStop - 1; i > newStart; --i)
//                 newStrides[i - 1] = newStrides[i] * newShape[i];

//             newStart = newStop++;
//             oldStart = oldStop++;
//         }

//         // rest of strides should be unities (if there is remainder in strides space, that is newStart < newRank)
//         for (int i = newStart; i < newRank; ++i)
//             newStrides[i] = 1;

//         newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo);                 // order
//         newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo);     // ews
//         newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo);                  // type

//         return true;
//     }

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

    // this function checks the consistency of the dimensions with the array rank (negative dimensions, too-large dimensions, too many dimensions)
    // it also sorts the input array of dimensions; this operation is also necessary for creating a TAD object
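
    // Hedged sketch of the check described above: reject negative, out-of-range or
    // too many dimensions, and return them sorted as TAD creation expects.
    public static int[] checkAndSortDimensionsSketch(int[] dimensions, int rank) {
        if (dimensions.length > rank)
            throw new IllegalArgumentException("too many dimensions: " + dimensions.length + " > rank " + rank);
        int[] sorted = dimensions.clone();
        java.util.Arrays.sort(sorted);
        for (int d : sorted)
            if (d < 0 || d >= rank)
                throw new IllegalArgumentException("dimension " + d + " is out of range for rank " + rank);
        return sorted;
    }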


// the max array is the outer array for the min array; the min array is a sub-array of the max array
// the function calculates the coordinates of the min array (and saves them into minIdxs) given the coordinates of the max array (already stored in maxIdxs)

    //////////////////////////////////////////////////////////////////////

    //////////////////////////////////////////////////////////////////////

    //////////////////////////////////////////////////////////////////////

    //////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////
// INLINEDEF _CUDA_HD void index2coordsCPU(const Nd4jLong& startIndex, const Nd4jLong& index, const Nd4jLong *shapeInfo, const int* dims, const int dimsLen, int *coords) {

//     if(startIndex == index) {
//         shape::index2coords(index, shapeInfo, dims, dimsLen, coords);
//     }
//     else {
//         int i = dimsLen - 1;
//         while(coords[dims[i]] == shape::sizeAt(shapeInfo, dims[i]) - 1)
//             coords[dims[i--]] = 0;
//         ++coords[dims[i]];
//     }
// }

//////////////////////////////////////////////////////////////////////
// INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order) {

//     // we assume all arrays have the same length
//     const Nd4jLong len = shape::length(xShapeInfo);

//     const Nd4jLong xEws = shape::elementWiseStride(xShapeInfo);
//     const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo);
//     const Nd4jLong zEws = shape::elementWiseStride(zShapeInfo);

//     const char xOrder = shape::order(xShapeInfo);
//     const char yOrder = shape::order(yShapeInfo);
//     const char zOrder = shape::order(zShapeInfo);

//     const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo, zShapeInfo);

//     if (xEws == 1 && yEws == 1 && zEws == 1 && xOrder == yOrder && xOrder == zOrder && (xOrder == 'c' || shapesSame)) {
//         xOffsets = yOffsets = zOffsets = nullptr;
//     }
//     else if(xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, yShapeInfo))) {
//         xOffsets = yOffsets = nullptr;
//         zOffsets = new Nd4jLong[len];
//         shape::calcOffsets(zShapeInfo, zOffsets, xOrder);
//     }
//     else if(xEws == 1 && zEws == 1 && xOrder == zOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, zShapeInfo))) {
//         xOffsets = zOffsets = nullptr;
//         yOffsets = new Nd4jLong[len];
//         shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
//     }
//     else if(yEws == 1 && zEws == 1 && yOrder == zOrder && (yOrder == 'c' || shape::shapeEquals(yShapeInfo, zShapeInfo))) {
//         yOffsets = zOffsets = nullptr;
//         xOffsets = new Nd4jLong[len];
//         shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
//     }
//     else if(xEws == 1) {
//         xOffsets = nullptr;
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 yOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 zOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(zShapeInfo, zOffsets, xOrder);
//             }
//         }
//     }
//     else if(yEws == 1) {
//         yOffsets = nullptr;
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 zOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(zShapeInfo, zOffsets, yOrder);
//             }
//         }
//     }
//     else if(zEws == 1) {
//         zOffsets = nullptr;
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets, zOrder);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 yOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(yShapeInfo, yOffsets, zOrder);
//             }
//         }
//     }
//     else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo)) {
//         xOffsets = new Nd4jLong[len];
//         shape::calcOffsets(xShapeInfo, xOffsets);
//         yOffsets = zOffsets = xOffsets;
//     }
//     else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) {
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 zOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(zShapeInfo, zOffsets);
//             }
//         }
//         yOffsets = xOffsets;
//     }
//     else if(shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo)) {
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 yOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(yShapeInfo, yOffsets);
//             }
//         }
//         zOffsets = xOffsets;
//     }
//     else {
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 yOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(yShapeInfo, yOffsets);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 zOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(zShapeInfo, zOffsets);
//             }
//         }
//     }
// }

//////////////////////////////////////////////////////////////////////
// INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const char order) {

//     // we assume all arrays have the same length
//     const Nd4jLong len = shape::length(xShapeInfo);

//     const Nd4jLong xEws = shape::elementWiseStride(xShapeInfo);
//     const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo);

//     const char xOrder = shape::order(xShapeInfo);
//     const char yOrder = shape::order(yShapeInfo);

//     const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo);

//     if (xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shapesSame)) {
//         xOffsets = yOffsets = nullptr;
//     }
//     else if(xEws == 1) {
//         xOffsets = nullptr;
//         yOffsets = new Nd4jLong[len];
//         shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
//     }
//     else if(yEws == 1) {
//         yOffsets = nullptr;
//         xOffsets = new Nd4jLong[len];
//         shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
//     }
//     else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) {
//         xOffsets = new Nd4jLong[len];
//         shape::calcOffsets(xShapeInfo, xOffsets);
//         yOffsets = xOffsets;
//     }
//     else {
//         PRAGMA_OMP_PARALLEL_SECTIONS
//         {
//             PRAGMA_OMP_SECTION
//             {
//                 xOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(xShapeInfo, xOffsets);
//             }
//             PRAGMA_OMP_SECTION
//             {
//                 yOffsets = new Nd4jLong[len];
//                 shape::calcOffsets(yShapeInfo, yOffsets);
//             }
//         }
//     }
// }


//////////////////////////////////////////////////////////////////////
// INLINEDEF _CUDA_HD Nd4jLong strideOverContigAxis(const int axis, const Nd4jLong* inShapeInfo) {

//     Nd4jLong result = 9223372036854775807LL;

//     for(uint i = 0; i < shape::rank(inShapeInfo); ++i) {

//         const auto currentStride = shape::stride(inShapeInfo)[i];

//         if(i == axis || shape::shapeOf(inShapeInfo)[i] == 1)
//             continue;

//         if(result > currentStride)
//             result = currentStride;
//     }

//     return result == 9223372036854775807LL ? 1 : result;
// }





// #endif /* SHAPE_H_ */


// Parsed from helpers/OpArgsHolder.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma ([email protected]), created on 15.07.2018
//

// #ifndef LIBND4J_OPARGSHOLDER_H
// #define LIBND4J_OPARGSHOLDER_H


// #include 
// #include 

@Namespace("sd") @NoOffset public static class OpArgsHolder extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpArgsHolder(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpArgsHolder(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpArgsHolder position(long position) {
        return (OpArgsHolder)super.position(position);
    }
    @Override public OpArgsHolder getPointer(long i) {
        return new OpArgsHolder((Pointer)this).position(position + i);
    }


    // default constructor
	public OpArgsHolder() { super((Pointer)null); allocate(); }
	private native void allocate();

    // copy constructor
    public OpArgsHolder(@Const @ByRef OpArgsHolder other) { super((Pointer)null); allocate(other); }
    private native void allocate(@Const @ByRef OpArgsHolder other);

    // constructor
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector DoublePointer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongPointer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector DoublePointer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongPointer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs) { super((Pointer)null); allocate(inArrs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector DoubleBuffer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector DoubleBuffer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector double[] tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector long[] iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector double[] tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector long[] iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector DoublePointer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongPointer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector DoublePointer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongPointer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector DoubleBuffer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector DoubleBuffer tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs/*=std::vector()*/, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/);
    public OpArgsHolder(@Const @ByRef NDArrayVector inArrs, @StdVector double[] tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector long[] iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/) { super((Pointer)null); allocate(inArrs, tArgs, iArgs, bArgs); }
    private native void allocate(@Const @ByRef NDArrayVector inArrs, @StdVector double[] tArgs/*=std::vector()*/, @Cast("Nd4jLong*") @StdVector long[] iArgs/*=std::vector()*/, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/);

    // move constructor

    // assignment operator
    public native @ByRef @Name("operator =") OpArgsHolder put(@Const @ByRef OpArgsHolder other);

    // move assignment operator

    public native @Const @ByRef NDArrayVector getInArrs();

    public native @StdVector DoublePointer getTArgs();

    public native @Cast("Nd4jLong*") @StdVector LongPointer getIArgs();

    public native @Cast("bool*") @StdVector BooleanPointer getBArgs();

    public native @Cast("bool*") @StdVector BooleanPointer getAllocInfo();

    public native int getNumInArrs();

    public native int getNumTArgs();

    public native int getNumIArgs();

    public native int getNumBArgs();

    public native @ByVal OpArgsHolder createArgsHolderForBP(@Const @ByRef NDArrayVector inGradArrs, @Cast("const bool") boolean isInPlace/*=false*/);
    public native @ByVal OpArgsHolder createArgsHolderForBP(@Const @ByRef NDArrayVector inGradArrs);

}
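
    // Hedged usage sketch for the OpArgsHolder declared above. The NDArrayVector
    // arguments are assumed to be built elsewhere in this binding (it wraps a
    // std::vector of NDArray pointers); everything else uses the overloads shown above.
    public static OpArgsHolder opArgsHolderSketch(NDArrayVector inArrs, NDArrayVector inGradArrs) {
        OpArgsHolder forward = new OpArgsHolder(inArrs,
                new double[] {0.5},           // tArgs
                new long[]   {1, 2},          // iArgs
                new boolean[]{true});         // bArgs
        int numInputs = forward.getNumInArrs();  // number of input arrays captured by the holder
        OpArgsHolder backprop = forward.createArgsHolderForBP(inGradArrs, false); // gradients, not in-place
        return numInputs > 0 ? backprop : forward;
    }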







// #endif //LIBND4J_OPARGSHOLDER_H


// Parsed from array/ShapeList.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_SHAPELIST_H
// #define LIBND4J_SHAPELIST_H

// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class ShapeList extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ShapeList(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ShapeList(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ShapeList position(long position) {
            return (ShapeList)super.position(position);
        }
        @Override public ShapeList getPointer(long i) {
            return new ShapeList((Pointer)this).position(position + i);
        }
    
        public ShapeList(@Cast("const Nd4jLong*") LongPointer shape/*=nullptr*/) { super((Pointer)null); allocate(shape); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shape/*=nullptr*/);
        public ShapeList() { super((Pointer)null); allocate(); }
        private native void allocate();
        public ShapeList(@Cast("const Nd4jLong*") LongBuffer shape/*=nullptr*/) { super((Pointer)null); allocate(shape); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shape/*=nullptr*/);
        public ShapeList(@Cast("const Nd4jLong*") long[] shape/*=nullptr*/) { super((Pointer)null); allocate(shape); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shape/*=nullptr*/);
        public ShapeList(@Cast("const Nd4jLong**") @StdVector PointerPointer shapes, @Cast("bool") boolean isWorkspace) { super((Pointer)null); allocate(shapes, isWorkspace); }
        private native void allocate(@Cast("const Nd4jLong**") @StdVector PointerPointer shapes, @Cast("bool") boolean isWorkspace);
        public ShapeList(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr LongPointer shapes, @Cast("bool") boolean isWorkspace) { super((Pointer)null); allocate(shapes, isWorkspace); }
        private native void allocate(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr LongPointer shapes, @Cast("bool") boolean isWorkspace);
        public ShapeList(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr LongBuffer shapes, @Cast("bool") boolean isWorkspace) { super((Pointer)null); allocate(shapes, isWorkspace); }
        private native void allocate(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr LongBuffer shapes, @Cast("bool") boolean isWorkspace);
        public ShapeList(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr long[] shapes, @Cast("bool") boolean isWorkspace) { super((Pointer)null); allocate(shapes, isWorkspace); }
        private native void allocate(@Cast("const Nd4jLong**") @StdVector @ByPtrPtr long[] shapes, @Cast("bool") boolean isWorkspace);
        public ShapeList(@Cast("const Nd4jLong**") @StdVector PointerPointer shapes) { super((Pointer)null); allocate(shapes); }
        private native void allocate(@Cast("const Nd4jLong**") @StdVector PointerPointer shapes);
        //ShapeList(bool autoRemovable);

        public native @Cast("const Nd4jLong**") @StdVector PointerPointer asVector();
        public native void destroy();
        public native int size();
        public native @Cast("const Nd4jLong*") LongPointer at(int idx);
        public native void push_back(@Cast("const Nd4jLong*") LongPointer shape);
        public native void push_back(@Cast("const Nd4jLong*") LongBuffer shape);
        public native void push_back(@Cast("const Nd4jLong*") long[] shape);

        /**
         * PLEASE NOTE: This method should be called ONLY if the shapes were generated in workspaces. Otherwise you'll get a memory leak
         */
        public native void detach();
    }
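
    // Hedged usage sketch for the ShapeList wrapper declared above: push one
    // shapeInfo buffer and read it back. A LongPointer is used so the native list
    // keeps a live address; the data-type word 8192 is just an assumed placeholder.
    public static void shapeListUsageSketch() {
        ShapeList list = new ShapeList();
        LongPointer shapeInfo = new LongPointer(2, 3, 4, 4, 1, 8192, 1, 99); // rank-2 c-order buffer
        list.push_back(shapeInfo);
        int count = list.size();              // -> 1
        LongPointer first = list.at(0);       // the buffer we just pushed
        long rank = first.get(0);             // -> 2
    }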



// #endif //LIBND4J_SHAPELIST_H


// Parsed from system/type_boilerplate.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver on 6/12/2018.
//

// #ifndef LIBND4J_TYPE_BOILERPLATE_H
// #define LIBND4J_TYPE_BOILERPLATE_H

// #define EXPAND(...) __VA_ARGS__
// #define EXPAND2(...) __VA_ARGS__
// #define EXPAND3(...) __VA_ARGS__
// #define EXTRACT(...) EXTRACT __VA_ARGS__
// #define NOTHING_EXTRACT
// #define PASTE(x, ...) x ## __VA_ARGS__
// #define PASTE2(x, ...) x ## __VA_ARGS__
// #define PASTE3(x, ...) x ## __VA_ARGS__
// #define EVALUATING_PASTE(x, ...) PASTE(x, __VA_ARGS__)
// #define EVALUATING_PASTE2(x, ...) PASTE2(x, __VA_ARGS__)
// #define EVALUATING_PASTE3(x, ...) PASTE3(x, __VA_ARGS__)
// #define UNPAREN(x) EVALUATING_PASTE(NOTHING_, EXTRACT x)
// #define UNPAREN2(x) EVALUATING_PASTE2(NOTHING_, EXTRACT x)
// #define UNPAREN3(x) EVALUATING_PASTE3(NOTHING_, EXTRACT x)
// #define EVAL( x ) x
// #define EVALX( x ) x
// #define EVAL0(...)  EVAL1(EVAL1(EVAL1(__VA_ARGS__)))
// #define EVAL1(...) EVAL2(EVAL2(EVAL2(__VA_ARGS__)))
// #define EVAL2(...) EVAL3(EVAL3(EVAL3(__VA_ARGS__)))
// #define EVAL3(...) EVAL4(EVAL4(EVAL4(__VA_ARGS__)))
// #define EVAL4(...) EVAL5(EVAL5(EVAL5(__VA_ARGS__)))
// #define EVAL5(...) __VA_ARGS__


// #define SEL_T_1(WHAT, NAME, SIGNATURE, TYPE_A) WHAT(NAME, SIGNATURE, TYPE_A)
// #define SEL_T_2(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_1(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_3(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_2(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_4(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_3(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_5(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_4(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_6(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_5(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_7(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_6(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_8(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_7(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_9(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_8(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_10(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_9(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_11(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_10(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_12(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_11(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_13(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_12(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_14(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_13(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_15(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_14(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_16(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_15(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_17(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_16(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_18(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_17(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_19(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_18(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define SEL_T_20(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(SEL_T_19(WHAT, NAME, SIGNATURE, __VA_ARGS__))


// #define SEL_TT1_1(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define SEL_TT1_2(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_1(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_3(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_2(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_4(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_3(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_5(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_4(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_6(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_5(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_7(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_6(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_8(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_7(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_9(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_8(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_10(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_9(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_11(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_10(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_12(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_11(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_13(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_12(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_14(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_13(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_15(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_14(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_16(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_15(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_17(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_16(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_18(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_17(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_19(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_18(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT1_20(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(YTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT1_19(WHAT, YTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))


// #define SEL_P1_1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define SEL_P1_2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_3(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_4(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_3(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_5(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_4(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_6(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_5(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_7(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_6(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_8(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_7(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_9(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_8(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_10(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_9(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_11(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_10(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_12(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_11(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_13(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_12(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_14(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_13(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_15(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_14(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_16(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_15(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_17(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_16(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_18(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_17(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_19(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_18(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P1_20(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P1_19(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))

// #define SEL_P2_1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define SEL_P2_2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_3(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_4(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_3(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_5(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_4(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_6(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_5(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_7(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_6(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_8(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_7(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_9(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_8(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_10(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_9(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_11(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_10(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_12(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_11(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_13(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_12(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_14(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_13(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_15(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_14(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_16(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_15(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_17(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_16(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_18(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_17(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_19(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_18(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_P2_20(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_P2_19(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
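// The SEL_P1_* (above) and SEL_P2_* helpers unroll a variadic type list of up to 20 entries: each SEL_Px_N
// applies WHAT to the head type TYPE_B together with the fixed XTYPE/YTYPE/ZTYPE/NAME/SIGNATURE/TYPE_A
// arguments, then recurses into SEL_Px_(N-1) over the remaining __VA_ARGS__. EVAL is not reproduced in this
// section; it presumably just forces the preprocessor to rescan the nested expansion.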




// #define SEL_TT2_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define SEL_TT2_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_1(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_2(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_3(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_4(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_5(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_6(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_7(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_8(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_9(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_10(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_11(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_12(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_13(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_14(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_15(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_16(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_17(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_18(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define SEL_TT2_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(SEL_TT2_19(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
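// SEL_TT2_* follows the same head/tail recursion, but carries only NAME, SIGNATURE and TYPE_A as fixed
// arguments alongside the per-step TYPE_B.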


// #define DS_1(WHAT, NAME, SIGNATURE, TYPE_A) WHAT(NAME, SIGNATURE, TYPE_A)
// #define DS_2(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_1(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_3(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_2(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_4(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_3(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_5(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_4(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_6(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_5(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_7(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_6(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_8(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_7(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_9(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_8(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_10(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_9(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_11(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_10(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_12(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_11(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_13(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_12(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_14(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_13(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_15(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_14(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_16(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_15(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_17(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_16(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_18(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_17(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_19(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_18(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DS_20(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DS_19(WHAT, NAME, SIGNATURE, __VA_ARGS__))
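// DS_* iterates a flat list of up to 20 single types, emitting WHAT(NAME, SIGNATURE, TYPE_A) once per entry.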


// #define DP_1(WHAT, NAME, SIGNATURE, TYPE_A) WHAT(NAME, SIGNATURE, TYPE_A)
// #define DP_2(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_1(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_3(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_2(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_4(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_3(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_5(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_4(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_6(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_5(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_7(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_6(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_8(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_7(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_9(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_8(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_10(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_9(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_11(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_10(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_12(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_11(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_13(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_12(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_14(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_13(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_15(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_14(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_16(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_15(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_17(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_16(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_18(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_17(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_19(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_18(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_20(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_19(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_21(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_20(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_22(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_21(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_23(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_22(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_24(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_23(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_25(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_24(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_26(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_25(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_27(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_26(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_28(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_27(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_29(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_28(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_30(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_29(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_31(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_30(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_32(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_31(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_33(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_32(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_34(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_33(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_35(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_34(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_36(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_35(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_37(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_36(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_38(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_37(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_39(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_38(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_40(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_39(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_41(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_40(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_42(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_41(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_43(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_42(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_44(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_43(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_45(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_44(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_46(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_45(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_47(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_46(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_48(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_47(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_49(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_48(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_50(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_49(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_51(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_50(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_52(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_51(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_53(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_52(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_54(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_53(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_55(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_54(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_56(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_55(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_57(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_56(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_58(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_57(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_59(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_58(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_60(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_59(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_61(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_60(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_62(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_61(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_63(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_62(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_64(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_63(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_65(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_64(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_66(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_65(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_67(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_66(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_68(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_67(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_69(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_68(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_70(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_69(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_71(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_70(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_72(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_71(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_73(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_72(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_74(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_73(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_75(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_74(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_76(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_75(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_77(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_76(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_78(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_77(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_79(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_78(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_80(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_79(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_81(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_80(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_82(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_81(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_83(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_82(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_84(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_83(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_85(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_84(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_86(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_85(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_87(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_86(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_88(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_87(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_89(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_88(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_90(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_89(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_91(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_90(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_92(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_91(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_93(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_92(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_94(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_93(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_95(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_94(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_96(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_95(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_97(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_96(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_98(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_97(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_99(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_98(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_100(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_99(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_101(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_100(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_102(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_101(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_103(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_102(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_104(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_103(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_105(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_104(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_106(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_105(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_107(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_106(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_108(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_107(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_109(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_108(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_110(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_109(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_111(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_110(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_112(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_111(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_113(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_112(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_114(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_113(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_115(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_114(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_116(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_115(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_117(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_116(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_118(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_117(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_119(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_118(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_120(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_119(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_121(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_120(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_122(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_121(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_123(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_122(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_124(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_123(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define DP_125(WHAT, NAME, SIGNATURE, TYPE_A, ...) WHAT(NAME, SIGNATURE, TYPE_A)EVAL(DP_124(WHAT, NAME, SIGNATURE, __VA_ARGS__))
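// DP_* is structurally identical to DS_* but is unrolled to 125 entries, so a single invocation can cover a
// much longer argument list.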


// #define DT_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define DT_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_1(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_2(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_3(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_4(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_5(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_6(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_7(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_8(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_9(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_10(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_11(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_12(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_13(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_14(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_15(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_16(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_17(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_18(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT_19(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))

// #define DT2_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)
// #define DT2_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_1(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_2(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_3(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_4(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_5(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_6(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_7(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_8(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_9(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_10(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_11(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_12(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_13(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_14(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_15(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_16(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_17(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_18(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define DT2_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B)EVAL(DT2_19(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
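// DT_* and DT2_* are two identical 20-entry unrollers that keep NAME, SIGNATURE and TYPE_A fixed and walk the
// variadic list as TYPE_B; the duplicate family presumably exists so that these expansions can be nested
// without the preprocessor suppressing a macro that is already being expanded.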

// #define TTT1_1(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TTT1_2(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_1(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_3(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_2(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_4(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_3(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_5(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_4(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_6(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_5(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_7(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_6(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_8(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_7(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_9(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_8(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_10(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_9(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_11(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_10(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_12(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_11(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_13(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_12(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_14(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_13(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_15(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_14(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_16(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_15(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_17(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_16(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_18(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_17(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_19(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_18(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT1_20(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT1_19(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))

// #define TTT2_1(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TTT2_2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_1(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_4(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_5(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_4(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_6(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_5(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_7(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_6(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_8(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_7(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_9(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_8(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_10(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_9(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_11(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_10(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_12(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_11(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_13(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_12(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_14(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_13(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_15(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_14(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_16(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_15(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_17(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_16(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_18(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_17(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_19(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_18(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT2_20(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT2_19(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))


// #define TTT3_1(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TTT3_2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_1(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_4(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_5(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_4(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_6(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_5(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_7(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_6(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_8(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_7(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_9(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_8(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_10(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_9(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_11(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_10(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_12(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_11(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_13(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_12(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_14(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_13(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_15(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_14(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_16(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_15(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_17(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_16(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_18(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_17(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_19(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_18(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TTT3_20(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TTT3_19(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
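// TTT1_*, TTT2_* and TTT3_* unroll type triples: TYPE_C is taken from the head of the variadic list at each
// step while TYPE_A and TYPE_B stay fixed. TTT1_* additionally threads YTYPE and ZTYPE through every call,
// whereas TTT2_*/TTT3_* thread only ZTYPE.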



// #define TT1_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TT1_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT1_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT1_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))

// #define TT2_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TT2_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT2_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT2_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))

// #define TT3_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)
// #define TT3_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_1(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_2(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_3(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_4(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_5(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_6(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_7(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_8(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_9(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_10(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_11(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_12(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_13(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_14(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_15(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_16(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_17(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_18(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
// #define TT3_20(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C, ...) WHAT(NAME, SIGNATURE, TYPE_A, TYPE_B, TYPE_C)EVAL(TT3_19(WHAT, NAME, SIGNATURE, TYPE_A, TYPE_B, __VA_ARGS__))
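// TT1_*, TT2_* and TT3_* are three copies of the same 20-entry unroller over (TYPE_A, TYPE_B, TYPE_C) with
// NAME and SIGNATURE fixed; again, the duplicates presumably allow the families to be nested inside one
// another.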


// #define GET_MACRO_SEL_T(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_SEL_P1(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_SEL_P2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_SEL_TT1(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_SEL_TT2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_DS(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_DT(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_DP(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, _100, _101, _102, _103, _104, _105, _106, _107, _108, _109, _110, _111, _112, _113, _114, _115, _116, _117, _118, _119, _120, _121, _122, _123, _124, _125, NAME,...) NAME
// #define GET_MACRO_DT2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME


// #define GET_MACRO_TT1(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_TT2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_TT3(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME

// #define GET_MACRO_TTT1(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_TTT2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define GET_MACRO_TTT3(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME

// #define FOR_EACH_S1(WHAT, NAME, SIGNATURE, ...) EXPAND(GET_MACRO_SEL_T(__VA_ARGS__, SEL_T_20, SEL_T_19, SEL_T_18, SEL_T_17, SEL_T_16, SEL_T_15, SEL_T_14, SEL_T_13, SEL_T_12, SEL_T_11, SEL_T_10, SEL_T_9, SEL_T_8, SEL_T_7, SEL_T_6, SEL_T_5, SEL_T_4, SEL_T_3, SEL_T_2, SEL_T_1)(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define FOR_EACH_S2(WHAT, YTYPE, NAME, SIGNATURE, TYPES_A, ...) EXPAND(GET_MACRO_SEL_TT1(__VA_ARGS__, SEL_TT1_20, SEL_TT1_19, SEL_TT1_18, SEL_TT1_17, SEL_TT1_16, SEL_TT1_15, SEL_TT1_14, SEL_TT1_13, SEL_TT1_12, SEL_TT1_11, SEL_TT1_10, SEL_TT1_9, SEL_TT1_8, SEL_TT1_7, SEL_TT1_6, SEL_TT1_5, SEL_TT1_4, SEL_TT1_3, SEL_TT1_2, SEL_TT1_1)(WHAT, YTYPE, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define FOR_EACH_P1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, ...) EXPAND(GET_MACRO_SEL_P1(__VA_ARGS__, SEL_P1_20, SEL_P1_19, SEL_P1_18, SEL_P1_17, SEL_P1_16, SEL_P1_15, SEL_P1_14, SEL_P1_13, SEL_P1_12, SEL_P1_11, SEL_P1_10, SEL_P1_9, SEL_P1_8, SEL_P1_7, SEL_P1_6, SEL_P1_5, SEL_P1_4, SEL_P1_3, SEL_P1_2, SEL_P1_1)(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define FOR_EACH_P2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, ...) EXPAND2(GET_MACRO_SEL_P2(__VA_ARGS__, SEL_P2_20, SEL_P2_19, SEL_P2_18, SEL_P2_17, SEL_P2_16, SEL_P2_15, SEL_P2_14, SEL_P2_13, SEL_P2_12, SEL_P2_11, SEL_P2_10, SEL_P2_9, SEL_P2_8, SEL_P2_7, SEL_P2_6, SEL_P2_5, SEL_P2_4, SEL_P2_3, SEL_P2_2, SEL_P2_1)(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define FOR_EACH_S3(WHAT, NAME, SIGNATURE, TYPE_A, ...) EXPAND(GET_MACRO_SEL_TT2(__VA_ARGS__, SEL_TT2_20, SEL_TT2_19, SEL_TT2_18, SEL_TT2_17, SEL_TT2_16, SEL_TT2_15, SEL_TT2_14, SEL_TT2_13, SEL_TT2_12, SEL_TT2_11, SEL_TT2_10, SEL_TT2_9, SEL_TT2_8, SEL_TT2_7, SEL_TT2_6, SEL_TT2_5, SEL_TT2_4, SEL_TT2_3, SEL_TT2_2, SEL_TT2_1)(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define FOR_EACH_DS(WHAT, NAME, SIGNATURE, ...) EXPAND(GET_MACRO_DS(__VA_ARGS__, DS_20, DS_19, DS_18, DS_17, DS_16, DS_15, DS_14, DS_13, DS_12, DS_11, DS_10, DS_9, DS_8, DS_7, DS_6, DS_5, DS_4, DS_3, DS_2, DS_1)(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define FOR_EACH_DT(WHAT, NAME, SIGNATURE, TYPES_A, ...) EXPAND(GET_MACRO_DT(__VA_ARGS__, DT_20, DT_19, DT_18, DT_17, DT_16, DT_15, DT_14, DT_13, DT_12, DT_11, DT_10, DT_9, DT_8, DT_7, DT_6, DT_5, DT_4, DT_3, DT_2, DT_1)(WHAT, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define FOR_EACH_DT2(WHAT, NAME, SIGNATURE, TYPE_A, ...) EXPAND(GET_MACRO_DT2(__VA_ARGS__, DT2_20, DT2_19, DT2_18, DT2_17, DT2_16, DT2_15, DT2_14, DT2_13, DT2_12, DT2_11, DT2_10, DT2_9, DT2_8, DT2_7, DT2_6, DT2_5, DT2_4, DT2_3, DT2_2, DT2_1)(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define FOR_EACH_DP(WHAT, NAME, SIGNATURE, ...) EXPAND(GET_MACRO_DP(__VA_ARGS__, DP_125, DP_124, DP_123, DP_122, DP_121, DP_120, DP_119, DP_118, DP_117, DP_116, DP_115, DP_114, DP_113, DP_112, DP_111, DP_110, DP_109, DP_108, DP_107, DP_106, DP_105, DP_104, DP_103, DP_102, DP_101, DP_100, DP_99, DP_98, DP_97, DP_96, DP_95, DP_94, DP_93, DP_92, DP_91, DP_90, DP_89, DP_88, DP_87, DP_86, DP_85, DP_84, DP_83, DP_82, DP_81, DP_80, DP_79, DP_78, DP_77, DP_76, DP_75, DP_74, DP_73, DP_72, DP_71, DP_70, DP_69, DP_68, DP_67, DP_66, DP_65, DP_64, DP_63, DP_62, DP_61, DP_60, DP_59, DP_58, DP_57, DP_56, DP_55, DP_54, DP_53, DP_52, DP_51, DP_50, DP_49, DP_48, DP_47, DP_46, DP_45, DP_44, DP_43, DP_42, DP_41, DP_40, DP_39, DP_38, DP_37, DP_36, DP_35, DP_34, DP_33, DP_32, DP_31, DP_30, DP_29, DP_28, DP_27, DP_26, DP_25, DP_24, DP_23, DP_22, DP_21, DP_20, DP_19, DP_18, DP_17, DP_16, DP_15, DP_14, DP_13, DP_12, DP_11, DP_10, DP_9, DP_8, DP_7, DP_6, DP_5, DP_4, DP_3, DP_2, DP_1)(WHAT, NAME, SIGNATURE, __VA_ARGS__))


// #define FOR_EACH_TT1(WHAT, NAME, SIGNATURE, TYPES_X, TYPES_Y, ...) EXPAND(GET_MACRO_TT1(__VA_ARGS__, TT1_20, TT1_19, TT1_18, TT1_17, TT1_16, TT1_15, TT1_14, TT1_13, TT1_12, TT1_11, TT1_10, TT1_9, TT1_8, TT1_7, TT1_6, TT1_5, TT1_4, TT1_3, TT1_2, TT1_1)(WHAT, NAME, SIGNATURE, TYPES_X, TYPES_Y, __VA_ARGS__))
// #define FOR_EACH_TT2(WHAT, NAME, SIGNATURE, TYPE_Z, TYPES_X,  ...) EXPAND(GET_MACRO_TT2(__VA_ARGS__, TT2_20, TT2_19, TT2_18, TT2_17, TT2_16, TT2_15, TT2_14, TT2_13, TT2_12, TT2_11, TT2_10, TT2_9, TT2_8, TT2_7, TT2_6, TT2_5, TT2_4, TT2_3, TT2_2, TT2_1)(WHAT, NAME, SIGNATURE, TYPE_Z, TYPES_X, __VA_ARGS__))
// #define FOR_EACH_TT3(WHAT, NAME, SIGNATURE, TYPE_Z, TYPE_Y, ...) EXPAND(GET_MACRO_TT3(__VA_ARGS__, TT3_20, TT3_19, TT3_18, TT3_17, TT3_16, TT3_15, TT3_14, TT3_13, TT3_12, TT3_11, TT3_10, TT3_9, TT3_8, TT3_7, TT3_6, TT3_5, TT3_4, TT3_3, TT3_2, TT3_1)(WHAT, NAME, SIGNATURE, TYPE_Z, TYPE_Y, __VA_ARGS__))

// #define FOR_EACH_TTT1(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_Z, TYPES_Y, ...) EXPAND(GET_MACRO_TTT1(__VA_ARGS__, TTT1_20, TTT1_19, TTT1_18, TTT1_17, TTT1_16, TTT1_15, TTT1_14, TTT1_13, TTT1_12, TTT1_11, TTT1_10, TTT1_9, TTT1_8, TTT1_7, TTT1_6, TTT1_5, TTT1_4, TTT1_3, TTT1_2, TTT1_1)(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_Z, TYPES_Y, __VA_ARGS__))
// #define FOR_EACH_TTT2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, ...) EXPAND(GET_MACRO_TTT2(__VA_ARGS__, TTT2_20, TTT2_19, TTT2_18, TTT2_17, TTT2_16, TTT2_15, TTT2_14, TTT2_13, TTT2_12, TTT2_11, TTT2_10, TTT2_9, TTT2_8, TTT2_7, TTT2_6, TTT2_5, TTT2_4, TTT2_3, TTT2_2, TTT2_1)(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, __VA_ARGS__))
// #define FOR_EACH_TTT3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, ...) EXPAND(GET_MACRO_TTT3(__VA_ARGS__, TTT3_20, TTT3_19, TTT3_18, TTT3_17, TTT3_16, TTT3_15, TTT3_14, TTT3_13, TTT3_12, TTT3_11, TTT3_10, TTT3_9, TTT3_8, TTT3_7, TTT3_6, TTT3_5, TTT3_4, TTT3_3, TTT3_2, TTT3_1)(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, __VA_ARGS__))
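
// The GET_MACRO_* / FOR_EACH_* pairs above rely on a standard argument-counting trick:
// the caller's variadic arguments shift a descending list of worker names so that the NAME
// slot lands on the worker matching the argument count, and each worker expands one element
// and hands the rest to the next smaller worker. A minimal, self-contained sketch of the
// same mechanism (the MY_* names are illustrative only, not part of this header):
//
//   #define MY_FE_1(WHAT, X)      WHAT(X)
//   #define MY_FE_2(WHAT, X, ...) WHAT(X) MY_FE_1(WHAT, __VA_ARGS__)
//   #define MY_FE_3(WHAT, X, ...) WHAT(X) MY_FE_2(WHAT, __VA_ARGS__)
//   #define MY_GET(_1, _2, _3, NAME, ...) NAME
//   #define MY_FOR_EACH(WHAT, ...) MY_GET(__VA_ARGS__, MY_FE_3, MY_FE_2, MY_FE_1)(WHAT, __VA_ARGS__)
//
//   #define MY_DECLARE(T) void print_##T(T value);
//   MY_FOR_EACH(MY_DECLARE, int, float, double)
//   // -> void print_int(int value); void print_float(float value); void print_double(double value);
//
// The real macros above additionally route each recursion step through EVAL, which forces
// extra preprocessor rescans of the growing expansion.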

// #define _EXEC_SELECTOR_T(WHAT, NAME, SIGNATURE, ...) EVAL(FOR_EACH_S1(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define _EXEC_SELECTOR_P_1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, ...) EVAL(FOR_EACH_P1(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define _EXEC_SELECTOR_P_2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, ...) EVAL(FOR_EACH_P2(WHAT, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define _EXEC_SELECTOR_TT_1(WHAT, YTYPE, NAME, SIGNATURE, TYPES_A, ...) EVAL(FOR_EACH_S2(WHAT, YTYPE, NAME, SIGNATURE, TYPES_A, __VA_ARGS__))
// #define _EXEC_SELECTOR_TT_2(WHAT, NAME, SIGNATURE, TYPE_A, ...) EVAL(FOR_EACH_S3(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define _EXEC_SINGLE_T(WHAT, NAME, SIGNATURE, ...) EVAL(FOR_EACH_DS(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define _EXEC_DOUBLE_T(WHAT, NAME, SIGNATURE, TYPES_A, ...) EVAL(FOR_EACH_DT(WHAT, NAME, SIGNATURE, LIST(TYPES_A), __VA_ARGS__))
// #define _EXEC_DOUBLE_T2(WHAT, NAME, SIGNATURE, TYPE_A, ...) EVAL(FOR_EACH_DT2(WHAT, NAME, SIGNATURE, TYPE_A, __VA_ARGS__))
// #define _EXEC_DOUBLE_P(WHAT, NAME, SIGNATURE, ...) EVAL(FOR_EACH_DP(WHAT, NAME, SIGNATURE, __VA_ARGS__))

// #define _EXEC_SELECTOR_TTT_1(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_Z, TYPES_Y, ...) EVAL(FOR_EACH_TTT1(WHAT, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_Z, TYPES_Y, __VA_ARGS__))
// #define _EXEC_SELECTOR_TTT_2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, ...) EVAL(FOR_EACH_TTT2(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, __VA_ARGS__))
// #define _EXEC_SELECTOR_TTT_3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, ...) EVAL(FOR_EACH_TTT3(WHAT, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, __VA_ARGS__))

// #define _EXEC_TRIPLE_T3(WHAT, NAME, SIGNATURE, TYPE_Z, TYPE_Y, ...) EVAL(FOR_EACH_TT3(WHAT, NAME, SIGNATURE, TYPE_Z, TYPE_Y, __VA_ARGS__))
// #define _EXEC_TRIPLE_T2(WHAT, NAME, SIGNATURE, TYPE_Z, TYPES_X, ...) EVAL(FOR_EACH_TT2(WHAT, NAME, SIGNATURE, TYPE_Z, LIST(TYPES_X), __VA_ARGS__))
// #define _EXEC_TRIPLE_T1(WHAT, NAME, SIGNATURE, TYPES_X, TYPES_Y, ...) EVAL(FOR_EACH_TT1(WHAT, NAME, SIGNATURE, LIST(TYPES_X), LIST(TYPES_Y), __VA_ARGS__))

// #define DISPATCH_PAIRWISE(NAME, SIGNATURE, TYPE, TYPES_B) EVAL(_EXEC_DOUBLE_T2(RANDOMPAIRWISE2, NAME, SIGNATURE, TYPE, TYPES_B))
// #define DISPATCH_PAIRWISE2(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE, ...) EVAL(_EXEC_SELECTOR_P_2(SELECTOR_PAIRWISE_2, XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE, __VA_ARGS__))

// #define DISPATCH_DTYPES(NAME, SIGNATURE, TYPE, TYPES_B) EVAL(_EXEC_DOUBLE_T2(RANDOMDOUBLE2, NAME, SIGNATURE, TYPE, TYPES_B))
// #define DISPATCH_DTYPES2(NAME, SIGNATURE, TYPE, ...) EVAL(_EXEC_SELECTOR_TT_2(SELECTOR_DOUBLE_2, NAME, SIGNATURE, TYPE, __VA_ARGS__))

// #define DISPATCH_TTYPES2(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, ...) EVAL(_EXEC_SELECTOR_TTT_2(SELECTOR_TRIPLE_2, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, __VA_ARGS__))
// #define DISPATCH_TTYPES3(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, ...) EVAL(_EXEC_SELECTOR_TTT_3(SELECTOR_TRIPLE_3, ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, __VA_ARGS__))


// #ifndef __CLION_IDE__
// #define BUILD_SINGLE_UNCHAINED_TEMPLATE(NAME, SIGNATURE, TYPES) EVAL(_EXEC_SINGLE_T(RANDOMSINGLEU, NAME, (SIGNATURE), TYPES))
// #define BUILD_SINGLE_TEMPLATE(NAME, SIGNATURE, TYPES) EVAL(_EXEC_SINGLE_T(RANDOMSINGLE, NAME, (SIGNATURE), TYPES))
// #define BUILD_SINGLE_TEMPLATE_TWICE(NAME, SIGNATURE, TYPES) EVAL(_EXEC_SELECTOR_T(TEMPLATE_SINGLE_TWICE, NAME, SIGNATURE, TYPES))
// #define BUILD_DOUBLE_TEMPLATE(NAME, SIGNATURE, TYPES_A, TYPES_B) EVAL(_EXEC_DOUBLE_T(RANDOMDOUBLE, NAME, (SIGNATURE), (TYPES_A), TYPES_B))
// #define BUILD_SINGLE_SELECTOR(XTYPE, NAME, SIGNATURE, TYPES) switch(XTYPE) { EVAL(_EXEC_SELECTOR_T(SELECTOR_SINGLE, NAME, SIGNATURE, TYPES)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type");}}
// #define BUILD_SINGLE_SELECTOR_TWICE(XTYPE, NAME, SIGNATURE, TYPES) switch(XTYPE) { EVAL(_EXEC_SELECTOR_T(SELECTOR_SINGLE_TWICE, NAME, SIGNATURE, TYPES)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type");}}
// #define BUILD_SINGLE_SELECTOR_THRICE(XTYPE, NAME, SIGNATURE, TYPES) switch(XTYPE) { EVAL(_EXEC_SELECTOR_T(SELECTOR_SINGLE_THRICE, NAME, SIGNATURE, TYPES)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type");}}


// #define BUILD_SINGLE_PARTIAL_SELECTOR(XTYPE, NAME, SIGNATURE, TYPES) switch(XTYPE) { EVAL(_EXEC_SELECTOR_T(SELECTOR_PARTIAL_SINGLE, NAME, SIGNATURE, TYPES)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type"); }}
// #define BUILD_DOUBLE_SELECTOR(XTYPE, YTYPE, NAME, SIGNATURE, TYPES_A, TYPES_B) switch(XTYPE) { EVAL(_EXEC_SELECTOR_TT_1(SELECTOR_DOUBLE, YTYPE, NAME, (SIGNATURE), (TYPES_B), TYPES_A)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__); fflush(stdout); throw std::runtime_error("bad data type");}}
// #define BUILD_TRIPLE_SELECTOR(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_X, TYPES_Y, TYPES_Z) switch(XTYPE) { EVAL(_EXEC_SELECTOR_TTT_1(SELECTOR_TRIPLE, YTYPE, ZTYPE, NAME, SIGNATURE, (TYPES_Z), (TYPES_Y), TYPES_X)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type"); } }
// #define BUILD_TRIPLE_TEMPLATE(NAME, SIGNATURE, TYPES_X, TYPES_Y, TYPES_Z) EVAL(_EXEC_TRIPLE_T1(RANDOMTRIPLE, NAME, (SIGNATURE), (TYPES_X), (TYPES_Y), TYPES_Z))
// #define BUILD_PAIRWISE_TEMPLATE(NAME, SIGNATURE, TYPES_A) EVAL(_EXEC_DOUBLE_P(RANDOMPAIRWISE, NAME, (SIGNATURE), TYPES_A))
// #define BUILD_PAIRWISE_SELECTOR(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, TYPES_B) switch(XTYPE) { EVAL(_EXEC_SELECTOR_P_1(SELECTOR_PAIRWISE, XTYPE, YTYPE, ZTYPE, NAME, (SIGNATURE), (TYPES_B), TYPES_A)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d", XTYPE, __FILE__, __LINE__);  fflush(stdout); throw std::runtime_error("bad data type"); }}
// #else
// #define BUILD_SINGLE_UNCHAINED_TEMPLATE(NAME, SIGNATURE, TYPES)
// #define BUILD_SINGLE_TEMPLATE(NAME, SIGNATURE, TYPES)
// #define BUILD_SINGLE_TEMPLATE_TWICE(NAME, SIGNATURE, TYPES)
// #define BUILD_DOUBLE_TEMPLATE(NAME, SIGNATURE, TYPES_A, TYPES_B)
// #define BUILD_SINGLE_SELECTOR(XTYPE, NAME, SIGNATURE, TYPES)
// #define BUILD_SINGLE_SELECTOR_TWICE(XTYPE, NAME, SIGNATURE, TYPES)
// #define BUILD_SINGLE_SELECTOR_THRICE(XTYPE, NAME, SIGNATURE, TYPES)
// #define BUILD_SINGLE_PARTIAL_SELECTOR(XTYPE, NAME, SIGNATURE, TYPES)
// #define BUILD_DOUBLE_SELECTOR(XTYPE, YTYPE, NAME, SIGNATURE, TYPES_A, TYPES_B)
// #define BUILD_TRIPLE_SELECTOR(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_X, TYPES_Y, TYPES_Z)
// #define BUILD_TRIPLE_TEMPLATE(NAME, SIGNATURE, TYPES_X, TYPES_Y, TYPES_Z)
// #define BUILD_PAIRWISE_TEMPLATE(NAME, SIGNATURE, TYPES_A)
// #define BUILD_PAIRWISE_SELECTOR(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_A, TYPES_B)
// #endif
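
// Conceptually, a BUILD_SINGLE_SELECTOR-style expansion boils down to a switch over the
// runtime dtype that forwards to the matching template instantiation, with the same
// "bad data type" fallback used above. A rough sketch with hypothetical names
// (MyDType, execOp, execOpByType are illustrative, not the library's API):
//
//   #include <cstdio>
//   #include <stdexcept>
//
//   enum MyDType { MY_FLOAT32, MY_DOUBLE, MY_INT32 };
//
//   template <typename X>
//   static void execOp(const void *x, void *z, long length) { /* type-specific kernel */ }
//
//   static void execOpByType(MyDType xType, const void *x, void *z, long length) {
//       switch (xType) {
//           case MY_FLOAT32: execOp<float>(x, z, length);  break;
//           case MY_DOUBLE:  execOp<double>(x, z, length); break;
//           case MY_INT32:   execOp<int>(x, z, length);    break;
//           default: printf("[ERROR] Unknown dtypeX=%d on %s:%d", (int) xType, __FILE__, __LINE__);
//                    fflush(stdout);
//                    throw std::runtime_error("bad data type");
//       }
//   }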

// #define LIST(...) __VA_ARGS__

// #define _SELECTOR_DOUBLE_2(NAME, SIGNATURE, TYPE_A, ENUM, TYPE_B) case ENUM: { NAME SIGNATURE; break; };
// #define SELECTOR_DOUBLE_2(NAME, SIGNATURE, TYPE_A, TYPE_B) EVALUATING_PASTE2(_SELECT, OR_DOUBLE_2(NAME, UNPAREN3(SIGNATURE), TYPE_A, UNPAREN3(TYPE_B)))

// #define _SELECTOR_DOUBLE(YTYPE, NAME, SIGNATURE, ENUM, TYPE_A, ...) case ENUM: { switch(YTYPE) { EXPAND(DISPATCH_DTYPES2(NAME, SIGNATURE, TYPE_A, __VA_ARGS__)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d\n", YTYPE, __FILE__, __LINE__); fflush(stdout);}}; break; };
// #define SELECTOR_DOUBLE(YTYPE, NAME, SIGNATURE, TYPES_B, TYPE_A)  EVALUATING_PASTE(_SELECTOR, _DOUBLE(YTYPE, NAME, SIGNATURE, UNPAREN(TYPE_A), UNPAREN(TYPES_B)))

// #define _SELECTOR_PAIRWISE_2(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, ENUM, TYPE_B) case ENUM: { if (ZTYPE == YTYPE) {NAME SIGNATURE;} else if (XTYPE == ZTYPE ){NAME SIGNATURE;} else {printf("[ERROR] Unknown dtypeX=%d on %s:%d\n", YTYPE, __FILE__, __LINE__); fflush(stdout); throw std::runtime_error("Unknown Z operand");}; break; };
// #define SELECTOR_PAIRWISE_2(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, TYPE_B) EVALUATING_PASTE2(_SELECT, OR_PAIRWISE_2(XTYPE, YTYPE, ZTYPE, NAME, UNPAREN3(SIGNATURE), TYPE_A, UNPAREN3(TYPE_B)))
// #define _SELECTOR_PAIRWISE(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, ENUM, TYPE_A, ...) case ENUM: { switch(YTYPE) { EXPAND(DISPATCH_PAIRWISE2(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPE_A, __VA_ARGS__)); default: {printf("[ERROR] Unknown dtypeX=%d on %s:%d\n", YTYPE, __FILE__, __LINE__); fflush(stdout);}}; break; };
// #define SELECTOR_PAIRWISE(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_B, TYPE_A)  EVALUATING_PASTE(_SELECTOR, _PAIRWISE(XTYPE, YTYPE, ZTYPE, NAME, SIGNATURE, UNPAREN(TYPE_A), UNPAREN(TYPES_B)))

// #define _SELECTOR_TRIPLE_3(NAME, SIGNATURE, TYPE_X, TYPE_Y, ENUM_Z, TYPE_Z) case ENUM_Z: { NAMESIGNATURE;}; break;
// #define SELECTOR_TRIPLE_3(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, TYPE_Z) EVALUATING_PASTE3(_SELECTOR, _TRIPLE_3(NAME, SIGNATURE, TYPE_X, TYPE_Y, UNPAREN3(TYPE_Z)))
// #define _SELECTOR_TRIPLE_2(ZTYPE, NAME, SIGNATURE, TYPE_X, ENUM_Y, TYPE_Y, TYPES_Z) case ENUM_Y: { switch (ZTYPE) { EXPAND2(DISPATCH_TTYPES3(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPE_Y, UNPAREN3(TYPES_Z))); default: {printf("[ERROR] Unknown dtypeZ=%d on %s:%d\n", ZTYPE, __FILE__, __LINE__); ; fflush(stdout);} } break; };
// #define SELECTOR_TRIPLE_2(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, TYPE_Y) EVALUATING_PASTE2(_SELECTOR, _TRIPLE_2(ZTYPE, NAME, SIGNATURE, TYPE_X, UNPAREN2(TYPE_Y), TYPES_Z))
// #define _SELECTOR_TRIPLE(YTYPE, ZTYPE, NAME, SIGNATURE, ENUM_X, TYPE_X, TYPES_Z, ...) case ENUM_X: { switch (YTYPE) { EXPAND(DISPATCH_TTYPES2(ZTYPE, NAME, SIGNATURE, TYPE_X, TYPES_Z, __VA_ARGS__ )); default: {printf("[ERROR] Unknown dtypeY=%d on %s:%d\n", YTYPE, __FILE__, __LINE__); ; fflush(stdout);} } break; };
// #define SELECTOR_TRIPLE(YTYPE, ZTYPE, NAME, SIGNATURE, TYPES_Z, TYPES_Y, TYPE_X) EVALUATING_PASTE(_SELECTOR, _TRIPLE(YTYPE, ZTYPE, NAME, SIGNATURE, UNPAREN(TYPE_X), TYPES_Z, UNPAREN(TYPES_Y)))

// #define _SELECTOR_SINGLE(A, B, C, D) case C: {AB; break;};
// #define SELECTOR_SINGLE(A, B, C) EVALUATING_PASTE(_SEL, ECTOR_SINGLE(A, B, UNPAREN(C)))

// #define _SELECTOR_SINGLE_THRICE(A, B, C, D) case C: {AB; break;};
// #define SELECTOR_SINGLE_THRICE(A, B, C) EVALUATING_PASTE(_SEL, ECTOR_SINGLE_THRICE(A, B, UNPAREN(C)))

// #define _SELECTOR_SINGLE_TWICE(A, B, C, D) case C: {AB; break;};
// #define SELECTOR_SINGLE_TWICE(A, B, C) EVALUATING_PASTE(_SEL, ECTOR_SINGLE_TWICE(A, B, UNPAREN(C)))

// #define _TEMPLATE_SINGLE_TWICE(A, B, C, D) AB;
// #define TEMPLATE_SINGLE_TWICE(A, B, C) EVALUATING_PASTE(_TEM, PLATE_SINGLE_TWICE(A, B, UNPAREN(C)))

// #define _SELECTOR_PARTIAL_SINGLE(A, B, C, D) case C: {A D, UNPAREN2(B); break;};
// #define SELECTOR_PARTIAL_SINGLE(A, B, C) EVALUATING_PASTE(_SEL, ECTOR_PARTIAL_SINGLE(A, B, UNPAREN(C)))

// #define _RANDOMSINGLE(A, B, C, D) AB;
// #define _RANDOMSINGLEU(A, B, C, D) A D B;
// #define RANDOMSINGLE(A, B, C) EVALUATING_PASTE(_RAND, OMSINGLE(A, UNPAREN(B), UNPAREN(C)))
// #define RANDOMSINGLEU(A, B, C) EVALUATING_PASTE(_RAND, OMSINGLEU(A, UNPAREN(B), UNPAREN(C)))
// #define RANDOMDOUBLE(A, B, C, D) EXPAND(DISPATCH_DTYPES(A, UNPAREN(B), D, UNPAREN(C)))

// #define _RANDOMDOUBLE2(A, B, C, D, E, F) AB;
// #define RANDOMDOUBLE2(A, B, C, D) EVALUATING_PASTE(_RAND, OMDOUBLE2(A, B, UNPAREN(C), UNPAREN(D)))

// #define _RANDOMPAIRWISE2(A, B, C, D, E) AE;
// #define RANDOMPAIRWISE(A, B, C) EVALUATING_PASTE(_RANDOM, PAIRWISE2(A, UNPAREN(C), UNPAREN(B)))

// #define _RANDOMTRIPLE3(A, B, ZN, ZT, YN, YT, XN, XT) AB;
// #define RANDOMTRIPLE3(A, B, Z, Y, X) EVALUATING_PASTE(_RANDOM, TRIPLE3(A, UNPAREN(B), UNPAREN(Z), UNPAREN(Y), UNPAREN(X)))

// #define _RANDOMTRIPLE2(NAME, SIGNATURE, TYPE_Z, TYPE_Y, TYPES_X)  EVALX(_EXEC_TRIPLE_T3(RANDOMTRIPLE3, NAME, SIGNATURE, TYPE_Z, TYPE_Y, UNPAREN(TYPES_X)))
// #define RANDOMTRIPLE2(NAME, SIGNATURE, TYPE_Z, TYPES_X, TYPE_Y) _RANDOMTRIPLE2(NAME, SIGNATURE, TYPE_Z, TYPE_Y, TYPES_X)
// #define _RANDOMTRIPLE(NAME, SIGNATURE, TYPE_Z, TYPES_X, TYPES_Y) EVAL(_EXEC_TRIPLE_T2(RANDOMTRIPLE2, NAME, SIGNATURE, TYPE_Z, TYPES_X, UNPAREN(TYPES_Y)))
// #define RANDOMTRIPLE(NAME, SIGNATURE, TYPES_X, TYPES_Y, TYPE_Z)  _RANDOMTRIPLE(NAME, SIGNATURE, TYPE_Z, TYPES_X, TYPES_Y)


// #define BROADCAST(NAME) sd::BroadcastOpsTuple::custom(sd::scalar::NAME, sd::pairwise::NAME, sd::broadcast::NAME)
// #define BROADCAST_BOOL(NAME) sd::BroadcastBoolOpsTuple::custom(sd::scalar::NAME, sd::pairwise::NAME, sd::broadcast::NAME)

public static final int ALL_STRINGS = UTF32;
public static final int ALL_INDICES = INT64;
public static final int ALL_INTS = UINT64;
public static final int ALL_FLOATS = BFLOAT16;

// #endif //TESTS_CPU_TYPE_BOILERPLATE_H


// Parsed from system/op_boilerplate.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

/*
 * This set of macros is used to generate kernel calls/method calls/CUDA kernels on the fly, during the precompile phase.
 *
 * Entry macro names start with DISPATCH_*.
 * Basically they roll through a *_OPS list, building a list of kernels,
 * and DISPATCH_KERNEL_META rolls through any two *_OPS lists, building meta kernels and their host counterparts.
 *
 *
 * Those scary FE_*, FX_*, FZ_*, FM_* etc. macro walls give the preprocessor the ability to loop over a list of arguments,
 * which emulates a forEach() pattern.
 *
 * E.g., here's a macro call which generates CUDA kernels for RANDOM_OPS:
 *
 * DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float, INPUT(Nd4jPointer state, float *z, int *zShapeBuffer, float *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
 *
 * We provide the following arguments:
 *      output method template
 *      target generic kernel
 *      data type
 *      signature
 *      parameters to be passed into the generic kernel
 *      list of operations
 *
 *
 * The list of operations used is defined in the same *.h file; e.g. for RandomOps it's defined as:
 * #define RANDOM_OPS \
 *       (0, randomOps::UniformDistribution) ,\
 *       (1, randomOps::DropOut) ,\
 *       (2, randomOps::DropOutInverted) ,\
 *       (3, randomOps::ProbablisticMerge) ,\
 *       (4, randomOps::Linspace) ,\
 *       (5, randomOps::Choice) ,\
 *       (6, randomOps::GaussianDistribution) ,\
 *       (7, randomOps::BernoulliDistribution) ,\
 *       (8, randomOps::BinomialDistribution)
 *
 *
 * So the DISPATCH_KERNEL_SIMPLE call will generate one kernel for each of these ops.
 * This makes adding new operations easy: one just adds the new Op to the list and recompiles libnd4j.
 *
 * HINT: to debug a macro you might want to use a simple trick: open a console and run "watch -n 1 gcc -E file.h".
 * This gives you a real-time view of the macro preprocessing result, which greatly simplifies the invention process.
 *
 *
 * @author Paul Dubs (@treo)
 * @author [email protected]
 */
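
// To make the pattern described above concrete, here is a minimal sketch of how a list of
// (number, op) pairs is turned into a per-op switch by a FOR_EACH-style macro. All names
// (MY_*, UniformOp, DropOutOp, randomDispatch) are hypothetical stand-ins, not the actual
// macros or ops of this header:
//
//   #include <cstdio>
//
//   struct UniformOp { static void exec(float *z) { z[0] = 0.5f; } };
//   struct DropOutOp { static void exec(float *z) { z[0] = 0.0f; } };
//
//   #define MY_RANDOM_OPS (0, UniformOp), (1, DropOutOp)
//
//   #define MY_CASE_IMPL(NUM, OP) case NUM: OP::exec(z); break;
//   #define MY_CASE(PAIR) MY_CASE_IMPL PAIR              // unwraps the (NUM, OP) pair
//
//   #define MY_FE_1(WHAT, P)      WHAT(P)
//   #define MY_FE_2(WHAT, P, ...) WHAT(P) MY_FE_1(WHAT, __VA_ARGS__)
//   #define MY_GET(_1, _2, NAME, ...) NAME
//   #define MY_DISPATCH(WHAT, ...) MY_GET(__VA_ARGS__, MY_FE_2, MY_FE_1)(WHAT, __VA_ARGS__)
//
//   void randomDispatch(int opNum, float *z) {
//       switch (opNum) {
//           MY_DISPATCH(MY_CASE, MY_RANDOM_OPS)
//           default: printf("unknown op %d\n", opNum);
//       }
//   }
//
// Adding a new op then amounts to appending another (number, op) pair to MY_RANDOM_OPS,
// which mirrors the workflow described above for RANDOM_OPS.
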
// #pragma once
// #ifndef OP_BOILERPLATE_HH
// #define OP_BOILERPLATE_HH

// #include 
// #include 
// #include 
// #include 

// #ifdef __CUDACC__

// #elif __JAVACPP_HACK__

// #define meta_def
// #define op_def
// #define op_def_special
// #define linkage

// #elif _MSC_VER

// #elif __clang__

// #define op_def inline
// #define op_def_special inline
// #define meta_def inline
// #define linkage

// #elif __GNUC__

// #define linkage
// #define meta_def _Pragma("omp declare simd") inline __attribute__((always_inline))
// #define op_def _Pragma("omp declare simd") inline __attribute__((always_inline))
// #define op_def_special _Pragma("omp declare simd") inline __attribute__((always_inline))

// #endif
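
// Under the __GNUC__ branch above, a declaration written as, e.g.,
//
//   op_def static T op(T d1, T d2) { return d1 + d2; }
//
// therefore expands to
//
//   _Pragma("omp declare simd") inline __attribute__((always_inline)) static T op(T d1, T d2) { return d1 + d2; }
//
// while under the __JAVACPP_HACK__ branch the qualifier disappears entirely (the parser only
// needs the signature). The op(...) body here is an illustrative placeholder, not a real op.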


public static native @MemberGetter double ELEMENT_THRESHOLD();
public static final double ELEMENT_THRESHOLD = ELEMENT_THRESHOLD();
public static native @MemberGetter double TAD_THRESHOLD();
public static final double TAD_THRESHOLD = TAD_THRESHOLD();

// #define SHAPELIST(...)  new ShapeList({__VA_ARGS__}, block.workspace() != nullptr)

// #ifdef __CUDA_ARCH__
// #define PRINT_FIRST(...)    if (threadIdx.x == 0 && blockIdx.x == 0) {printf(__VA_ARGS__); }
// #else
// #define PRINT_FIRST(...)    printf(__VA_ARGS__); fflush(stdout)
// #endif

// #define DEBUG_CALL(STREAM)      if (sd::Environment::getInstance().isDebug()) { cudaError_t tRes = cudaStreamSynchronize(*STREAM); checkCudaErrors(tRes); if (tRes != 0) { throw std::runtime_error(); }; }
// #define DEBUG_KERNEL(STREAM, OP_NUM)       if (sd::Environment::getInstance().isDebug()) { cudaError_t tRes = cudaStreamSynchronize(*STREAM); checkCudaErrors(tRes); if (tRes != 0) {std::string tFile(__FILE__); std::string tOp = "Kernel OpNum failed: [" + sd::StringUtils::valueToString(OP_NUM) + std::string("]; File: ") + tFile + std::string(":") + sd::StringUtils::valueToString(__LINE__); throw std::runtime_error(tOp.c_str()); }; }


// #define LAUNCH(A, B, C, D) <<>>
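// (In the original CUDA header this macro wraps the kernel launch configuration; the angle-bracket
// body was lost in this rendering, most likely a <<<A, B, C, D>>>-style launch spec.)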


// #define CONCAT2(A,B) A ## B
// #define CONCAT3(A,B,C) A ## B ## C

// #define ARGMIX3(A,B,C) A ## B ## _## C
// #define ARGMIX4(A,B,C,D) A ## B ## _## C ## _ ## D

// #define MIX2(A,B) A ## _ ## B
// #define MIX3(A,B,C) A ## _ ## B ## _## C
// #define MIX4(A,B,C,D) A ## _ ## B ## _## C ## _ ## D
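
// These are plain token-pasting helpers; e.g. MIX3(exec, float, Add) pastes to exec_float_Add,
// and ARGMIX3(transform, Simple, float) pastes to transformSimple_float (argument names here
// are illustrative only).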


// #define EMPTY()
// #define DEFER(id) id EMPTY()
// #define OBSTRUCT(...) __VA_ARGS__ DEFER(EMPTY)()
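
// EMPTY/DEFER/OBSTRUCT implement the usual deferred-expansion idiom: DEFER(id) leaves
// "id EMPTY()" behind, so the trailing call parentheses are not seen on the current scan and
// the macro only expands on a later rescan (e.g. inside EVAL/EXPAND). Minimal sketch with an
// illustrative macro A:
//
//   #define A() 123
//   A ()           // expands to 123 immediately
//   DEFER(A)()     // first scan leaves "A ()"; a later rescan (EVAL) turns it into 123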


// #define _EXPAND_OP_CALL(FN, SIG, NUM, TYPE) case NUM: { FN>SIG; break; };
// #define _EXPAND_OP_CALL_TT(FN, SIG, NUM, TYPE) case NUM: { FN>SIG; break; };
// #define _EXPAND_OP_CALL_TTT(FN, SIG, NUM, TYPE) case NUM: { FN>SIG; break; };
// #define _EXPAND_RETURNING_OP_CALL(FN, SIG, NUM, TYPE) else if(opNum == NUM){ return FN>SIG; }
// #define _EXPAND_RETURNING_OP_CALL_TT(FN, SIG, NUM, TYPE) else if(opNum == NUM){ return FN>SIG; }
// #define _EXPAND_PACKED_OP_CALL(FN, SIG, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_CALL (FN, SIG, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_OP_CALL_TT(FN, SIG, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_CALL_TT (FN, SIG, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_OP_CALL_TTT(FN, SIG, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_CALL_TTT (FN, SIG, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_RETURNING_PACKED_OP_CALL(FN, SIG, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _RETURNING_OP_CALL (FN, SIG, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_RETURNING_PACKED_OP_CALL_TT(FN, SIG, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _RETURNING_OP_CALL_TT (FN, SIG, UNPAREN(OPNUM_PAIR)))

// #define FE_1(WHAT, FN, SIG, OPNUM_PAIR) WHAT(FN, SIG, OPNUM_PAIR)
// #define FE_2(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_1(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_3(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_2(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_4(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_3(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_5(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_4(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_6(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_5(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_7(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_6(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_8(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_7(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_9(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_8(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_10(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_9(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_11(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_10(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_12(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_11(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_13(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_12(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_14(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_13(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_15(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_14(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_16(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_15(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_17(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_16(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_18(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_17(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_19(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_18(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_20(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_19(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_21(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_20(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_22(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_21(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_23(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_22(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_24(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_23(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_25(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_24(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_26(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_25(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_27(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_26(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_28(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_27(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_29(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_28(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_30(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_29(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_31(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_30(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_32(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_31(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_33(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_32(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_34(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_33(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_35(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_34(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_36(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_35(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_37(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_36(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_38(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_37(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_39(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_38(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_40(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_39(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_41(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_40(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_42(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_41(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_43(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_42(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_44(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_43(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_45(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_44(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_46(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_45(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_47(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_46(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_48(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_47(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_49(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_48(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_50(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_49(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_51(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_50(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_52(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_51(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_53(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_52(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_54(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_53(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_55(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_54(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_56(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_55(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_57(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_56(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_58(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_57(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_59(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_58(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_60(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_59(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_61(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_60(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_62(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_61(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_63(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_62(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_64(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_63(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_65(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_64(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_66(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_65(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_67(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_66(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_68(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_67(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_69(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_68(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_70(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_69(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_71(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_70(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_72(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_71(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_73(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_72(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_74(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_73(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_75(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_74(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_76(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_75(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_77(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_76(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_78(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_77(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_79(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_78(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_80(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_79(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_81(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_80(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_82(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_81(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_83(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_82(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_84(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_83(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_85(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_84(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_86(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_85(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_87(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_86(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_88(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_87(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_89(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_88(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_90(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_89(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_91(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_90(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_92(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_91(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_93(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_92(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_94(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_93(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_95(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_94(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_96(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_95(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_97(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_96(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_98(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_97(WHAT, FN, SIG, __VA_ARGS__))
// #define FE_99(WHAT, FN, SIG, OPNUM_PAIR, ...) WHAT(FN, SIG, OPNUM_PAIR)EVAL(FE_98(WHAT, FN, SIG, __VA_ARGS__))


// #define CL1_1(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)
// #define CL1_2(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_1(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_3(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_2(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_4(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_3(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_5(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_4(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_6(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_5(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_7(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_6(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_8(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_7(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_9(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_8(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_10(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_9(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_11(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_10(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_12(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_11(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_13(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_12(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_14(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_13(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_15(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_14(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_16(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_15(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_17(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_16(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_18(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_17(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_19(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_18(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_20(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_19(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_21(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_20(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_22(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_21(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_23(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_22(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_24(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_23(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_25(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_24(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_26(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_25(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_27(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_26(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_28(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_27(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_29(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_28(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_30(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_29(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_31(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_30(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_32(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_31(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_33(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_32(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_34(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_33(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_35(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_34(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_36(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_35(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_37(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_36(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_38(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_37(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_39(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_38(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_40(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_39(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_41(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_40(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_42(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_41(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_43(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_42(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_44(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_43(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_45(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_44(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_46(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_45(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_47(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_46(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_48(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_47(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_49(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_48(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_50(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_49(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_51(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_50(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_52(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_51(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_53(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_52(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_54(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_53(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_55(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_54(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_56(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_55(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_57(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_56(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_58(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_57(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_59(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_58(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_60(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_59(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_61(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_60(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_62(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_61(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_63(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_62(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_64(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_63(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_65(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_64(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_66(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_65(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_67(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_66(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_68(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_67(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_69(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_68(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_70(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_69(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_71(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_70(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_72(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_71(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_73(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_72(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_74(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_73(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_75(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_74(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_76(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_75(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_77(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_76(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_78(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_77(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_79(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_78(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_80(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_79(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_81(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_80(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_82(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_81(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_83(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_82(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_84(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_83(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_85(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_84(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_86(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_85(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_87(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_86(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_88(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_87(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_89(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_88(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_90(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_89(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_91(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_90(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_92(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_91(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_93(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_92(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_94(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_93(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_95(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_94(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_96(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_95(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_97(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_96(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_98(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_97(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define CL1_99(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(CL1_98(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))


// #define TR_1(WHAT, TYPE, OPNUM_PAIR) WHAT(TYPE, OPNUM_PAIR)
// #define TR_2(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_1(WHAT, TYPE, __VA_ARGS__))
// #define TR_3(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_2(WHAT, TYPE, __VA_ARGS__))
// #define TR_4(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_3(WHAT, TYPE, __VA_ARGS__))
// #define TR_5(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_4(WHAT, TYPE, __VA_ARGS__))
// #define TR_6(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_5(WHAT, TYPE, __VA_ARGS__))
// #define TR_7(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_6(WHAT, TYPE, __VA_ARGS__))
// #define TR_8(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_7(WHAT, TYPE, __VA_ARGS__))
// #define TR_9(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_8(WHAT, TYPE, __VA_ARGS__))
// #define TR_10(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_9(WHAT, TYPE, __VA_ARGS__))
// #define TR_11(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_10(WHAT, TYPE, __VA_ARGS__))
// #define TR_12(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_11(WHAT, TYPE, __VA_ARGS__))
// #define TR_13(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_12(WHAT, TYPE, __VA_ARGS__))
// #define TR_14(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_13(WHAT, TYPE, __VA_ARGS__))
// #define TR_15(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_14(WHAT, TYPE, __VA_ARGS__))
// #define TR_16(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_15(WHAT, TYPE, __VA_ARGS__))
// #define TR_17(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_16(WHAT, TYPE, __VA_ARGS__))
// #define TR_18(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_17(WHAT, TYPE, __VA_ARGS__))
// #define TR_19(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_18(WHAT, TYPE, __VA_ARGS__))
// #define TR_20(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_19(WHAT, TYPE, __VA_ARGS__))
// #define TR_21(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_20(WHAT, TYPE, __VA_ARGS__))
// #define TR_22(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_21(WHAT, TYPE, __VA_ARGS__))
// #define TR_23(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_22(WHAT, TYPE, __VA_ARGS__))
// #define TR_24(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_23(WHAT, TYPE, __VA_ARGS__))
// #define TR_25(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_24(WHAT, TYPE, __VA_ARGS__))
// #define TR_26(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_25(WHAT, TYPE, __VA_ARGS__))
// #define TR_27(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_26(WHAT, TYPE, __VA_ARGS__))
// #define TR_28(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_27(WHAT, TYPE, __VA_ARGS__))
// #define TR_29(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_28(WHAT, TYPE, __VA_ARGS__))
// #define TR_30(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_29(WHAT, TYPE, __VA_ARGS__))
// #define TR_31(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_30(WHAT, TYPE, __VA_ARGS__))
// #define TR_32(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_31(WHAT, TYPE, __VA_ARGS__))
// #define TR_33(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_32(WHAT, TYPE, __VA_ARGS__))
// #define TR_34(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_33(WHAT, TYPE, __VA_ARGS__))
// #define TR_35(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_34(WHAT, TYPE, __VA_ARGS__))
// #define TR_36(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_35(WHAT, TYPE, __VA_ARGS__))
// #define TR_37(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_36(WHAT, TYPE, __VA_ARGS__))
// #define TR_38(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_37(WHAT, TYPE, __VA_ARGS__))
// #define TR_39(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_38(WHAT, TYPE, __VA_ARGS__))
// #define TR_40(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_39(WHAT, TYPE, __VA_ARGS__))
// #define TR_41(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_40(WHAT, TYPE, __VA_ARGS__))
// #define TR_42(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_41(WHAT, TYPE, __VA_ARGS__))
// #define TR_43(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_42(WHAT, TYPE, __VA_ARGS__))
// #define TR_44(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_43(WHAT, TYPE, __VA_ARGS__))
// #define TR_45(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_44(WHAT, TYPE, __VA_ARGS__))
// #define TR_46(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_45(WHAT, TYPE, __VA_ARGS__))
// #define TR_47(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_46(WHAT, TYPE, __VA_ARGS__))
// #define TR_48(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_47(WHAT, TYPE, __VA_ARGS__))
// #define TR_49(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_48(WHAT, TYPE, __VA_ARGS__))
// #define TR_50(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_49(WHAT, TYPE, __VA_ARGS__))
// #define TR_51(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_50(WHAT, TYPE, __VA_ARGS__))
// #define TR_52(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_51(WHAT, TYPE, __VA_ARGS__))
// #define TR_53(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_52(WHAT, TYPE, __VA_ARGS__))
// #define TR_54(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_53(WHAT, TYPE, __VA_ARGS__))
// #define TR_55(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_54(WHAT, TYPE, __VA_ARGS__))
// #define TR_56(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_55(WHAT, TYPE, __VA_ARGS__))
// #define TR_57(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_56(WHAT, TYPE, __VA_ARGS__))
// #define TR_58(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_57(WHAT, TYPE, __VA_ARGS__))
// #define TR_59(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_58(WHAT, TYPE, __VA_ARGS__))
// #define TR_60(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_59(WHAT, TYPE, __VA_ARGS__))
// #define TR_61(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_60(WHAT, TYPE, __VA_ARGS__))
// #define TR_62(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_61(WHAT, TYPE, __VA_ARGS__))
// #define TR_63(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_62(WHAT, TYPE, __VA_ARGS__))
// #define TR_64(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_63(WHAT, TYPE, __VA_ARGS__))
// #define TR_65(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_64(WHAT, TYPE, __VA_ARGS__))
// #define TR_66(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_65(WHAT, TYPE, __VA_ARGS__))
// #define TR_67(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_66(WHAT, TYPE, __VA_ARGS__))
// #define TR_68(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_67(WHAT, TYPE, __VA_ARGS__))
// #define TR_69(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_68(WHAT, TYPE, __VA_ARGS__))
// #define TR_70(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_69(WHAT, TYPE, __VA_ARGS__))
// #define TR_71(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_70(WHAT, TYPE, __VA_ARGS__))
// #define TR_72(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_71(WHAT, TYPE, __VA_ARGS__))
// #define TR_73(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_72(WHAT, TYPE, __VA_ARGS__))
// #define TR_74(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_73(WHAT, TYPE, __VA_ARGS__))
// #define TR_75(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_74(WHAT, TYPE, __VA_ARGS__))
// #define TR_76(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_75(WHAT, TYPE, __VA_ARGS__))
// #define TR_77(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_76(WHAT, TYPE, __VA_ARGS__))
// #define TR_78(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_77(WHAT, TYPE, __VA_ARGS__))
// #define TR_79(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_78(WHAT, TYPE, __VA_ARGS__))
// #define TR_80(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_79(WHAT, TYPE, __VA_ARGS__))
// #define TR_81(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_80(WHAT, TYPE, __VA_ARGS__))
// #define TR_82(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_81(WHAT, TYPE, __VA_ARGS__))
// #define TR_83(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_82(WHAT, TYPE, __VA_ARGS__))
// #define TR_84(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_83(WHAT, TYPE, __VA_ARGS__))
// #define TR_85(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_84(WHAT, TYPE, __VA_ARGS__))
// #define TR_86(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_85(WHAT, TYPE, __VA_ARGS__))
// #define TR_87(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_86(WHAT, TYPE, __VA_ARGS__))
// #define TR_88(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_87(WHAT, TYPE, __VA_ARGS__))
// #define TR_89(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_88(WHAT, TYPE, __VA_ARGS__))
// #define TR_90(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_89(WHAT, TYPE, __VA_ARGS__))
// #define TR_91(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_90(WHAT, TYPE, __VA_ARGS__))
// #define TR_92(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_91(WHAT, TYPE, __VA_ARGS__))
// #define TR_93(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_92(WHAT, TYPE, __VA_ARGS__))
// #define TR_94(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_93(WHAT, TYPE, __VA_ARGS__))
// #define TR_95(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_94(WHAT, TYPE, __VA_ARGS__))
// #define TR_96(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_95(WHAT, TYPE, __VA_ARGS__))
// #define TR_97(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_96(WHAT, TYPE, __VA_ARGS__))
// #define TR_98(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_97(WHAT, TYPE, __VA_ARGS__))
// #define TR_99(WHAT, TYPE, OPNUM_PAIR, ...) WHAT(TYPE, OPNUM_PAIR)EVAL(TR_98(WHAT, TYPE, __VA_ARGS__))
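// The commented TR_N helpers above unroll a variadic list of op-number pairs: each TR_N applies
// WHAT(TYPE, OPNUM_PAIR) to its first pair and recurses through EVAL(TR_{N-1}(WHAT, TYPE, ...))
// over the remaining pairs, so TR_N expands WHAT once per pair with TYPE held fixed.
// Minimal illustrative expansion; CALL, OpA, OpB and OpC are hypothetical placeholders,
// not identifiers from this file:
//   TR_3(CALL, float, (0, OpA), (1, OpB), (2, OpC))
//     expands to CALL(float, (0, OpA)) CALL(float, (1, OpB)) CALL(float, (2, OpC))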

// #define DIR_1(WHAT, PARAMS, OPNUM_PAIR) WHAT(PARAMS, OPNUM_PAIR)
// #define DIR_2(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_1(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_3(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_2(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_4(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_3(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_5(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_4(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_6(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_5(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_7(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_6(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_8(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_7(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_9(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_8(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_10(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_9(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_11(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_10(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_12(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_11(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_13(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_12(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_14(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_13(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_15(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_14(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_16(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_15(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_17(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_16(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_18(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_17(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_19(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_18(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_20(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_19(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_21(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_20(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_22(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_21(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_23(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_22(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_24(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_23(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_25(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_24(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_26(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_25(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_27(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_26(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_28(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_27(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_29(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_28(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_30(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_29(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_31(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_30(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_32(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_31(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_33(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_32(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_34(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_33(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_35(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_34(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_36(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_35(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_37(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_36(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_38(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_37(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_39(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_38(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_40(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_39(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_41(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_40(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_42(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_41(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_43(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_42(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_44(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_43(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_45(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_44(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_46(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_45(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_47(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_46(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_48(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_47(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_49(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_48(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_50(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_49(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_51(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_50(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_52(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_51(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_53(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_52(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_54(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_53(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_55(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_54(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_56(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_55(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_57(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_56(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_58(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_57(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_59(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_58(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_60(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_59(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_61(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_60(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_62(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_61(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_63(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_62(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_64(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_63(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_65(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_64(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_66(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_65(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_67(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_66(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_68(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_67(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_69(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_68(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_70(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_69(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_71(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_70(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_72(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_71(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_73(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_72(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_74(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_73(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_75(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_74(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_76(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_75(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_77(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_76(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_78(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_77(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_79(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_78(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_80(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_79(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_81(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_80(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_82(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_81(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_83(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_82(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_84(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_83(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_85(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_84(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_86(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_85(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_87(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_86(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_88(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_87(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_89(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_88(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_90(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_89(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_91(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_90(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_92(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_91(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_93(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_92(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_94(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_93(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_95(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_94(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_96(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_95(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_97(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_96(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_98(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_97(WHAT, PARAMS, __VA_ARGS__))
// #define DIR_99(WHAT, PARAMS, OPNUM_PAIR, ...) WHAT(PARAMS, OPNUM_PAIR)EVAL(DIR_98(WHAT, PARAMS, __VA_ARGS__))


// #define FZ_1(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)
// #define FZ_2(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_1(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_3(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_2(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_4(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_3(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_5(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_4(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_6(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_5(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_7(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_6(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_8(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_7(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_9(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_8(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_10(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_9(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_11(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_10(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_12(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_11(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_13(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_12(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_14(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_13(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_15(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_14(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_16(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_15(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_17(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_16(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_18(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_17(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_19(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_18(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_20(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_19(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_21(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_20(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_22(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_21(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_23(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_22(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_24(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_23(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_25(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_24(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_26(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_25(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_27(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_26(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_28(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_27(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_29(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_28(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_30(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_29(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_31(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_30(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_32(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_31(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_33(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_32(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_34(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_33(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_35(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_34(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_36(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_35(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_37(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_36(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_38(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_37(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_39(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_38(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_40(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_39(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_41(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_40(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_42(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_41(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_43(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_42(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_44(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_43(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_45(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_44(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_46(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_45(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_47(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_46(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_48(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_47(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_49(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_48(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_50(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_49(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_51(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_50(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_52(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_51(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_53(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_52(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_54(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_53(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_55(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_54(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_56(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_55(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_57(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_56(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_58(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_57(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_59(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_58(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_60(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_59(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_61(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_60(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_62(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_61(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_63(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_62(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_64(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_63(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_65(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_64(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_66(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_65(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_67(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_66(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_68(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_67(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_69(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_68(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_70(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_69(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_71(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_70(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_72(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_71(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_73(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_72(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_74(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_73(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_75(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_74(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_76(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_75(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_77(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_76(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_78(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_77(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_79(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_78(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_80(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_79(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_81(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_80(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_82(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_81(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_83(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_82(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_84(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_83(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_85(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_84(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_86(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_85(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_87(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_86(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_88(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_87(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_89(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_88(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_90(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_89(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_91(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_90(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_92(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_91(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_93(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_92(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_94(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_93(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_95(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_94(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_96(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_95(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_97(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_96(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_98(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_97(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define FZ_99(WHAT, NAME, TYPE, SIGNATURE, OPNUM_PAIR, ...) WHAT(NAME, TYPE, SIGNATURE, OPNUM_PAIR)EVAL(FZ_98(WHAT, NAME, TYPE, SIGNATURE, __VA_ARGS__))
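// DIR_N and FZ_N above reuse the same unrolling scheme with different fixed leading arguments:
// DIR_N holds PARAMS constant, and FZ_N holds NAME, TYPE and SIGNATURE constant, while the
// variadic tail of OPNUM_PAIR values is walked one pair per recursion step, exactly as in TR_N.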

// #define FF_1(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FF_2(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_1(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_3(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_2(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_4(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_3(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_5(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_4(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_6(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_5(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_7(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_6(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_8(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_7(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_9(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_8(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_10(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_9(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_11(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_10(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_12(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_11(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_13(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_12(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_14(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_13(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_15(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_14(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_16(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_15(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_17(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_16(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_18(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_17(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_19(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_18(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FF_20(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FF_19(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))


// #define FFI_1(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FFI_2(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_1(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_3(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_2(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_4(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_3(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_5(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_4(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_6(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_5(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_7(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_6(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_8(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_7(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_9(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_8(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_10(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_9(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_11(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_10(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_12(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_11(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_13(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_12(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_14(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_13(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_15(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_14(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_16(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_15(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_17(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_16(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_18(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_17(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_19(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_18(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FFI_20(WHAT, TYPE, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(TYPE, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FFI_19(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
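// FF_N and FFI_N above differ from the earlier families in what stays fixed during the recursion:
// WHAT, TYPE and the first OPNUM_PAIR are carried unchanged, while the variadic tail supplies
// successive OPNUM_PAIR_B values, so the one "A" pair is combined with each "B" pair in turn.
// Minimal illustrative expansion; CALL and the pairs are hypothetical placeholders:
//   FF_2(CALL, float, (0, OpA), (1, OpB1), (2, OpB2))
//     expands to CALL(float, (0, OpA), (1, OpB1)) CALL(float, (0, OpA), (2, OpB2))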

// #define FM_1(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FM_2(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_1(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_3(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_2(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_4(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_3(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_5(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_4(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_6(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_5(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_7(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_6(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_8(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_7(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_9(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_8(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_10(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_9(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_11(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_10(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_12(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_11(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_13(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_12(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_14(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_13(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_15(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_14(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_16(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_15(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_17(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_16(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_18(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_17(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_19(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_18(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_20(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_19(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_21(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_20(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_22(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_21(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_23(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_22(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_24(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_23(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_25(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_24(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_26(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_25(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_27(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_26(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_28(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_27(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_29(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_28(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_30(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_29(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_31(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_30(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_32(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_31(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_33(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_32(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_34(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_33(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_35(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_34(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_36(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_35(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_37(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_36(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_38(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_37(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_39(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_38(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_40(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_39(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_41(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_40(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_42(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_41(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_43(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_42(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_44(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_43(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_45(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_44(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_46(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_45(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_47(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_46(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_48(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_47(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_49(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_48(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_50(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_49(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_51(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_50(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_52(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_51(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_53(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_52(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_54(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_53(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_55(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_54(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_56(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_55(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_57(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_56(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_58(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_57(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_59(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_58(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_60(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_59(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_61(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_60(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_62(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_61(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_63(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_62(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_64(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_63(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_65(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_64(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_66(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_65(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_67(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_66(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_68(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_67(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_69(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_68(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FM_70(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FM_69(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
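// FM_N above (and the FX_N helpers that follow, which share the same parameter list) extend the
// FF_N/FFI_N pattern with extra fixed arguments: FN, SIG, OPCLASS and the first OPNUM_PAIR are
// threaded unchanged into every WHAT call while the recursion consumes the remaining
// OPNUM_PAIR_B values one per step.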


// #define FX_1(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FX_2(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_1(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_3(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_2(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_4(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_3(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_5(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_4(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_6(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_5(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_7(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_6(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_8(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_7(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_9(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_8(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_10(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_9(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_11(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_10(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_12(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_11(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_13(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_12(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_14(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_13(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_15(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_14(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_16(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_15(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_17(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_16(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_18(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_17(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_19(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_18(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_20(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_19(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_21(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_20(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_22(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_21(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_23(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_22(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_24(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_23(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_25(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_24(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_26(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_25(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_27(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_26(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_28(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_27(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_29(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_28(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_30(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_29(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_31(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_30(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_32(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_31(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_33(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_32(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_34(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_33(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_35(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_34(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_36(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_35(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_37(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_36(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_38(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_37(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_39(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_38(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_40(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_39(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_41(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_40(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_42(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_41(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_43(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_42(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_44(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_43(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_45(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_44(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_46(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_45(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_47(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_46(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_48(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_47(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_49(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_48(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_50(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_49(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_51(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_50(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_52(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_51(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_53(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_52(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_54(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_53(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_55(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_54(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_56(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_55(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_57(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_56(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_58(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_57(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_59(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_58(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_60(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_59(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_61(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_60(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_62(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_61(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_63(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_62(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_64(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_63(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_65(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_64(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_66(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_65(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_67(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_66(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_68(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_67(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_69(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_68(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FX_70(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, SIG, OPCLASS, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FX_69(WHAT, FN, SIG, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
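// The FX_n chain above is a counted FOR_EACH-style dispatcher: each level applies WHAT to the
// fixed prefix (FN, SIG, OPCLASS, OPNUM_PAIR) plus the current OPNUM_PAIR_B, then recurses via
// EVAL on the same fixed prefix and the remaining __VA_ARGS__, so one fixed pair gets combined
// with every pair in the trailing list. The commented C++ below is a minimal sketch of that
// technique only, not libnd4j code: MY_EVAL, MY_FX_*, PRINT_COMBO and the sample pairs are
// made-up names, and it assumes the library's EVAL does nothing more than force an extra
// expansion pass.
//
// #include <cstdio>
//
// #define MY_EVAL(...) __VA_ARGS__
// #define MY_FX_1(WHAT, FN, SIG, OPCLASS, A, B) WHAT(FN, SIG, OPCLASS, A, B)
// #define MY_FX_2(WHAT, FN, SIG, OPCLASS, A, B, ...) WHAT(FN, SIG, OPCLASS, A, B) MY_EVAL(MY_FX_1(WHAT, FN, SIG, OPCLASS, A, __VA_ARGS__))
// #define MY_FX_3(WHAT, FN, SIG, OPCLASS, A, B, ...) WHAT(FN, SIG, OPCLASS, A, B) MY_EVAL(MY_FX_2(WHAT, FN, SIG, OPCLASS, A, __VA_ARGS__))
//
// // WHAT callback: prints the fixed pair combined with the current pair.
// #define PRINT_COMBO(FN, SIG, OPCLASS, A, B) std::printf(#FN "/" #SIG "/" #OPCLASS ": " #A " x " #B "\n");
//
// int main() {
//     // Expands into three PRINT_COMBO statements: (0, op_a) paired with each trailing pair.
//     MY_FX_3(PRINT_COMBO, someFn, someSig, SomeOpClass, (0, op_a), (1, op_b), (2, op_c), (3, op_d))
//     return 0;
// }
//
// Scaled up to FX_70, the same shape lets a single invocation fan out across as many as 70
// op-number pairs while the surrounding signature stays written out only once.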


// #define FK_1(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FK_2(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_1(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_3(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_2(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_4(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_3(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_5(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_4(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_6(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_5(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_7(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_6(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_8(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_7(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_9(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_8(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_10(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_9(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_11(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_10(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_12(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_11(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_13(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_12(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_14(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_13(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_15(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_14(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_16(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_15(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_17(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_16(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_18(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_17(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_19(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_18(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_20(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_19(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_21(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_20(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_22(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_21(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_23(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_22(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_24(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_23(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_25(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_24(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_26(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_25(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_27(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_26(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_28(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_27(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_29(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_28(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_30(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_29(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_31(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_30(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_32(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_31(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_33(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_32(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_34(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_33(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_35(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_34(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_36(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_35(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_37(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_36(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_38(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_37(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_39(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_38(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_40(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_39(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_41(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_40(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_42(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_41(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_43(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_42(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_44(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_43(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_45(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_44(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_46(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_45(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_47(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_46(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_48(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_47(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_49(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_48(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_50(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_49(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_51(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_50(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_52(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_51(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_53(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_52(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_54(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_53(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_55(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_54(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_56(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_55(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_57(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_56(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_58(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_57(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_59(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_58(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_60(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_59(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_61(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_60(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_62(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_61(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_63(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_62(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_64(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_63(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_65(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_64(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_66(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_65(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_67(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_66(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_68(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_67(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_69(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_68(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FK_70(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FK_69(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
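// The FK_n chain above repeats the same recursion with a longer fixed prefix (FN, KERNEL, TYPE,
// OPCLASS, INPUTZ, PARAMZ): OPNUM_PAIR stays constant while OPNUM_PAIR_B walks the variadic tail.
// A hand expansion of FK_2 with placeholder names W, A, B1, B2 shows the shape:
//
//   FK_2(W, FN, K, T, C, I, P, A, B1, B2)
//     -> W(FN, K, T, C, I, P, A, B1) EVAL(FK_1(W, FN, K, T, C, I, P, A, B2))
//     -> W(FN, K, T, C, I, P, A, B1) W(FN, K, T, C, I, P, A, B2)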



// #define FI_1(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)
// #define FI_2(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_1(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_3(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_2(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_4(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_3(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_5(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_4(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_6(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_5(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_7(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_6(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_8(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_7(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_9(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_8(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_10(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_9(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_11(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_10(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_12(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_11(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_13(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_12(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_14(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_13(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_15(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_14(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_16(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_15(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_17(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_16(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_18(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_17(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_19(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_18(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_20(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_19(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_21(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_20(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_22(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_21(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_23(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_22(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_24(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_23(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_25(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_24(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_26(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_25(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_27(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_26(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_28(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_27(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_29(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_28(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_30(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_29(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_31(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_30(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_32(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_31(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_33(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_32(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_34(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_33(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_35(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_34(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_36(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_35(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_37(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_36(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_38(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_37(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_39(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_38(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_40(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_39(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_41(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_40(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_42(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_41(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_43(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_42(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_44(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_43(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_45(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_44(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_46(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_45(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_47(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_46(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_48(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_47(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_49(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_48(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_50(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_49(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_51(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_50(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_52(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_51(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_53(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_52(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_54(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_53(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_55(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_54(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_56(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_55(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_57(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_56(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_58(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_57(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_59(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_58(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_60(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_59(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_61(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_60(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_62(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_61(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_63(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_62(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_64(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_63(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_65(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_64(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_66(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_65(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_67(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_66(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_68(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_67(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_69(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_68(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FI_70(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B, ...) WHAT(FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, OPNUM_PAIR_B)EVAL(FI_69(WHAT, FN, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
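// The FI_n chain above has exactly the same parameter list and recursion as FK_n; only the family
// name differs. Keeping separately named chains is presumably what lets one WHAT expander use this
// machinery inside another without tripping the preprocessor's rule against a macro re-expanding
// itself: an FI_2 call expands just like the FK_2 trace shown earlier, with FI_1 in place of FK_1.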


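// The FS_n family defined below drops the second pair parameter: each level applies WHAT to
// (NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR) and then recurses on __VA_ARGS__ alone, so the
// pair list itself is consumed one entry per step instead of being combined with a fixed head.
// With placeholder names W, A, B, C:
//
//   FS_3(W, N, K, T, I, P, A, B, C)
//     -> W(N, K, T, I, P, A) EVAL(FS_2(W, N, K, T, I, P, B, C))
//     -> W(N, K, T, I, P, A) W(N, K, T, I, P, B) W(N, K, T, I, P, C)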
// #define FS_1(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)
// #define FS_2(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_1(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_3(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_2(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_4(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_3(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_5(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_4(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_6(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_5(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_7(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_6(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_8(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_7(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_9(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_8(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_10(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_9(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_11(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_10(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_12(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_11(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_13(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_12(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_14(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_13(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_15(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_14(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_16(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_15(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_17(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_16(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_18(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_17(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_19(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_18(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_20(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_19(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_21(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_20(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_22(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_21(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_23(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_22(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_24(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_23(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_25(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_24(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_26(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_25(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_27(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_26(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_28(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_27(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_29(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_28(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_30(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_29(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_31(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_30(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_32(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_31(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_33(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_32(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_34(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_33(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_35(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_34(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_36(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_35(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_37(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_36(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_38(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_37(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_39(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_38(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_40(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_39(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_41(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_40(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_42(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_41(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_43(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_42(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_44(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_43(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_45(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_44(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_46(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_45(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_47(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_46(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_48(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_47(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_49(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_48(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_50(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_49(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_51(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_50(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_52(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_51(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_53(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_52(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_54(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_53(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_55(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_54(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_56(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_55(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_57(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_56(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_58(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_57(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_59(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_58(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_60(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_59(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_61(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_60(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_62(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_61(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_63(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_62(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_64(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_63(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_65(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_64(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_66(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_65(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_67(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_66(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_68(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_67(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_69(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_68(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_70(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_69(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_71(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_70(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_72(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_71(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_73(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_72(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_74(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_73(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_75(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_74(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_76(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_75(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_77(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_76(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_78(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_77(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_79(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_78(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_80(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_79(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_81(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_80(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_82(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_81(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_83(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_82(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_84(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_83(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_85(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_84(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_86(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_85(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_87(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_86(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_88(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_87(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_89(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_88(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_90(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_89(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_91(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_90(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_92(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_91(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_93(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_92(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_94(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_93(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_95(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_94(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_96(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_95(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_97(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_96(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_98(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_97(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FS_99(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR, ...) WHAT(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR)EVAL(FS_98(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))

//////////////////////////////
// #ifdef __clang__
// #define _EXPAND_META_CALL(FN, SIG, OPCLASS, NUM_A, TYPE_A, NUM_B, TYPE_B) if(opNumA == NUM_A && opNumB == NUM_B){ MIX4(FN,NUM_A,NUM_B,OPCLASS) LAUNCH(256, 512, 1024, *stream) SIG; };
// #elif _MSC_VER
// #define _EXPAND_META_CALL(FN, SIG, OPCLASS, NUM_A, TYPE_A, NUM_B, TYPE_B) if(opNumA == NUM_A && opNumB == NUM_B){ MIX4(FN,NUM_A,NUM_B,OPCLASS) LAUNCH(256, 512, 1024, *stream) SIG; };
// #elif __GNUC__
// #define _EXPAND_META_CALL(FN, SIG, OPCLASS, NUM_A, TYPE_A, NUM_B, TYPE_B) else if(opNumA == NUM_A && opNumB == NUM_B){ MIX4(FN,NUM_A,NUM_B,OPCLASS) LAUNCH(256, 512, 2048, *stream) SIG; }
// #elif __CUDACC__
// #endif
// #define _EXPAND_OP_SIMPLE(NAME, TYPE, PARAMZ, NUM_A, TYPE_A) case NUM_A: {MIX3(NAME, NUM_A, TYPE) LAUNCH(launchDims.x, launchDims.y, launchDims.z, *stream) PARAMZ;} break;

// #define _EXPAND_OP_CALL_1(NAME, TYPE, PARAMZ, NUM_A, TYPE_A) NAME>PARAMZ;
// #define _EXPAND_OP_DIRECT(PARAMZ, NUM_A, TYPE_A)  case NUM_A: { z = TYPE_A::op PARAMZ; break; }
// #define _EXPAND_OP_CALL_T(TYPE, NUM_A, TYPE_A) OpTracker::getInstance().storeOperation(TYPE, #TYPE_A, NUM_A);

// #define _EXPAND_FACTORY_CALL(TYPE, LAYER_ID, LAYER_NAME, ACTIVATION_ID, ACTIVATION_NAME) if (activationNum == ACTIVATION_ID && layerNum == LAYER_ID) { return new LAYER_NAME>(); };

// #define _EXPAND_PACKED_CALL_1(NAME, TYPE, PARAMZ, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_CALL_1(NAME, TYPE, PARAMZ, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_DIRECT(PARAMZ, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_DIRECT(PARAMZ, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_CALL_T(TYPE, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_CALL_T(TYPE, UNPAREN(OPNUM_PAIR)))

// #define _EXPAND_KERNEL_CALL(NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, NUM_A, TYPE_A, NUM_B, TYPE_B) extern "C" __global__ void ARGMIX4(NAME, NUM_A, NUM_B, TYPE)INPUTZ {KERNEL, TYPE_B>>PARAMZ ;};
// #define _EXPAND_KERNEL_SIMPLE(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, NUM_A, TYPE_A) extern "C" __global__ void ARGMIX3(NAME, NUM_A, TYPE)INPUTZ {KERNEL>PARAMZ ;};
// #define _EXPAND_PACKED_SIMPLE(NAME, TYPE, PARAMZ, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _OP_SIMPLE(NAME, TYPE, PARAMZ, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_KERNEL_SIMPLE(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, OPNUM_PAIR) EVALUATING_PASTE(_EXPAND, _KERNEL_SIMPLE (NAME, KERNEL, TYPE, INPUTZ, PARAMZ, UNPAREN(OPNUM_PAIR)))
// #define _EXPAND_PACKED_KERNEL_CALL(NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR_A, OPNUM_PAIR_B) EVALUATING_PASTE(_EXPAND, _KERNEL_CALL (NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, UNPAREN(OPNUM_PAIR_B), UNPAREN(OPNUM_PAIR_A)))
// #define _EXPAND_PACKED_META_CALL(FN, SIG, OPCLASS, OPNUM_PAIR_A, OPNUM_PAIR_B) EVALUATING_PASTE(_EXPAND, _META_CALL (FN, SIG, OPCLASS, UNPAREN(OPNUM_PAIR_B), UNPAREN(OPNUM_PAIR_A) ))

// #define _EXPAND_PACKED_FACTORY_CALL(TYPE, OPNUM_PAIR_A, OPNUM_PAIR_B) EVALUATING_PASTE(_EXPAND, _FACTORY_CALL (TYPE, UNPAREN(OPNUM_PAIR_B), UNPAREN(OPNUM_PAIR_A)))
//////////////////////////////

// #define GET_MACROS_1(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, NAME,...) NAME
// #define GET_MACROS_D(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, NAME,...) NAME
// #define GET_MACROS_T(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, NAME,...) NAME

// #define  GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, NAME,...) NAME
// #define GET_MACROS(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, NAME,...) NAME
// #define GET_MACROX(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, NAME,...) NAME
// #define GET_MACROE(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, NAME,...) NAME
// #define GET_MACROK(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, NAME,...) NAME
// #define GET_MACROI(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, NAME,...) NAME
// #define GET_MACROF(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, NAME,...) NAME
// #define FOR_EACH(WHAT, NAME, SIGNATURE, ...) EVAL(GET_MACRO(__VA_ARGS__,  FE_99, FE_98, FE_97, FE_96, FE_95, FE_94, FE_93, FE_92, FE_91, FE_90, FE_89, FE_88, FE_87, FE_86, FE_85, FE_84, FE_83, FE_82, FE_81, FE_80, FE_79, FE_78, FE_77, FE_76, FE_75, FE_74, FE_73, FE_72, FE_71, FE_70, FE_69, FE_68, FE_67, FE_66, FE_65, FE_64, FE_63, FE_62, FE_61, FE_60, FE_59, FE_58, FE_57, FE_56, FE_55, FE_54, FE_53, FE_52, FE_51, FE_50, FE_49, FE_48, FE_47, FE_46, FE_45, FE_44, FE_43, FE_42, FE_41, FE_40, FE_39, FE_38, FE_37, FE_36, FE_35, FE_34, FE_33, FE_32, FE_31, FE_30, FE_29, FE_28, FE_27, FE_26, FE_25, FE_24, FE_23, FE_22, FE_21, FE_20, FE_19, FE_18, FE_17, FE_16, FE_15, FE_14, FE_13, FE_12, FE_11, FE_10, FE_9, FE_8, FE_7, FE_6, FE_5, FE_4, FE_3, FE_2, FE_1)(WHAT, NAME, SIGNATURE, __VA_ARGS__))

// #define FOR_EACH_M(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR, ...) EXPAND(GET_MACROX(__VA_ARGS__, FM_70, FM_69, FM_68, FM_67, FM_66, FM_65, FM_64, FM_63, FM_62, FM_61, FM_60, FM_59, FM_58, FM_57, FM_56, FM_55, FM_54, FM_53, FM_52, FM_51, FM_50, FM_49, FM_48, FM_47, FM_46, FM_45, FM_44, FM_43, FM_42, FM_41, FM_40, FM_39, FM_38, FM_37, FM_36, FM_35, FM_34, FM_33, FM_32, FM_31, FM_30, FM_29, FM_28, FM_27, FM_26, FM_25, FM_24, FM_23, FM_22, FM_21, FM_20, FM_19, FM_18, FM_17, FM_16, FM_15, FM_14, FM_13, FM_12, FM_11, FM_10, FM_9, FM_8, FM_7, FM_6, FM_5, FM_4, FM_3, FM_2, FM_1)(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR, __VA_ARGS__))
// #define FOR_EACH_X(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR, ...) EXPAND(GET_MACROE(__VA_ARGS__, FX_70, FX_69, FX_68, FX_67, FX_66, FX_65, FX_64, FX_63, FX_62, FX_61, FX_60, FX_59, FX_58, FX_57, FX_56, FX_55, FX_54, FX_53, FX_52, FX_51, FX_50, FX_49, FX_48, FX_47, FX_46, FX_45, FX_44, FX_43, FX_42, FX_41, FX_40, FX_39, FX_38, FX_37, FX_36, FX_35, FX_34, FX_33, FX_32, FX_31, FX_30, FX_29, FX_28, FX_27, FX_26, FX_25, FX_24, FX_23, FX_22, FX_21, FX_20, FX_19, FX_18, FX_17, FX_16, FX_15, FX_14, FX_13, FX_12, FX_11, FX_10, FX_9, FX_8, FX_7, FX_6, FX_5, FX_4, FX_3, FX_2, FX_1)(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR, __VA_ARGS__))

// #define FOR_EACH_CALL_1(WHAT, NAME, TYPE, PARAMZ, ...) EXPAND(GET_MACROS_1(__VA_ARGS__,  CL1_99, CL1_98, CL1_97, CL1_96, CL1_95, CL1_94, CL1_93, CL1_92, CL1_91, CL1_90, CL1_89, CL1_88, CL1_87, CL1_86, CL1_85, CL1_84, CL1_83, CL1_82, CL1_81, CL1_80, CL1_79, CL1_78, CL1_77, CL1_76, CL1_75, CL1_74, CL1_73, CL1_72, CL1_71, CL1_70, CL1_69, CL1_68, CL1_67, CL1_66, CL1_65, CL1_64, CL1_63, CL1_62, CL1_61, CL1_60, CL1_59, CL1_58, CL1_57, CL1_56, CL1_55, CL1_54, CL1_53, CL1_52, CL1_51, CL1_50, CL1_49, CL1_48, CL1_47, CL1_46, CL1_45, CL1_44, CL1_43, CL1_42, CL1_41, CL1_40, CL1_39, CL1_38, CL1_37, CL1_36, CL1_35, CL1_34, CL1_33, CL1_32, CL1_31, CL1_30, CL1_29, CL1_28, CL1_27, CL1_26, CL1_25, CL1_24, CL1_23, CL1_22, CL1_21, CL1_20, CL1_19, CL1_18, CL1_17, CL1_16, CL1_15, CL1_14, CL1_13, CL1_12, CL1_11, CL1_10, CL1_9, CL1_8, CL1_7, CL1_6, CL1_5, CL1_4, CL1_3, CL1_2, CL1_1)(WHAT, NAME, TYPE, PARAMZ, __VA_ARGS__))
// #define FOR_EACH_DIRECT(WHAT, PARAMZ, ...) EXPAND(GET_MACROS_D(__VA_ARGS__,  DIR_99, DIR_98, DIR_97, DIR_96, DIR_95, DIR_94, DIR_93, DIR_92, DIR_91, DIR_90, DIR_89, DIR_88, DIR_87, DIR_86, DIR_85, DIR_84, DIR_83, DIR_82, DIR_81, DIR_80, DIR_79, DIR_78, DIR_77, DIR_76, DIR_75, DIR_74, DIR_73, DIR_72, DIR_71, DIR_70, DIR_69, DIR_68, DIR_67, DIR_66, DIR_65, DIR_64, DIR_63, DIR_62, DIR_61, DIR_60, DIR_59, DIR_58, DIR_57, DIR_56, DIR_55, DIR_54, DIR_53, DIR_52, DIR_51, DIR_50, DIR_49, DIR_48, DIR_47, DIR_46, DIR_45, DIR_44, DIR_43, DIR_42, DIR_41, DIR_40, DIR_39, DIR_38, DIR_37, DIR_36, DIR_35, DIR_34, DIR_33, DIR_32, DIR_31, DIR_30, DIR_29, DIR_28, DIR_27, DIR_26, DIR_25, DIR_24, DIR_23, DIR_22, DIR_21, DIR_20, DIR_19, DIR_18, DIR_17, DIR_16, DIR_15, DIR_14, DIR_13, DIR_12, DIR_11, DIR_10, DIR_9, DIR_8, DIR_7, DIR_6, DIR_5, DIR_4, DIR_3, DIR_2, DIR_1)(WHAT, PARAMZ, __VA_ARGS__))
// #define FOR_EACH_TRACKER(WHAT, TYPE, ...) EXPAND(GET_MACROS_T(__VA_ARGS__,  TR_99, TR_98, TR_97, TR_96, TR_95, TR_94, TR_93, TR_92, TR_91, TR_90, TR_89, TR_88, TR_87, TR_86, TR_85, TR_84, TR_83, TR_82, TR_81, TR_80, TR_79, TR_78, TR_77, TR_76, TR_75, TR_74, TR_73, TR_72, TR_71, TR_70, TR_69, TR_68, TR_67, TR_66, TR_65, TR_64, TR_63, TR_62, TR_61, TR_60, TR_59, TR_58, TR_57, TR_56, TR_55, TR_54, TR_53, TR_52, TR_51, TR_50, TR_49, TR_48, TR_47, TR_46, TR_45, TR_44, TR_43, TR_42, TR_41, TR_40, TR_39, TR_38, TR_37, TR_36, TR_35, TR_34, TR_33, TR_32, TR_31, TR_30, TR_29, TR_28, TR_27, TR_26, TR_25, TR_24, TR_23, TR_22, TR_21, TR_20, TR_19, TR_18, TR_17, TR_16, TR_15, TR_14, TR_13, TR_12, TR_11, TR_10, TR_9, TR_8, TR_7, TR_6, TR_5, TR_4, TR_3, TR_2, TR_1)(WHAT, TYPE, __VA_ARGS__))

// #define FOR_EACH_Z(WHAT, NAME, TYPE, PARAMZ, ...) EXPAND(GET_MACROS(__VA_ARGS__,  FZ_99, FZ_98, FZ_97, FZ_96, FZ_95, FZ_94, FZ_93, FZ_92, FZ_91, FZ_90, FZ_89, FZ_88, FZ_87, FZ_86, FZ_85, FZ_84, FZ_83, FZ_82, FZ_81, FZ_80, FZ_79, FZ_78, FZ_77, FZ_76, FZ_75, FZ_74, FZ_73, FZ_72, FZ_71, FZ_70, FZ_69, FZ_68, FZ_67, FZ_66, FZ_65, FZ_64, FZ_63, FZ_62, FZ_61, FZ_60, FZ_59, FZ_58, FZ_57, FZ_56, FZ_55, FZ_54, FZ_53, FZ_52, FZ_51, FZ_50, FZ_49, FZ_48, FZ_47, FZ_46, FZ_45, FZ_44, FZ_43, FZ_42, FZ_41, FZ_40, FZ_39, FZ_38, FZ_37, FZ_36, FZ_35, FZ_34, FZ_33, FZ_32, FZ_31, FZ_30, FZ_29, FZ_28, FZ_27, FZ_26, FZ_25, FZ_24, FZ_23, FZ_22, FZ_21, FZ_20, FZ_19, FZ_18, FZ_17, FZ_16, FZ_15, FZ_14, FZ_13, FZ_12, FZ_11, FZ_10, FZ_9, FZ_8, FZ_7, FZ_6, FZ_5, FZ_4, FZ_3, FZ_2, FZ_1)(WHAT, NAME, TYPE, PARAMZ, __VA_ARGS__))
// #define FOR_EACH_S(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, ...) EXPAND(GET_MACROS(__VA_ARGS__,  FS_99, FS_98, FS_97, FS_96, FS_95, FS_94, FS_93, FS_92, FS_91, FS_90, FS_89, FS_88, FS_87, FS_86, FS_85, FS_84, FS_83, FS_82, FS_81, FS_80, FS_79, FS_78, FS_77, FS_76, FS_75, FS_74, FS_73, FS_72, FS_71, FS_70, FS_69, FS_68, FS_67, FS_66, FS_65, FS_64, FS_63, FS_62, FS_61, FS_60, FS_59, FS_58, FS_57, FS_56, FS_55, FS_54, FS_53, FS_52, FS_51, FS_50, FS_49, FS_48, FS_47, FS_46, FS_45, FS_44, FS_43, FS_42, FS_41, FS_40, FS_39, FS_38, FS_37, FS_36, FS_35, FS_34, FS_33, FS_32, FS_31, FS_30, FS_29, FS_28, FS_27, FS_26, FS_25, FS_24, FS_23, FS_22, FS_21, FS_20, FS_19, FS_18, FS_17, FS_16, FS_15, FS_14, FS_13, FS_12, FS_11, FS_10, FS_9, FS_8, FS_7, FS_6, FS_5, FS_4, FS_3, FS_2, FS_1)(WHAT, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__))
// #define FOR_EACH_I(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, ...) EXPAND(GET_MACROI(__VA_ARGS__, FI_70, FI_69, FI_68, FI_67, FI_66, FI_65, FI_64, FI_63, FI_62, FI_61, FI_60, FI_59, FI_58, FI_57, FI_56, FI_55, FI_54, FI_53, FI_52, FI_51, FI_50, FI_49, FI_48, FI_47, FI_46, FI_45, FI_44, FI_43, FI_42, FI_41, FI_40, FI_39, FI_38, FI_37, FI_36, FI_35, FI_34, FI_33, FI_32, FI_31, FI_30, FI_29, FI_28, FI_27, FI_26, FI_25, FI_24, FI_23, FI_22, FI_21, FI_20, FI_19, FI_18, FI_17, FI_16, FI_15, FI_14, FI_13, FI_12, FI_11, FI_10, FI_9, FI_8, FI_7, FI_6, FI_5, FI_4, FI_3, FI_2, FI_1)(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))
// #define FOR_EACH_K(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, ...) EXPAND(GET_MACROK(__VA_ARGS__, FK_70, FK_69, FK_68, FK_67, FK_66, FK_65, FK_64, FK_63, FK_62, FK_61, FK_60, FK_59, FK_58, FK_57, FK_56, FK_55, FK_54, FK_53, FK_52, FK_51, FK_50, FK_49, FK_48, FK_47, FK_46, FK_45, FK_44, FK_43, FK_42, FK_41, FK_40, FK_39, FK_38, FK_37, FK_36, FK_35, FK_34, FK_33, FK_32, FK_31, FK_30, FK_29, FK_28, FK_27, FK_26, FK_25, FK_24, FK_23, FK_22, FK_21, FK_20, FK_19, FK_18, FK_17, FK_16, FK_15, FK_14, FK_13, FK_12, FK_11, FK_10, FK_9, FK_8, FK_7, FK_6, FK_5, FK_4, FK_3, FK_2, FK_1)(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR, __VA_ARGS__))

// #define FOR_EACH_F(WHAT, TYPE, OPNUM_PAIR, ...) EXPAND(GET_MACROF(__VA_ARGS__, FF_20, FF_19, FF_18, FF_17, FF_16, FF_15, FF_14, FF_13, FF_12, FF_11, FF_10, FF_9, FF_8, FF_7, FF_6, FF_5, FF_4, FF_3, FF_2, FF_1)(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))
// #define FOR_EACH_FI(WHAT, TYPE, OPNUM_PAIR, ...) EXPAND(GET_MACROF(__VA_ARGS__, FFI_20, FFI_19, FFI_18, FFI_17, FFI_16, FFI_15, FFI_14, FFI_13, FFI_12, FFI_11, FFI_10, FFI_9, FFI_8, FFI_7, FFI_6, FFI_5, FFI_4, FFI_3, FFI_2, FFI_1)(WHAT, TYPE, OPNUM_PAIR, __VA_ARGS__))


// #define _EXEC_OPS(WHAT, NAME, SIGNATURE, ...) EVAL(FOR_EACH(WHAT, NAME, SIGNATURE, __VA_ARGS__))
// #define _EXEC_OP_SIMPLE(NAME, TYPE, SIGNATURE, ...) EVAL(FOR_EACH_Z(THETA, NAME, TYPE, SIGNATURE, __VA_ARGS__))

// #define _EXEC_BUILDER_1(NAME, TYPE, SIGNATURE, ...) EVAL(FOR_EACH_CALL_1(CALL_1, NAME, TYPE, SIGNATURE, __VA_ARGS__))
// #define _EXEC_OP_DIRECT(SIGNATURE, ...) EVAL(FOR_EACH_DIRECT(DIRECT, SIGNATURE, __VA_ARGS__))
// #define _EXEC_TRACKER(TYPE, ...) EVAL(FOR_EACH_TRACKER(CALL_T, TYPE, __VA_ARGS__))

// #define _EXEC_META_X(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR_A, ...) EVAL(FOR_EACH_X(WHAT, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR_A, __VA_ARGS__))
// #define _EXEC_META_M(WHAT, NAME, SIGNATURE, OPCLASS, LIST_A, ...) EVAL(FOR_EACH_M(ALPHA, NAME, SIGNATURE, OPCLASS, OPS_A(LIST_A), __VA_ARGS__))
// #define _EXEC_FACTORY(WHAT, TYPE, LIST_A, ...) EVAL(FOR_EACH_F(PHI, TYPE, OPS_A(LIST_A), __VA_ARGS__))
// #define _EXEC_FACTORY_INTERNAL(WHAT, TYPE, ACTIVATION, ...) EVAL(FOR_EACH_FI(WHAT, TYPE, ACTIVATION, __VA_ARGS__))

// #define _EXEC_KERNEL_F(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, ...) EVAL(FOR_EACH_S(GAMMA, NAME, KERNEL, TYPE, INPUTZ, PARAMZ, __VA_ARGS__ ))
// #define _EXEC_KERNEL_M(NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, LIST_A, ...) EVAL(FOR_EACH_K(BETA, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPS_A(LIST_A), __VA_ARGS__ ))
// #define _EXEC_KERNEL_X(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ,  OPNUM_PAIR_A, ...) EVAL(FOR_EACH_I(WHAT, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR_A, __VA_ARGS__))
//_EXPAND_PACKED_OP_CALL
// #define DISPATCH_BY_OPNUM_T(NAME, SIGNATURE, ...) switch(opNum) { EVAL(_EXEC_OPS(_EXPAND_PACKED_OP_CALL, NAME, (SIGNATURE), __VA_ARGS__)) default: { printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); }}
// #define DISPATCH_BY_OPNUM_TT(NAME, SIGNATURE, ...) switch(opNum) { EVAL(_EXEC_OPS(_EXPAND_PACKED_OP_CALL_TT, NAME, (SIGNATURE), __VA_ARGS__)) default: { printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); }}
// #define DISPATCH_BY_OPNUM_TTT(NAME, SIGNATURE, ...) switch(opNum) { EVAL(_EXEC_OPS(_EXPAND_PACKED_OP_CALL_TTT, NAME, (SIGNATURE), __VA_ARGS__)) default: { printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); }}
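// Illustrative note (not part of the generated bindings): DISPATCH_BY_OPNUM_T and its TT/TTT
// variants conceptually expand into a switch over opNum, with one case per (op number, op class)
// pair taken from the trailing op list. A rough, hypothetical sketch of the shape of that expansion:
//
//   switch (opNum) {
//       case 0:  NAME<OpClass0>(SIGNATURE); break;   // hypothetical pair (0, OpClass0)
//       case 1:  NAME<OpClass1>(SIGNATURE); break;   // hypothetical pair (1, OpClass1)
//       default: printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__);
//   }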

// #ifdef __clang__
// #define DISPATCH_METAOP(NAME, SIGNATURE, OPCLASS, LIST_A, LIST_B) EVAL(_EXEC_META_M(RANDOMWHAT, NAME, (SIGNATURE), OPCLASS, (LIST_A), LIST_B))
// #elif _MSC_VER
// #define DISPATCH_METAOP(NAME, SIGNATURE, OPCLASS, LIST_A, LIST_B) EVAL(_EXEC_META_M(RANDOMWHAT, NAME, (SIGNATURE), OPCLASS, (LIST_A), LIST_B))
// #elif __GNUC__
// #define DISPATCH_METAOP(NAME, SIGNATURE, OPCLASS, LIST_A, LIST_B) if(false){} EVAL(_EXEC_META_M(RANDOMWHAT, NAME, (SIGNATURE), OPCLASS, (LIST_A), LIST_B)) else{ printf("[ERROR] Unknown opNum=%d on %s:%d", opNumA, __FILE__, __LINE__); }
// #elif __CUDACC__
// #endif

// #define BUILD_LAYERS_FACTORY(TYPE, LIST_A, LIST_B) EVAL(_EXEC_FACTORY(RANDOMWHAT, TYPE, (LIST_A), LIST_B))

// #define DISPATCH_SIMPLE(NAME, TYPE, SIGNATURE, LIST_A) switch(opNum) { EVAL(_EXEC_OP_SIMPLE(NAME, TYPE, (SIGNATURE), LIST_A)) default: { printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); }}

// #define DISPATCH_KERNEL_SIMPLE(NAME, KERNEL, TYPE, INPUTZ, PARAMZ, LIST_A) EVAL(_EXEC_KERNEL_F(NAME, KERNEL, TYPE, (INPUTZ), (PARAMZ), LIST_A))
// #define DISPATCH_KERNEL_META(NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, LIST_A, LIST_B ) EVAL(_EXEC_KERNEL_M(NAME, KERNEL, TYPE, OPCLASS, (INPUTZ), (PARAMZ), (LIST_A), LIST_B))
// #define DISPATCH_INTERNAL(NAME, SIGNATURE, OPCLASS, OPNUM_PAIR_B, ...) EXPAND(_EXEC_META_X(_EXPAND_PACKED_META_CALL, NAME, SIGNATURE, OPCLASS, OPNUM_PAIR_B, __VA_ARGS__))

// #define DISPATCH_FACTORY(TYPE, ACTIVATION, ...) EXPAND(_EXEC_FACTORY_INTERNAL(_EXPAND_PACKED_FACTORY_CALL, TYPE, ACTIVATION, __VA_ARGS__))

// #define DISPATCH_KERNEL_INTERNAL(NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR_B, ...) EXPAND(_EXEC_KERNEL_X(_EXPAND_PACKED_KERNEL_CALL, NAME, KERNEL, TYPE, OPCLASS, INPUTZ, PARAMZ, OPNUM_PAIR_B, __VA_ARGS__))

// #define BUILD_CALL_1(NAME, TYPE, SIGNATURE, OPS) EVAL(_EXEC_BUILDER_1(NAME, TYPE, SIGNATURE, OPS))
// #define BUILD_TRACKER(TYPE, OPS) EVAL(_EXEC_TRACKER(TYPE, OPS))

// #define EXECUTE_NOE(SIGNATURE, LIST_A) switch(opNum) {EVAL(_EXEC_OP_DIRECT(SIGNATURE, LIST_A)) default: { printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); }};

// #define RETURNING_DISPATCH_BY_OPNUM_T(NAME, SIGNATURE, ...) if(false){} EVAL(_EXEC_OPS(_EXPAND_RETURNING_PACKED_OP_CALL, NAME, (SIGNATURE), __VA_ARGS__)) else{ printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); return 0; }
// #define RETURNING_DISPATCH_BY_OPNUM_TT(NAME, SIGNATURE, ...) if(false){} EVAL(_EXEC_OPS(_EXPAND_RETURNING_PACKED_OP_CALL_TT, NAME, (SIGNATURE), __VA_ARGS__)) else{ printf("[ERROR] Unknown opNum=%d on %s:%d", opNum, __FILE__, __LINE__); return 0; }

// #define PARAMS(...) __VA_ARGS__
// #define INPUT(...) __VA_ARGS__
// #define OPS_A(...) __VA_ARGS__
// #define OPS_B(...) __VA_ARGS__
// #define OPS_X(...) __VA_ARGS__
// #define ALPHA(A, B, C, D, E) EXPAND(DISPATCH_INTERNAL(A, B, C, E, UNPAREN(D)))
// #define BETA(A, B, C, D, E, F, G, H) EXPAND(DISPATCH_KERNEL_INTERNAL(A, B, C, D, E, F, H, UNPAREN(G)))
// #define GAMMA(A, B, C, D, E, F) EXPAND(_EXPAND_PACKED_KERNEL_SIMPLE(A, B, C, D, E, F))
// #define THETA(A, B, C, D) EXPAND(_EXPAND_PACKED_SIMPLE(A, B, C, D))
// #define PHI(A, B, C) EXPAND(DISPATCH_FACTORY(A, C, UNPAREN(B)))

// #define CALL_1(A, B, C, D) EXPAND(_EXPAND_PACKED_CALL_1(A, B, C, D))
// #define CALL_T(A, B) EXPAND(_EXPAND_PACKED_CALL_T(A, B))
// #define DIRECT(A, B) EXPAND(_EXPAND_PACKED_DIRECT(A, B))




/** graph definitions */
// #define REQUIRE_OK(A)  if (sd::ops::resultHelper( (A), #A, __FILE__, __LINE__ ) != 0) return ND4J_STATUS_VALIDATION;
// #define REQUIRE_TRUE(COND, ...) if (!(COND)) { if (sd::ops::conditionHelper(__FILE__, __LINE__, COND, __VA_ARGS__) != 0) throw std::invalid_argument("Op validation failed");};
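// Usage sketch (illustrative only; op name and message are hypothetical): inside an op's
// validateAndExecute body, REQUIRE_TRUE is commonly given the condition, an argument index,
// and a printf-style message that is reported when validation fails:
//
//   auto x = INPUT_VARIABLE(0);
//   REQUIRE_TRUE(x->rankOf() == 2, 0, "my_op: expected a rank-2 input, but got rank %i", x->rankOf());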

// #define DECLARE_ENTRY(NAME, ...)           template struct ND4J_EXPORT __registratorFloat>;
//                                       template struct ND4J_EXPORT __registratorHalf>;
//                                       template struct ND4J_EXPORT __registratorDouble>;
//                                       template struct ND4J_EXPORT __registratorSynonymHalf>;
//                                       template struct ND4J_EXPORT __registratorSynonymDouble>;
//                                       template struct ND4J_EXPORT __registratorSynonymFloat>;


// #if defined(_MSC_VER) || defined(_WIN64) || defined(_WIN32) || defined(__CLION_IDE__) || defined(__VSCODE__)
// #define NOT_EXCLUDED(NAME) 1>0
// #else
// for now we don't want minifier mechanics working
//#define NOT_EXCLUDED(NAME) defined(SD_ALL_OPS) || defined(NAME)
// #define NOT_EXCLUDED(NAME) 1>0
// #endif

// #ifdef __JAVACPP_HACK__
// #define REGISTER_H(NAME)
// #elif defined(SD_ALL_OPS)
// #else
// #define REGISTER_H(NAME)  template <typename OpName>
//                         struct __registrator_##NAME {
//                             __registrator_##NAME() {
//                                 OpName *ptr = new OpName();
//                                 OpRegistrator::getInstance().registerOperation(ptr);
//                             }
//                         };
//                         static sd::ops::__registrator_##NAME zzz_register_opd_##NAME;
// #endif
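// Illustrative note (not part of the generated file): REGISTER_H implements the usual
// static-registrar idiom -- a helper struct whose constructor hands a freshly allocated op
// instance to OpRegistrator, plus a static instance of that struct so registration happens
// during static initialization. A rough sketch of what REGISTER_H(my_op) yields for a
// hypothetical op class my_op (the template-argument part is inferred here, since the
// angle-bracketed pieces of the original macro were lost in extraction):
//
//   template <typename OpName>
//   struct __registrator_my_op {
//       __registrator_my_op() {
//           OpName *ptr = new OpName();                              // create the op once
//           OpRegistrator::getInstance().registerOperation(ptr);     // hand it to the registry
//       }
//   };
//   static sd::ops::__registrator_my_op<my_op> zzz_register_opd_my_op;   // runs at startup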

// #ifdef __JAVACPP_HACK__
// #define REGISTER_C(NAME)
// #elif defined(SD_ALL_OPS)
// #else
// #define REGISTER_C(NAME)
// #endif

// #define DECLARE_OP(NAME, NIN, NOUT, INPLACEABLE)   class ND4J_EXPORT NAME: public sd::ops::DeclarableOp {
//                                                 public:
//                                                     NAME();
//                                                     sd::ShapeList* calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block);
//                                                 protected:
//                                                     void registerTypes();
//                                                     Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                                 };
//                                                 REGISTER_H(NAME)

// #define DECLARE_BOOLEAN_OP(NAME, NIN, SCALAR)   class ND4J_EXPORT NAME: public sd::ops::BooleanOp {
//                                                 public:
//                                                     NAME();
//                                                 protected:
//                                                     void registerTypes();
//                                                     Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                                 };
//                                                 REGISTER_H(NAME)

// #define BOOLEAN_OP_IMPL(NAME, NIN, SCALAR)   NAME::NAME() : sd::ops::BooleanOp(#NAME, NIN, SCALAR) { };
//                                                 REGISTER_C(NAME)
//                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)

// #define DECLARE_LIST_OP(NAME, NIN, NOUT, TARGS, IARGS)      class ND4J_EXPORT  NAME: public sd::ops::DeclarableListOp {
//                                                             public:
//                                                                 NAME();
//                                                             protected:
//                                                                 Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                                             };
//                                                             REGISTER_H(NAME)

// #define LIST_OP_IMPL(NAME, NIN, NOUT, TARGS, IARGS)         NAME::NAME() : sd::ops::DeclarableListOp(NIN, NOUT, #NAME, TARGS, IARGS) { };
//                                                             REGISTER_C(NAME)
//                                                             Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)

// #define DECLARE_LOGIC_OP(NAME)      class ND4J_EXPORT NAME: public sd::ops::LogicOp {
//                                     public:
//                                         NAME();
//                                     protected:
//                                         Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                     };
//                                     REGISTER_H(NAME)

// #define LOGIC_OP_IMPL(NAME)     NAME::NAME() : sd::ops::LogicOp(#NAME) { };
//                                 REGISTER_C(NAME)
//                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block) { return sd::ops::LogicOp::validateAndExecute(block); };



// #define OP_IMPL(NAME, NIN, NOUT, INPLACEABLE)   NAME::NAME() : sd::ops::DeclarableOp(NIN, NOUT, #NAME, INPLACEABLE) { };
//                                                 REGISTER_C(NAME)
//                                                 sd::ShapeList* sd::ops::NAME::calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block) {
//                                                     auto shapeList = SHAPELIST();
//                                                     auto opLimit = this->getOpDescriptor()->getNumberOfOutputs() < 1 ? block.width() : this->getOpDescriptor()->getNumberOfOutputs();
//                                                     for (int e = 0; e < opLimit; e++) {
//                                                         auto newshape = ConstantShapeHelper::getInstance().createShapeInfo(ArrayOptions::dataType(inputShape->at(e)), shape::order(inputShape->at(e)), shape::rank(inputShape->at(e)), shape::shapeOf(inputShape->at(e)));
//                                                         shapeList->push_back(newshape);
//                                                     }
//                                                     return shapeList;
//                                                 }
//                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)


// #define DECLARE_SYN(NAME, ORIGINAL) template <typename OpName>
//                                     struct __registratorSynonym_##NAME {
//                                         __registratorSynonym_##NAME(const char *name, const char *oname) {
//                                             auto ptr = reinterpret_cast<OpName *>(OpRegistrator::getInstance().getOperation(oname));
//                                             if (ptr == nullptr) {
//                                                 std::string newName(name);
//                                                 std::string oldName(oname);
//                                                 OpRegistrator::getInstance().updateMSVC(sd::ops::HashHelper::getInstance().getLongHash(newName), oldName);
//                                                 return;
//                                             }
//                                             OpRegistrator::getInstance().registerOperation(name, ptr);
//                                             }
//                                         };
//                                         static sd::ops::__registratorSynonym_##NAME zzz_register_opd_##NAME(#NAME, #ORIGINAL)

// #define DECLARE_DIVERGENT_OP(NAME, NIN, NOUT, INPLACEABLE)  class ND4J_EXPORT NAME: public sd::ops::DeclarableOp {
//                                                             public:
//                                                                 NAME();
//                                                                 sd::ShapeList* calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block);
//                                                             protected:
//                                                                 Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                                             };
//                                                             REGISTER_H(NAME)

// #define DIVERGENT_OP_IMPL(NAME, NIN, NOUT, INPLACEABLE)     NAME::NAME() : sd::ops::DeclarableOp(NIN, NOUT, #NAME, INPLACEABLE, true) { };
//                                                             REGISTER_C(NAME)
//                                                             sd::ShapeList* sd::ops::NAME::calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block) {
//                                                                 auto shapeList = SHAPELIST();
//                                                                 auto opLimit = this->getOpDescriptor()->getNumberOfOutputs() < 1 ? block.width() : this->getOpDescriptor()->getNumberOfOutputs();
//                                                                 for (int e = 0; e < opLimit; e++) {
//                                                                     Nd4jLong* newshape;
//                                                                     COPY_SHAPE(inputShape->at(0), newshape);
//                                                                     shapeList->push_back(CONSTANT(newshape));
//                                                                 }
//                                                                 return shapeList;
//                                                             }
//                                                             Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)

// #define DECLARE_CONFIGURABLE_OP(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)     class ND4J_EXPORT NAME: public sd::ops::DeclarableOp {
//                                                                                 public:
//                                                                                     NAME();
//                                                                                     sd::ShapeList* calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block);
//                                                                                 protected:
//                                                                                     void registerTypes();
//                                                                                     Nd4jStatus validateAndExecute(sd::graph::Context& block);
//                                                                                 };
//                                                                                 REGISTER_H(NAME)

// #define CONFIGURABLE_OP_IMPL(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)        NAME::NAME() : sd::ops::DeclarableOp(NIN, NOUT, #NAME, INPLACEABLE, TARGS, IARGS) { };
//                                                                                 REGISTER_C(NAME)
//                                                                                 sd::ShapeList* sd::ops::NAME::calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block) {
//                                                                                     auto shapeList = SHAPELIST();
//                                                                                     auto opLimit = this->getOpDescriptor()->getNumberOfOutputs() < 1 ? block.width() : this->getOpDescriptor()->getNumberOfOutputs();
//                                                                                     for (int e = 0; e < opLimit; e++) {
//                                                                                         auto newshape = ConstantShapeHelper::getInstance().createShapeInfo(ArrayOptions::dataType(inputShape->at(e)), shape::order(inputShape->at(e)), shape::rank(inputShape->at(e)), shape::shapeOf(inputShape->at(e)));
//                                                                                         shapeList->push_back(newshape);
//                                                                                     }
//                                                                                     return shapeList;
//                                                                                 }
//                                                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(Context& block)

// #define DECLARE_REDUCTION_OP(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)        class ND4J_EXPORT NAME: public sd::ops::DeclarableReductionOp {
//                                                                                 public:
//                                                                                     NAME();
//                                                                                 protected:
//                                                                                     void registerTypes();
//                                                                                     Nd4jStatus validateAndExecute(Context& block);
//                                                                                 };
//                                                                                 REGISTER_H(NAME)

// #define REDUCTION_OP_IMPL(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)           NAME::NAME() : sd::ops::DeclarableReductionOp(NIN, NOUT, #NAME, INPLACEABLE, TARGS, IARGS) { };
//                                                                                 REGISTER_C(NAME)
//                                                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)


// #define DECLARE_CUSTOM_OP(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)           class ND4J_EXPORT NAME: public sd::ops::DeclarableCustomOp {
//                                                                                 protected:
//                                                                                     void registerTypes();
//                                                                                     Nd4jStatus validateAndExecute(Context& block);
//                                                                                 public:
//                                                                                     NAME();
//                                                                                     sd::ShapeList* calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block);
//                                                                                 };
//                                                                                 REGISTER_H(NAME)

// #define CUSTOM_OP_IMPL(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)              NAME::NAME(): sd::ops::DeclarableCustomOp(NIN, NOUT, #NAME, INPLACEABLE, TARGS, IARGS) { };
//                                                                                 REGISTER_C(NAME)
//                                                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)

// this declaration MUST follow DECLARE_CUSTOM_OP
// #define DECLARE_SHAPE_FN(NAME)                                                  sd::ShapeList* sd::ops::NAME::calculateOutputShape(sd::ShapeList* inputShape, sd::graph::Context& block)


// #define DECLARE_SAME_TYPE(NAME)                                                 void sd::ops::NAME::registerTypes() {this->getOpDescriptor()->setSameMode(true);}

// #define DECLARE_TYPES(NAME)                                                     void sd::ops::NAME::registerTypes()
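// Illustrative sketch (not part of the generated bindings): a custom op is normally wired up by
// pairing DECLARE_CUSTOM_OP on the header side with CUSTOM_OP_IMPL, DECLARE_SHAPE_FN and
// DECLARE_TYPES on the implementation side; the other DECLARE_*/_IMPL pairs above follow the same
// header/implementation split. The op name and bodies below are hypothetical:
//
//   // header side
//   DECLARE_CUSTOM_OP(my_copy_op, 1, 1, false, 0, 0);
//
//   // implementation side
//   CUSTOM_OP_IMPL(my_copy_op, 1, 1, false, 0, 0) {
//       auto input  = INPUT_VARIABLE(0);
//       auto output = OUTPUT_VARIABLE(0);
//       output->assign(input);
//       return Status::OK();
//   }
//
//   DECLARE_SHAPE_FN(my_copy_op) {            // must follow DECLARE_CUSTOM_OP, as noted below
//       Nd4jLong* newShape;
//       COPY_SHAPE(inputShape->at(0), newShape);
//       return SHAPELIST(CONSTANT(newShape));
//   }
//
//   DECLARE_TYPES(my_copy_op) {
//       getOpDescriptor()->setAllowedInputTypes(sd::DataType::ANY);
//   }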

// #define DECLARE_BROADCASTABLE_OP(NAME,TARGS, IARGS)                             class ND4J_EXPORT NAME: public sd::ops::BroadcastableOp {
//                                                                                 protected:
//                                                                                     void registerTypes();
//                                                                                     Nd4jStatus validateAndExecute(Context& block);
//                                                                                 public:
//                                                                                     NAME();
//                                                                                 };
//                                                                                 REGISTER_H(NAME)

// #define DECLARE_BROADCASTABLE_BOOL_OP(NAME,TARGS, IARGS)                        class ND4J_EXPORT NAME: public sd::ops::BroadcastableBoolOp {
//                                                                                 protected:
//                                                                                     void registerTypes();
//                                                                                     Nd4jStatus validateAndExecute(Context& block);
//                                                                                 public:
//                                                                                     NAME();
//                                                                                 };
//                                                                                 REGISTER_H(NAME)


// #define BROADCASTABLE_OP_IMPL(NAME, TARGS, IARGS)                               NAME::NAME(): sd::ops::BroadcastableOp(#NAME, TARGS, IARGS) { };
//                                                                                 REGISTER_C(NAME)
//                                                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)

// #define BROADCASTABLE_BOOL_OP_IMPL(NAME, TARGS, IARGS)                          NAME::NAME(): sd::ops::BroadcastableBoolOp(#NAME, TARGS, IARGS) { };
//                                                                                 REGISTER_C(NAME)
//                                                                                 Nd4jStatus sd::ops::NAME::validateAndExecute(sd::graph::Context& block)


// #define DECLARE_DEVICE_OP(NAME, NIN, NOUT, INPLACEABLE, TARGS, IARGS)

// #define REPLICATE_SHAPE(SRC, TGT)   if (shape::order(SRC) == 'c')
//                                         shape::shapeBuffer(shape::rank(SRC), sd::ArrayOptions::dataType(SRC), shape::shapeOf(SRC), TGT);
//                                     else
//                                         shape::shapeBufferFortran(shape::rank(SRC),  sd::ArrayOptions::dataType(SRC), shape::shapeOf(SRC), TGT);


// #ifdef __CUDABLAS__

// #else

// #define ALLOCATE_SPECIAL(VARIABLE, WORKSPACE, LENGTH, TT) VARIABLE = nullptr;
// #define RELEASE_SPECIAL(VARIABLE, WORKSPACE)

// #endif

// #ifdef _RELEASE

// #define ALLOCATE(VARIABLE, WORKSPACE, LENGTH, TT)   if (WORKSPACE == nullptr) {VARIABLE = new TT[LENGTH]; } else {VARIABLE = reinterpret_cast<TT *>(WORKSPACE->allocateBytes(LENGTH * sizeof(TT))); }; memset(VARIABLE, 0, LENGTH * sizeof(TT));
// #define RELEASE(VARIABLE, WORKSPACE)    if (WORKSPACE == nullptr) { delete[] VARIABLE;};

// #else

// #define ALLOCATE(VARIABLE, WORKSPACE, LENGTH, TT)   if (WORKSPACE == nullptr) {VARIABLE = new TT[LENGTH]; sd::memory::MemoryTracker::getInstance().countIn(sd::memory::MemoryType::HOST, VARIABLE, LENGTH * sizeof(TT)); } else {VARIABLE = reinterpret_cast<TT *>(WORKSPACE->allocateBytes(LENGTH * sizeof(TT))); }; memset(VARIABLE, 0, LENGTH * sizeof(TT));
// #define RELEASE(VARIABLE, WORKSPACE)    if (WORKSPACE == nullptr) { sd::memory::MemoryTracker::getInstance().countOut(VARIABLE); delete[] VARIABLE;};

// #endif
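// Usage sketch (illustrative; buffer name and length are hypothetical): ALLOCATE either news a
// plain array or draws from the supplied workspace and zeroes the memory, while RELEASE frees
// only what was heap-allocated, so the two are used as a pair with the same workspace:
//
//   int *buffer = nullptr;
//   ALLOCATE(buffer, block.getWorkspace(), 256, int);
//   // ... use buffer ...
//   RELEASE(buffer, block.getWorkspace());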

// #define CONSTANT(SHAPE) ConstantShapeHelper::getInstance().createFromExisting(SHAPE, block.workspace())



// #define STORE_RESULT(A)     this->storeResult(block, 0, A)
// #define OVERWRITE_RESULT(A)     this->overwriteResult(block, 0, A)
// #define OVERWRITE_2_RESULTS(A, B)     this->overwriteResult(block, 0, A); this->overwriteResult(block, 1, B)
// #define STORE_2_RESULTS(A, B)   this->storeResult(block, 0, A); this->storeResult(block, 1, B)
// #define STORE_3_RESULTS(A, B, C)    this->storeResult(block, 0, A); this->storeResult(block, 1, B); this->storeResult(block, 2, C)
// #define STORE_4_RESULTS(A, B, C, D)     this->storeResult(block, 0, A); this->storeResult(block, 1, B); this->storeResult(block, 2, C); this->storeResult(block, 3, D)
// #define STORE_5_RESULTS(A, B, C, D, E)      this->storeResult(block, 0, A); this->storeResult(block, 1, B); this->storeResult(block, 2, C); this->storeResult(block, 3, D); this->storeResult(block, 4, E)
// #define BROADCAST_CHECK_EMPTY(X,Y,Z)     if(X->isEmpty() || Y->isEmpty()){ if(!Z->isEmpty()){ throw std::invalid_argument("Broadcast op validation failed: if x or y are empty, z must be empty");} return Status::OK();}

// #define STASH(NAME, ARRAY)  block.getStash()->storeArray(block.getNodeId(), NAME, ARRAY);
// #define CHECK_STASH(NAME)   block.getStash()->checkStash(block.getNodeId(), NAME);
// #define UNSTASH(NAME)       block.getStash()->extractArray(block.getNodeId(), NAME);

// #define INPUT_VARIABLE(INDEX)     block.array(INDEX)
// #define OUTPUT_VARIABLE(INDEX)    reinterpret_cast<sd::NDArray *>(this->getZ(block, INDEX))
// #define OUTPUT_NULLIFIED(INDEX)    reinterpret_cast<sd::NDArray *>(this->getNullifiedZ(block, INDEX))

// #define INPUT_LIST(INDEX)     reinterpret_cast<sd::NDArrayList *>(block.getVariable(INDEX)->getNDArrayList())

// #define D_ARG(INDEX)     block.getDArguments()->at(INDEX)
// #define INT_ARG(INDEX)     block.getIArguments()->at(INDEX)
// #define I_ARG(INDEX)     INT_ARG(INDEX)
// #define T_ARG(INDEX)     block.getTArguments()->at(INDEX)
// #define B_ARG(INDEX)     block.getBArguments()->at(INDEX)
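// Usage sketch (illustrative, inside a hypothetical validateAndExecute body): these accessors
// pull inputs, outputs and scalar arguments out of the graph Context ("block") passed to the op:
//
//   auto x    = INPUT_VARIABLE(0);    // first input array
//   auto z    = OUTPUT_VARIABLE(0);   // first output array
//   auto axis = INT_ARG(0);           // first integer argument
//   auto eps  = T_ARG(0);             // first floating-point argument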


// #define COPY_SHAPE(SRC, TGT)    TGT = ShapeBuilders::copyShapeInfo(SRC, true, block.getWorkspace())

// #define COPY_SHAPE_EX(SRC, TGT, WORKSPACE)    TGT = ShapeBuilders::copyShapeInfo(SRC, true, WORKSPACE)

// define macros that force the compiler to inline functions
// #ifdef __clang__
// #define INLINE_LOOPS
// #define FORCEINLINE inline
// #elif _MSC_VER
// #define FORCEINLINE __forceinline
// #elif __GNUC__
// #define INLINE_LOOPS
// #define FORCEINLINE __attribute__((always_inline)) inline 
// #elif __CUDACC__ 
// #else
// #define FORCEINLINE inline 
// #endif


// #ifdef __CUDACC__

// #else

// #define _CUDA_H
// #define _CUDA_D
// #define _CUDA_G
// #define _CUDA_HD

// #endif // CUDACC

// #define CHECK_ALLOC(PTR, MSG, BYTES) if (PTR == nullptr) { throw sd::allocation_exception::build(MSG, BYTES); };



// #ifdef __CUDABLAS__

// #else

// #define LAMBDA_T(X, ...) [__VA_ARGS__] (T X) -> T
// #define LAMBDA_TT(X, Y, ...) [__VA_ARGS__] (T X, T Y) -> T
// #define LAMBDA_TTT(t, u, v, ...) [__VA_ARGS__] (T t, T u, T v) -> T

// #define ILAMBDA_T(X, ...) [__VA_ARGS__] (Nd4jLong _idx, T X) -> T
// #define ILAMBDA_TT(X, Y, ...) [__VA_ARGS__] (Nd4jLong _idx, T X, T Y) -> T

// #define LAMBDA_D(X, ...) [__VA_ARGS__] (double X) -> double
// #define LAMBDA_DD(X, Y, ...) [__VA_ARGS__] (double X, double Y) -> double
// #define LAMBDA_DDD(t, u, v, ...) [__VA_ARGS__] (double t, double u, double v) -> double

// #define LAMBDA_H(X, ...) [__VA_ARGS__] (float16 X) -> float16
// #define LAMBDA_HH(X, Y, ...) [__VA_ARGS__] (float16 X, float16 Y) -> float16

// #define ILAMBDA_D(X, ...) [__VA_ARGS__] (Nd4jLong _idx, double X) -> double
// #define ILAMBDA_DD(X, Y, ...) [__VA_ARGS__] (Nd4jLong _idx, double X, double Y) -> double

// #define ILAMBDA_F(X, ...) [__VA_ARGS__] (Nd4jLong _idx, float X) -> float
// #define ILAMBDA_FF(X, Y, ...) [__VA_ARGS__] (Nd4jLong _idx, float X, float Y) -> float

// #define LAMBDA_F(X, ...) [__VA_ARGS__] (float X) -> float
// #define LAMBDA_FF(X, Y, ...) [__VA_ARGS__] (float X, float Y) -> float
// #define LAMBDA_FFF(t, u, v, ...) [__VA_ARGS__] (float t, float u, float v) -> float

// #endif
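// Illustrative note (not part of the generated file): on this non-CUDA branch the LAMBDA_*
// helpers simply spell out typed C++ lambdas; the trailing varargs become the capture list.
// Assuming a surrounding template context where T is the element type, a hypothetical functor:
//
//   T shift = (T) 1.0f;
//   auto plusShift = LAMBDA_T(x, shift) { return x + shift; };
//   // ...which, per the definition above, expands to:
//   //   [shift] (T x) -> T { return x + shift; }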

// stuff for benchmarks
// #define GENERATE_XYZ() [&] (ResultSet &x, ResultSet &y, ResultSet &z)
// #define GENERATE_XZ() [&] (ResultSet &x, ResultSet &z)

// #define PARAMETRIC_XYZ() [&] (Parameters &p, ResultSet &x, ResultSet &y, ResultSet &z)
// #define PARAMETRIC_XZ() [&] (Parameters &p, ResultSet &x, ResultSet &z)

// #define PARAMETRIC_D() [&] (Parameters &p) -> Context*


// #ifdef __CUDABLAS__
// #endif

// #endif


// Parsed from ops/InputType.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef ND4J_INPUTTYPE_H
// #define ND4J_INPUTTYPE_H
        /** enum sd::ops::InputType */
        public static final int
            InputType_BOOLEAN = 0,
            InputType_NUMERIC = 1,
            InputType_STRINGULAR = 2,
            InputType_NUMERIC_SET = 3,
            InputType_STRINGULAR_SET = 4;
    


// #endif

// Parsed from ops/declarable/OpDescriptor.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_OPDESCRIPTOR_H
// #define LIBND4J_OPDESCRIPTOR_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

        /**
        *   This class is a very basic info holder for ops; pretty much a bean/POJO.
        *
        */
        @Namespace("sd::ops") @NoOffset public static class OpDescriptor extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public OpDescriptor(Pointer p) { super(p); }
        
            // default constructor
            public OpDescriptor(int numInputs, int numOutputs, @StdString BytePointer opName, @Cast("bool") boolean allowsInplace) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace); }
            private native void allocate(int numInputs, int numOutputs, @StdString BytePointer opName, @Cast("bool") boolean allowsInplace);
            public OpDescriptor(int numInputs, int numOutputs, @StdString String opName, @Cast("bool") boolean allowsInplace) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace); }
            private native void allocate(int numInputs, int numOutputs, @StdString String opName, @Cast("bool") boolean allowsInplace);

            // constructor for boolean ops
            public OpDescriptor(int numInputs, @StdString BytePointer opName, @Cast("bool") boolean isScalar) { super((Pointer)null); allocate(numInputs, opName, isScalar); }
            private native void allocate(int numInputs, @StdString BytePointer opName, @Cast("bool") boolean isScalar);
            public OpDescriptor(int numInputs, @StdString String opName, @Cast("bool") boolean isScalar) { super((Pointer)null); allocate(numInputs, opName, isScalar); }
            private native void allocate(int numInputs, @StdString String opName, @Cast("bool") boolean isScalar);

            // default constructor

            // constructor for configurable op
            public OpDescriptor(int numInputs, int numOutputs, @Cast("char*") String opName, @Cast("bool") boolean allowsInplace, int tArgs, int iArgs) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, tArgs, iArgs); }
            private native void allocate(int numInputs, int numOutputs, @Cast("char*") String opName, @Cast("bool") boolean allowsInplace, int tArgs, int iArgs);
            public OpDescriptor(int numInputs, int numOutputs, @Cast("char*") BytePointer opName, @Cast("bool") boolean allowsInplace, int tArgs, int iArgs) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, tArgs, iArgs); }
            private native void allocate(int numInputs, int numOutputs, @Cast("char*") BytePointer opName, @Cast("bool") boolean allowsInplace, int tArgs, int iArgs);

            // constructor for non-configurable divergent op
            public OpDescriptor(int numInputs, int numOutputs, @StdString BytePointer opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, divergent); }
            private native void allocate(int numInputs, int numOutputs, @StdString BytePointer opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent);
            public OpDescriptor(int numInputs, int numOutputs, @StdString String opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, divergent); }
            private native void allocate(int numInputs, int numOutputs, @StdString String opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent);

            // constructor for non-configurable divergent op

            // constructor for configurable divergent op
            public OpDescriptor(int numInputs, int numOutputs, @Cast("char*") String opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent, int tArgs, int iArgs) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, divergent, tArgs, iArgs); }
            private native void allocate(int numInputs, int numOutputs, @Cast("char*") String opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent, int tArgs, int iArgs);
            public OpDescriptor(int numInputs, int numOutputs, @Cast("char*") BytePointer opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent, int tArgs, int iArgs) { super((Pointer)null); allocate(numInputs, numOutputs, opName, allowsInplace, divergent, tArgs, iArgs); }
            private native void allocate(int numInputs, int numOutputs, @Cast("char*") BytePointer opName, @Cast("bool") boolean allowsInplace, @Cast("bool") boolean divergent, int tArgs, int iArgs);

            // constructor for logical ops (while, scope, etc)
            public OpDescriptor(@Cast("char*") String opName, @Cast("bool") boolean isLogic) { super((Pointer)null); allocate(opName, isLogic); }
            private native void allocate(@Cast("char*") String opName, @Cast("bool") boolean isLogic);
            public OpDescriptor(@Cast("char*") BytePointer opName, @Cast("bool") boolean isLogic) { super((Pointer)null); allocate(opName, isLogic); }
            private native void allocate(@Cast("char*") BytePointer opName, @Cast("bool") boolean isLogic);

            public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef OpDescriptor other);

            // default destructor

            // this method returns minimal expected number of T arguments
            public native int getNumberOfTArgs();

            // this method returns minimal expected number of Integer arguments
            public native int getNumberOfIArgs();

            // this method returns minimal expected number of inputs
            public native int getNumberOfInputs();

            // this method returns hash code for this operation
            public native @Cast("Nd4jLong") long getHash();

            // this method returns minimal expected number of outputs
            public native int getNumberOfOutputs();

            // this method returns opName (can be empty)
            public native @StdString @Cast({"char*", "std::string*"}) BytePointer getOpName();

            // returns TRUE if this op is divergent. FALSE otherwise
            public native @Cast("bool") boolean isDivergent();

            // returns TRUE if this op allows in-place execution
            public native @Cast("bool") boolean allowsInplace();

            // this method allows you to enable/disable in-place execution for a given op
            public native void allowInplace(@Cast("bool") boolean reallyAllow);

            // this method returns opNum (applicable for legacy XYZ ops only)
            public native int getOpNum();

            // this method allows you to set a specific opNum
            public native void setOpNum(int opNum);

            public native void setHash(@Cast("Nd4jLong") long hash);

            public native @Cast("sd::ops::InputType") int inputType();



            public native OpDescriptor setInputType(@Cast("sd::ops::InputType") int type);
            public native OpDescriptor setAllowedInputTypes(int index, @Cast("sd::DataType*") @StdVector IntPointer dtype);
            public native OpDescriptor setAllowedInputTypes(int index, @Cast("sd::DataType*") @StdVector IntBuffer dtype);
            public native OpDescriptor setAllowedInputTypes(int index, @Cast("sd::DataType*") @StdVector int[] dtype);
            public native OpDescriptor setAllowedOutputTypes(int index, @Cast("sd::DataType*") @StdVector IntPointer dtype);
            public native OpDescriptor setAllowedOutputTypes(int index, @Cast("sd::DataType*") @StdVector IntBuffer dtype);
            public native OpDescriptor setAllowedOutputTypes(int index, @Cast("sd::DataType*") @StdVector int[] dtype);
            public native OpDescriptor setAllowedInputTypes(int index,  @Cast("sd::DataType") int dtype);
            public native OpDescriptor setAllowedOutputTypes(int index, @Cast("sd::DataType") int dtype);
            public native OpDescriptor setAllowedInputTypes(@Cast("sd::DataType") int dtype);
            public native OpDescriptor setAllowedOutputTypes(@Cast("sd::DataType") int dtype);
            public native OpDescriptor allowOverride(@Cast("bool") boolean reallyAllow);
            public native OpDescriptor setSameMode(@Cast("bool") boolean reallySame);
            public native OpDescriptor setInputType(int idx, @Cast("sd::DataType") int dtype);
            public native OpDescriptor setOutputType(int idx, @Cast("sd::DataType") int dtype);

            public native @Cast("sd::DataType*") @StdVector IntPointer getOutputTypesForOutput(int index);

            public native @Cast("bool") boolean checkInputMatch(int index, @Cast("sd::DataType") int dataType);
            public native @Cast("bool") boolean checkOutputMatch(int index, @Cast("sd::DataType") int dataType);
            public native @Cast("bool") boolean isSameMode();

            public native @Cast("bool") boolean isInherit(int index);
        }
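
        // Illustrative usage sketch (added by the editor, not part of the generated bindings):
        // it builds an OpDescriptor via the (numInputs, numOutputs, opName, allowsInplace)
        // constructor above and queries its metadata. The op name "my_op" and the counts used
        // here are hypothetical placeholder values.
        private static void describeOpDescriptorSketch() {
            OpDescriptor descriptor = new OpDescriptor(1, 1, "my_op", false);
            int numInputs = descriptor.getNumberOfInputs();    // minimal expected number of inputs
            int numOutputs = descriptor.getNumberOfOutputs();  // minimal expected number of outputs
            boolean inplace = descriptor.allowsInplace();      // whether in-place execution is allowed
            boolean divergent = descriptor.isDivergent();      // whether the op has divergent outputs
            System.out.println(descriptor.getOpName().getString() + ": " + numInputs + " -> " + numOutputs
                    + ", inplace=" + inplace + ", divergent=" + divergent);
        }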
    


// #endif //LIBND4J_OPDESCRIPTOR_H


// Parsed from ops/declarable/PlatformHelper.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef SD_PLATFORMHELPER_H
// #define SD_PLATFORMHELPER_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
            /**
             * This abstract class defines the methods used by platform-specific helper implementations
             */
            @Namespace("sd::ops::platforms") @NoOffset public static class PlatformHelper extends Pointer {
                static { Loader.load(); }
                /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                public PlatformHelper(Pointer p) { super(p); }
            

                public native @StdString BytePointer name();

                public native @Cast("samediff::Engine") int engine();

                public native @Cast("Nd4jLong") long hash();

                /**
                 * This method checks whether the given helper can be used with the given input/output/configuration options
                 *
                 * @param context
                 * @return
                 */
                public native @Cast("bool") boolean isUsable(@ByRef Context context);

                /**
                 * This method invokes the helper. Typically this method replaces the actual op execution
                 *
                 * @param context
                 * @return
                 */
                public native @Cast("Nd4jStatus") int invokeHelper(@ByRef Context context);

                /**
                 * Helper method, needed for compatibility with DeclarableOp macros
                 * @param ctx
                 * @param inputId
                 * @return
                 */
                public native NDArray getZ(@ByRef Context ctx, int inputId);

                /**
                 * Helper method, needed for compatibility with DeclarableOp macros
                 * @param ctx
                 * @param inputId
                 * @return
                 */
                public native NDArray getNullifiedZ(@ByRef Context ctx, int inputId);
            }
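
                // Illustrative usage sketch (added by the editor, not part of the generated bindings):
                // given an already-obtained PlatformHelper and a prepared op Context (both assumed to
                // come from elsewhere), it checks applicability before delegating execution to the helper.
                private static int tryInvokePlatformHelperSketch(PlatformHelper helper, Context context) {
                    if (!helper.isUsable(context)) {
                        // the helper cannot handle this input/output/configuration combination
                        return -1; // placeholder "not applicable" code, not an official Nd4jStatus value
                    }
                    return helper.invokeHelper(context); // returns an Nd4jStatus code
                }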
        
    



// #endif //SD_PLATFORMHELPER_H


// Parsed from ops/declarable/BroadcastableOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver on 6/6/2018.
//

// #ifndef LIBND4J_BROADCASTABLEOP_H
// #define LIBND4J_BROADCASTABLEOP_H

// #include 
// #include "OpDescriptor.h"
// #include "DeclarableOp.h"
// #include "DeclarableCustomOp.h"
        @Namespace("sd::ops") public static class BroadcastableOp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public BroadcastableOp(Pointer p) { super(p); }
        

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
    



// #endif //LIBND4J_BROADCASTABLEOP_H


// Parsed from ops/declarable/BroadcastableBoolOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver on 6/6/2018.
//

// #ifndef SD_BROADCASTABLEBOOLOP_H
// #define SD_BROADCASTABLEBOOLOP_H

// #include 
// #include "OpDescriptor.h"
// #include "DeclarableOp.h"
// #include "DeclarableCustomOp.h"
        @Namespace("sd::ops") public static class BroadcastableBoolOp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public BroadcastableBoolOp(Pointer p) { super(p); }
        

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
    



// #endif //SD_BROADCASTABLEBOOLOP_H


// Parsed from ops/declarable/DeclarableOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_DECLARABLE_OPS_H
// #define LIBND4J_DECLARABLE_OPS_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include "OpDescriptor.h"
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
//#include 

// #include 
// #include 
// #include 

        @Namespace("sd::ops") public native @Cast("Nd4jStatus") int conditionHelper(@Cast("char*") String file, int line, int condition, int argNumber, @Cast("char*") String format);
        @Namespace("sd::ops") public native @Cast("Nd4jStatus") int conditionHelper(@Cast("char*") BytePointer file, int line, int condition, int argNumber, @Cast("char*") BytePointer format);

        /**
         * This class is the basic building block of Graph Operations. Any CustomOp out there is built on top of this "abstract" class.
         *
         */
        @Namespace("sd::ops") @NoOffset public static class DeclarableOp extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public DeclarableOp(Pointer p) { super(p); }
        
            // for special cases, like BooleanOps

            // regular constructors

            // for LogicalOps

            // default destructor

            // this method returns OpDescriptor, describing this Op instance
            public native OpDescriptor getOpDescriptor();

            public native @Cast("Nd4jStatus") int validateDataTypes(@ByRef Context block);

            /**
            *   This method should be available in each implemented Op, and should return the Op's output shape(s) for the given input shape(s)
            */
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);

            /**
             * Returns opName
             *
             * @return
             */
            public native @StdString @Cast({"char*", "std::string*"}) BytePointer getOpName();

            /**
             * Returns opHash
             */
            public native @Cast("Nd4jLong") long getOpHash();

            /**
             * This method sets arguments for op
             */
//            void setArguments();

            /**
             * This method returns pointer to results
             */
//            void getResults();

            /**
             * This method executes the given Op
             *
             * @param block
             * @return 0 if OK, error code otherwise
             */
            public native @Cast("Nd4jStatus") int execute(Context block);

            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs);

            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @Cast("Nd4jStatus") int execute(@Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);

            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs);

            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet evaluate(@Const @ByRef NDArrayVector inputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector boolean[] bArgs/*=std::vector()*/, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/);

            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector boolean[] bArgs, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector boolean[] bArgs);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector boolean[] bArgs, @Cast("sd::DataType*") @StdVector IntPointer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoublePointer tArgs, @Cast("Nd4jLong*") @StdVector LongPointer iArgs, @Cast("bool*") @StdVector boolean[] bArgs);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs, @Cast("sd::DataType*") @StdVector IntBuffer dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector DoubleBuffer tArgs, @Cast("Nd4jLong*") @StdVector LongBuffer iArgs, @Cast("bool*") @StdVector BooleanPointer bArgs);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector boolean[] bArgs, @Cast("sd::DataType*") @StdVector int[] dArgs/*=std::vector()*/, @Cast("bool") boolean isInplace/*=false*/, @Cast("sd::DataType") int type/*=sd::DataType::FLOAT32*/);
            public native @Cast("Nd4jStatus") int execute(@ByRef RandomGenerator rng, @Const @ByRef NDArrayVector inputs, @Const @ByRef NDArrayVector outputs, @StdVector double[] tArgs, @Cast("Nd4jLong*") @StdVector long[] iArgs, @Cast("bool*") @StdVector boolean[] bArgs);

            public native @ByVal ResultSet execute(@Const @ByRef OpArgsHolder holder, @Cast("bool") boolean isInplace/*=false*/);
            public native @ByVal ResultSet execute(@Const @ByRef OpArgsHolder holder);


            // These methods provide various validation options
            public native @Cast("Nd4jStatus") int validateNonEmptyInput(@ByRef Context block);

            // this method checks if all input arrays have equal lengths
            public native @Cast("Nd4jStatus") int validateInputLengthMatch(@ByRef Context block);

            // this method checks if all input arrays have the same shapes (orders/strides are NOT checked)
            public native @Cast("Nd4jStatus") int validateInputDimensionsMatch(@ByRef Context block);

            // this method checks if all input arrays have the same orders
            public native @Cast("Nd4jStatus") int validateOrdersMatch(@ByRef Context block);

            // this method checks if all input arrays are 2D
            public native @Cast("Nd4jStatus") int validateInput2D(@ByRef Context block);

            // this method checks if all input arrays are 3D
            public native @Cast("Nd4jStatus") int validateInput3D(@ByRef Context block);

            // this method checks if all input arrays are 4D
            public native @Cast("Nd4jStatus") int validateInput4D(@ByRef Context block);

            // this method checks if all input arrays have the given rank
            public native @Cast("Nd4jStatus") int validateInputDimensions(@ByRef Context block, int rank);

            // this method checks if number of available arguments matches op expectations
            public native @Cast("Nd4jStatus") int validateArguments(@ByRef Context block);
        }
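
        // Illustrative usage sketch (added by the editor, not part of the generated bindings):
        // given a concrete DeclarableOp and a prepared Context (assumed to be built elsewhere),
        // it runs one of the validation helpers above and then executes the op. The comparison
        // against 0 assumes that 0 denotes a successful Nd4jStatus.
        private static int validateAndExecuteSketch(DeclarableOp op, Context block) {
            int validation = op.validateNonEmptyInput(block);
            if (validation != 0) {
                return validation; // report the validation failure instead of executing
            }
            return op.execute(block); // returns an Nd4jStatus code
        }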
    


// #endif //LIBND4J_DECLARABLE_OPS_H


// Parsed from ops/declarable/DeclarableListOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_DECLARABLE_LIST_OP_H
// #define LIBND4J_DECLARABLE_LIST_OP_H

// #include 
// #include 
// #include 
// #include 
        @Namespace("sd::ops") public static class DeclarableListOp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public DeclarableListOp(Pointer p) { super(p); }
        

            
            public native @Cast("Nd4jStatus") int execute(Context block);
            public native @ByVal ResultSet execute(NDArrayList list, @ByRef NDArrayVector inputs, @StdVector DoublePointer tArgs, @StdVector IntPointer iArgs);
            public native @ByVal ResultSet execute(NDArrayList list, @ByRef NDArrayVector inputs, @StdVector DoubleBuffer tArgs, @StdVector IntBuffer iArgs);
            public native @ByVal ResultSet execute(NDArrayList list, @ByRef NDArrayVector inputs, @StdVector double[] tArgs, @StdVector int[] iArgs);

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
    


// #endif

// Parsed from ops/declarable/DeclarableReductionOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 07.10.2017.
//

// #ifndef LIBND4J_DECLARABLE_REDUCTION_OP_H
// #define LIBND4J_DECLARABLE_REDUCTION_OP_H

// #include 
        @Namespace("sd::ops") public static class DeclarableReductionOp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public DeclarableReductionOp(Pointer p) { super(p); }
        

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
    


// #endif //LIBND4J_DECLARABLE_REDUCTION_OP_H


// Parsed from ops/declarable/DeclarableCustomOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 07.10.2017.
//

// #ifndef LIBND4J_DECLARABLECUSTOMOP_H
// #define LIBND4J_DECLARABLECUSTOMOP_H

// #include 
        @Namespace("sd::ops") public static class DeclarableCustomOp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public DeclarableCustomOp(Pointer p) { super(p); }
        

            public native ShapeList calculateOutputShape(ShapeList inputShapes, @ByRef Context block);
        }
    


// #endif //LIBND4J_DECLARABLECUSTOMOP_H


// Parsed from ops/declarable/BooleanOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 13.10.2017.
//

// #ifndef LIBND4J_BOOLEANOP_H
// #define LIBND4J_BOOLEANOP_H

// #include 
// #include "OpDescriptor.h"
// #include "DeclarableOp.h"
        @Namespace("sd::ops") @NoOffset public static class BooleanOp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public BooleanOp(Pointer p) { super(p); }
        

            public native @Cast("bool") boolean verify(@Const @ByRef NDArrayVector args);
            public native @Cast("bool") boolean verify(@ByRef Context block);

            public native @Cast("Nd4jStatus") int execute(Context block);

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
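
        // Illustrative usage sketch (added by the editor, not part of the generated bindings):
        // a BooleanOp can be queried through verify() instead of execute(), yielding a plain
        // Java boolean for a prepared Context (assumed to come from elsewhere).
        private static boolean verifyBooleanOpSketch(BooleanOp op, Context block) {
            return op.verify(block);
        }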
    




// #endif //LIBND4J_BOOLEANOP_H

// Parsed from ops/declarable/LogicOp.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 15.10.2017.
//

// #ifndef LIBND4J_LOGICOP_H
// #define LIBND4J_LOGICOP_H

// #include "DeclarableOp.h"

        /**
         * Logic ops are unique snowflakes in any Graph. They dramatically change the Graph execution process by introducing loops, conditions, etc.
         *
         * Their code is part of the GraphExecutioner logic, but we still want them to be expressible via the Graph
         * \tparam T
         */
        @Namespace("sd::ops") public static class LogicOp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public LogicOp(Pointer p) { super(p); }
        
            public LogicOp(@Cast("char*") String name) { super((Pointer)null); allocate(name); }
            private native void allocate(@Cast("char*") String name);
            public LogicOp(@Cast("char*") BytePointer name) { super((Pointer)null); allocate(name); }
            private native void allocate(@Cast("char*") BytePointer name);

            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
    



// #endif //LIBND4J_LOGICOP_H


// Parsed from ops/declarable/OpRegistrator.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 07.10.2017.
//

// #ifndef LIBND4J_OPREGISTRATOR_H
// #define LIBND4J_OPREGISTRATOR_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// handlers part
// #include 
// #include 

// #ifndef __JAVACPP_HACK__

// #endif
        /**
        *   This class provides runtime ops lookup, based on opName or opHash.
        *   To build the lookup directory we use the *_OP_IMPL macro, which puts static structs into .cpp files at compile time,
        *   so once the binary is executed, the static objects are initialized automatically and we get a list of all ops
        *   available at runtime via this singleton.
        *
        */
        @Namespace("sd::ops") @NoOffset public static class OpRegistrator extends Pointer {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public OpRegistrator(Pointer p) { super(p); }
        

            public native @ByRef OpRegistrator getInstance();

            public native void exitHandler();
            public native void sigIntHandler(int sig);
            public native void sigSegVHandler(int sig);

            
            public native @Cast("char*") String getAllCustomOperations();

            /**
            * This method registers an operation in our registry, so we can use it later
            *
            * @param op
            */
            public native @Cast("bool") boolean registerOperation(@Cast("char*") String name, DeclarableOp op);
            public native @Cast("bool") boolean registerOperation(@Cast("char*") BytePointer name, DeclarableOp op);
            public native @Cast("bool") boolean registerOperation(DeclarableOp op);

            public native void registerHelper(PlatformHelper op);

            public native @Cast("bool") boolean hasHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);

            public native DeclarableOp getOperation(@Cast("char*") String name);
            public native DeclarableOp getOperation(@Cast("char*") BytePointer name);
            public native DeclarableOp getOperation(@Cast("Nd4jLong") long hash);

            public native PlatformHelper getPlatformHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);

            public native @Cast("Nd4jLong*") @StdVector LongPointer getAllHashes();

            public native int numberOfOperations();
    }
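
        // Illustrative usage sketch (added by the editor, not part of the generated bindings):
        // given an OpRegistrator reference (assumed to be obtained elsewhere, e.g. via getInstance()),
        // it performs runtime lookups as described above. The op name "add" is a hypothetical
        // placeholder; lookups by hash work the same way through getOperation(long).
        private static void lookupOperationsSketch(OpRegistrator registrator) {
            int total = registrator.numberOfOperations();           // how many ops are registered
            String allOps = registrator.getAllCustomOperations();   // textual listing of all registered ops
            DeclarableOp byName = registrator.getOperation("add");  // lookup by opName
            System.out.println("registered ops: " + total + ", found by name: " + (byName != null));
        }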


        /*
         *  These structs are used to "register" our ops in OpRegistrator.
         */

    


// #endif //LIBND4J_OPREGISTRATOR_H


// Parsed from ops/declarable/CustomOperations.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 07.10.2017.
//

// #ifndef LIBND4J_CUSTOMOPERATIONS_H
// #define LIBND4J_CUSTOMOPERATIONS_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
    @Namespace("sd") public static class _loader extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public _loader(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public _loader(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public _loader position(long position) {
            return (_loader)super.position(position);
        }
        @Override public _loader getPointer(long i) {
            return new _loader((Pointer)this).position(position + i);
        }
    
        public _loader() { super((Pointer)null); allocate(); }
        private native void allocate();
    }

        // logic ops 
        @Namespace("sd::ops") public static class Switch extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Switch(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Switch(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Switch position(long position) {
                return (Switch)super.position(position);
            }
            @Override public Switch getPointer(long i) {
                return new Switch((Pointer)this).position(position + i);
            }
        
                                                                public Switch() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                                public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                            }
        @Namespace("sd::ops") public static class While extends LogicOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public While(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public While(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public While position(long position) {
                return (While)super.position(position);
            }
            @Override public While getPointer(long i) {
                return new While((Pointer)this).position(position + i);
            }
        
                                        public While() { super((Pointer)null); allocate(); }
                                        private native void allocate();
                                    }
        @Namespace("sd::ops") public static class Scope extends LogicOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Scope(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Scope(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Scope position(long position) {
                return (Scope)super.position(position);
            }
            @Override public Scope getPointer(long i) {
                return new Scope((Pointer)this).position(position + i);
            }
        
                                        public Scope() { super((Pointer)null); allocate(); }
                                        private native void allocate();
                                    }
        @Namespace("sd::ops") public static class Conditional extends LogicOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Conditional(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Conditional(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Conditional position(long position) {
                return (Conditional)super.position(position);
            }
            @Override public Conditional getPointer(long i) {
                return new Conditional((Pointer)this).position(position + i);
            }
        
                                        public Conditional() { super((Pointer)null); allocate(); }
                                        private native void allocate();
                                    }
        @Namespace("sd::ops") public static class Return extends LogicOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Return(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Return(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Return position(long position) {
                return (Return)super.position(position);
            }
            @Override public Return getPointer(long i) {
                return new Return((Pointer)this).position(position + i);
            }
        
                                        public Return() { super((Pointer)null); allocate(); }
                                        private native void allocate();
                                    }


        /**
         * This operation exposes the given arguments as its own outputs, but does so only once.
         * Subsequent calls will be served directly by this op.
         *
         * PLEASE NOTE: This is an internal graph operation, and usually it shouldn't be used directly.
         */
        @Namespace("sd::ops") public static class expose extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public expose(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public expose(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public expose position(long position) {
                return (expose)super.position(position);
            }
            @Override public expose getPointer(long i) {
                return new expose((Pointer)this).position(position + i);
            }
        
                                                                                    public expose() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
    


// #endif //LIBND4J_CUSTOMOPERATIONS_H


// Parsed from ops/declarable/headers/activations.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_HEADERS_ACTIVATIONS_H
// #define LIBND4J_HEADERS_ACTIVATIONS_H


// #include 
        /**
         * This is the Sigmoid activation function implementation
         * Math is: 1 / (1 + exp(-x))
         */
//         #if NOT_EXCLUDED(OP_sigmoid)
        @Namespace("sd::ops") public static class sigmoid extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sigmoid(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sigmoid(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sigmoid position(long position) {
                return (sigmoid)super.position(position);
            }
            @Override public sigmoid getPointer(long i) {
                return new sigmoid((Pointer)this).position(position + i);
            }
        
                                                                                    public sigmoid() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class sigmoid_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sigmoid_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sigmoid_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sigmoid_bp position(long position) {
                return (sigmoid_bp)super.position(position);
            }
            @Override public sigmoid_bp getPointer(long i) {
                return new sigmoid_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public sigmoid_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
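
        // Illustrative usage sketch (added by the editor, not part of the generated bindings):
        // the sigmoid op above inherits evaluate() from DeclarableOp, so it can be applied to a
        // pre-built NDArrayVector of inputs (assumed to be constructed elsewhere) and returns a
        // ResultSet holding the outputs.
        private static ResultSet applySigmoidSketch(NDArrayVector inputs) {
            sigmoid op = new sigmoid();
            return op.evaluate(inputs);
        }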

        /**
         * This is the Softsign activation function implementation
         * Math is: x / (1 + abs(x))
         */
//         #if NOT_EXCLUDED(OP_softsign)
        @Namespace("sd::ops") public static class softsign extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softsign(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softsign(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softsign position(long position) {
                return (softsign)super.position(position);
            }
            @Override public softsign getPointer(long i) {
                return new softsign((Pointer)this).position(position + i);
            }
        
                                                                                    public softsign() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class softsign_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softsign_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softsign_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softsign_bp position(long position) {
                return (softsign_bp)super.position(position);
            }
            @Override public softsign_bp getPointer(long i) {
                return new softsign_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public softsign_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the Tanh activation function implementation
         */
//         #if NOT_EXCLUDED(OP_tanh)
        @Namespace("sd::ops") public static class tanh extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tanh(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tanh(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tanh position(long position) {
                return (tanh)super.position(position);
            }
            @Override public tanh getPointer(long i) {
                return new tanh((Pointer)this).position(position + i);
            }
        
                                                                                    public tanh() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class tanh_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tanh_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tanh_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tanh_bp position(long position) {
                return (tanh_bp)super.position(position);
            }
            @Override public tanh_bp getPointer(long i) {
                return new tanh_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public tanh_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the Softplus activation function implementation
         * Math is: log(1 + exp(x))
         */
//         #if NOT_EXCLUDED(OP_softplus)
        @Namespace("sd::ops") public static class softplus extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softplus(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softplus(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softplus position(long position) {
                return (softplus)super.position(position);
            }
            @Override public softplus getPointer(long i) {
                return new softplus((Pointer)this).position(position + i);
            }
        
                                                                                    public softplus() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class softplus_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softplus_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softplus_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softplus_bp position(long position) {
                return (softplus_bp)super.position(position);
            }
            @Override public softplus_bp getPointer(long i) {
                return new softplus_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public softplus_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
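        // Illustrative sketch (not part of the generated bindings): a plain-Java reference for
        // the softplus math documented above, log(1 + exp(x)), written in a numerically stable
        // form, plus the derivative sigmoid(x) that the softplus_bp variant presumably applies
        // to the incoming gradient. The helper names are hypothetical, not library API.
        public static double softplusRef(double x) {
            // log(1 + exp(x)) == x + log1p(exp(-x)) for positive x, avoiding overflow
            return x > 0 ? x + Math.log1p(Math.exp(-x)) : Math.log1p(Math.exp(x));
        }
        public static double softplusGradRef(double x, double gradOut) {
            // d/dx log(1 + exp(x)) = sigmoid(x)
            return gradOut / (1.0 + Math.exp(-x));
        }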

        /**
         * This is the RELU activation function implementation.
         */
//         #if NOT_EXCLUDED(OP_relu)
        @Namespace("sd::ops") public static class relu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public relu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public relu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public relu position(long position) {
                return (relu)super.position(position);
            }
            @Override public relu getPointer(long i) {
                return new relu((Pointer)this).position(position + i);
            }
        
                                                                                    public relu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class relu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public relu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public relu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public relu_bp position(long position) {
                return (relu_bp)super.position(position);
            }
            @Override public relu_bp getPointer(long i) {
                return new relu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public relu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the SELU activation function implementation.
         */
//         #if NOT_EXCLUDED(OP_selu)
        @Namespace("sd::ops") public static class selu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public selu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public selu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public selu position(long position) {
                return (selu)super.position(position);
            }
            @Override public selu getPointer(long i) {
                return new selu((Pointer)this).position(position + i);
            }
        
                                                                                    public selu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class selu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public selu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public selu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public selu_bp position(long position) {
                return (selu_bp)super.position(position);
            }
            @Override public selu_bp getPointer(long i) {
                return new selu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public selu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the Leaky RELU activation function.
         * Math is: x < 0 ? alpha * x : x;
         */
//         #if NOT_EXCLUDED(OP_lrelu)
        @Namespace("sd::ops") public static class lrelu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lrelu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lrelu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lrelu position(long position) {
                return (lrelu)super.position(position);
            }
            @Override public lrelu getPointer(long i) {
                return new lrelu((Pointer)this).position(position + i);
            }
        
                                                                                    public lrelu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class lrelu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lrelu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lrelu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lrelu_bp position(long position) {
                return (lrelu_bp)super.position(position);
            }
            @Override public lrelu_bp getPointer(long i) {
                return new lrelu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public lrelu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
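        // Illustrative sketch (not generated code): the Leaky RELU math documented above,
        // x < 0 ? alpha * x : x, and its gradient. How the native op receives alpha is not
        // shown in this section and is assumed; the helper names are hypothetical.
        public static double lreluRef(double x, double alpha) {
            return x < 0 ? alpha * x : x;
        }
        public static double lreluGradRef(double x, double alpha, double gradOut) {
            // slope is alpha on the negative side, 1 elsewhere
            return x < 0 ? alpha * gradOut : gradOut;
        }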

        /**
         * This op is the ELU activation function.
         * Math is: x >= 0 ? x : exp(x) - 1;
         */
//         #if NOT_EXCLUDED(OP_elu)
        @Namespace("sd::ops") public static class elu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public elu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public elu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public elu position(long position) {
                return (elu)super.position(position);
            }
            @Override public elu getPointer(long i) {
                return new elu((Pointer)this).position(position + i);
            }
        
                                                                                    public elu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class elu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public elu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public elu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public elu_bp position(long position) {
                return (elu_bp)super.position(position);
            }
            @Override public elu_bp getPointer(long i) {
                return new elu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public elu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
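        // Illustrative sketch (not generated code): the ELU math documented above,
        // x >= 0 ? x : exp(x) - 1, together with its derivative; hypothetical helper names.
        public static double eluRef(double x) {
            // expm1(x) == exp(x) - 1, numerically stable near 0
            return x >= 0 ? x : Math.expm1(x);
        }
        public static double eluGradRef(double x, double gradOut) {
            // derivative is 1 for x >= 0 and exp(x) (== elu(x) + 1) for x < 0
            return x >= 0 ? gradOut : Math.exp(x) * gradOut;
        }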

        /**
         * This is the Cube activation function.
         * Math is: x^3
         */
//         #if NOT_EXCLUDED(OP_cube)
        @Namespace("sd::ops") public static class cube extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cube(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cube(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cube position(long position) {
                return (cube)super.position(position);
            }
            @Override public cube getPointer(long i) {
                return new cube((Pointer)this).position(position + i);
            }
        
                                                                                    public cube() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class cube_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cube_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cube_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cube_bp position(long position) {
                return (cube_bp)super.position(position);
            }
            @Override public cube_bp getPointer(long i) {
                return new cube_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public cube_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
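        // Illustrative sketch (not generated code): the Cube math documented above, x^3,
        // with its gradient 3 * x^2; hypothetical helper names.
        public static double cubeRef(double x) {
            return x * x * x;
        }
        public static double cubeGradRef(double x, double gradOut) {
            return 3.0 * x * x * gradOut;
        }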

        /**
         * This is the RectifiedTanh activation function.
         * Math is: max(0, tanh(x))
         */
//         #if NOT_EXCLUDED(OP_rectifiedtanh)
        @Namespace("sd::ops") public static class rectifiedtanh extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rectifiedtanh(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rectifiedtanh(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rectifiedtanh position(long position) {
                return (rectifiedtanh)super.position(position);
            }
            @Override public rectifiedtanh getPointer(long i) {
                return new rectifiedtanh((Pointer)this).position(position + i);
            }
        
                                                                                    public rectifiedtanh() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class rectifiedtanh_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rectifiedtanh_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rectifiedtanh_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rectifiedtanh_bp position(long position) {
                return (rectifiedtanh_bp)super.position(position);
            }
            @Override public rectifiedtanh_bp getPointer(long i) {
                return new rectifiedtanh_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public rectifiedtanh_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
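        // Illustrative sketch (not generated code): the RectifiedTanh math documented above,
        // max(0, tanh(x)); the gradient shown assumes the usual chain rule through tanh and
        // zero gradient where the output is clipped. Hypothetical helper names.
        public static double rectifiedTanhRef(double x) {
            return Math.max(0.0, Math.tanh(x));
        }
        public static double rectifiedTanhGradRef(double x, double gradOut) {
            double t = Math.tanh(x);
            return t > 0 ? (1.0 - t * t) * gradOut : 0.0;
        }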

        /**
         * This is the RationalTanh activation function.
         */
//         #if NOT_EXCLUDED(OP_rationaltanh)
        @Namespace("sd::ops") public static class rationaltanh extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rationaltanh(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rationaltanh(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rationaltanh position(long position) {
                return (rationaltanh)super.position(position);
            }
            @Override public rationaltanh getPointer(long i) {
                return new rationaltanh((Pointer)this).position(position + i);
            }
        
                                                                                    public rationaltanh() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class rationaltanh_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rationaltanh_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rationaltanh_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rationaltanh_bp position(long position) {
                return (rationaltanh_bp)super.position(position);
            }
            @Override public rationaltanh_bp getPointer(long i) {
                return new rationaltanh_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public rationaltanh_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the HardTanh activation function.
         * Math is: x < -1.0 ? -1.0 : x > 1.0 ? 1.0 : x;
         */
//         #if NOT_EXCLUDED(OP_hardtanh)
        @Namespace("sd::ops") public static class hardtanh extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hardtanh(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hardtanh(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hardtanh position(long position) {
                return (hardtanh)super.position(position);
            }
            @Override public hardtanh getPointer(long i) {
                return new hardtanh((Pointer)this).position(position + i);
            }
        
                                                                                    public hardtanh() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class hardtanh_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hardtanh_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hardtanh_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hardtanh_bp position(long position) {
                return (hardtanh_bp)super.position(position);
            }
            @Override public hardtanh_bp getPointer(long i) {
                return new hardtanh_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public hardtanh_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
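        // Illustrative sketch (not generated code): the HardTanh math documented above,
        // x < -1.0 ? -1.0 : x > 1.0 ? 1.0 : x, i.e. a clamp to [-1, 1]; hypothetical helpers.
        public static double hardTanhRef(double x) {
            return x < -1.0 ? -1.0 : (x > 1.0 ? 1.0 : x);
        }
        public static double hardTanhGradRef(double x, double gradOut) {
            // gradient passes through only inside the linear region
            return (x > -1.0 && x < 1.0) ? gradOut : 0.0;
        }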

        /**
         * This is the HardSigmoid activation function.
         * Math is: min(1, max(0, 0.2 * x + 0.5))
         */
//         #if NOT_EXCLUDED(OP_hardsigmoid)
        @Namespace("sd::ops") public static class hardsigmoid extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hardsigmoid(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hardsigmoid(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hardsigmoid position(long position) {
                return (hardsigmoid)super.position(position);
            }
            @Override public hardsigmoid getPointer(long i) {
                return new hardsigmoid((Pointer)this).position(position + i);
            }
        
                                                                                    public hardsigmoid() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class hardsigmoid_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hardsigmoid_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hardsigmoid_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hardsigmoid_bp position(long position) {
                return (hardsigmoid_bp)super.position(position);
            }
            @Override public hardsigmoid_bp getPointer(long i) {
                return new hardsigmoid_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public hardsigmoid_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
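        // Illustrative sketch (not generated code): the HardSigmoid math documented above,
        // min(1, max(0, 0.2 * x + 0.5)); hypothetical helper names.
        public static double hardSigmoidRef(double x) {
            return Math.min(1.0, Math.max(0.0, 0.2 * x + 0.5));
        }
        public static double hardSigmoidGradRef(double x, double gradOut) {
            // slope 0.2 inside the linear region (-2.5 < x < 2.5), 0 outside
            return (x > -2.5 && x < 2.5) ? 0.2 * gradOut : 0.0;
        }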

        /**
         * This is the Identity operation. It passes the signal unmodified in both directions.
         */
//         #if NOT_EXCLUDED(OP_identity)
        @Namespace("sd::ops") public static class identity extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public identity(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public identity(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public identity position(long position) {
                return (identity)super.position(position);
            }
            @Override public identity getPointer(long i) {
                return new identity((Pointer)this).position(position + i);
            }
        
                                                    public identity() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
        @Namespace("sd::ops") public static class identity_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public identity_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public identity_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public identity_bp position(long position) {
                return (identity_bp)super.position(position);
            }
            @Override public identity_bp getPointer(long i) {
                return new identity_bp((Pointer)this).position(position + i);
            }
        
                                                    public identity_bp() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This is the Identity operation. It passes the signal unmodified in both directions.
         */
//         #if NOT_EXCLUDED(OP_identity_n)
        @Namespace("sd::ops") public static class identity_n extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public identity_n(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public identity_n(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public identity_n position(long position) {
                return (identity_n)super.position(position);
            }
            @Override public identity_n getPointer(long i) {
                return new identity_n((Pointer)this).position(position + i);
            }
        
                                                                                    public identity_n() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is the Concatenated RELU implementation.
         * What happens inside: RELU(Concat((x, -x, {-1})))
         *
         * PLEASE NOTE: Concatenation will double the number of features available in the input
         */
//         #if NOT_EXCLUDED(OP_crelu)
        @Namespace("sd::ops") public static class crelu extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public crelu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public crelu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public crelu position(long position) {
                return (crelu)super.position(position);
            }
            @Override public crelu getPointer(long i) {
                return new crelu((Pointer)this).position(position + i);
            }
        
                                                                                    public crelu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class crelu_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public crelu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public crelu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public crelu_bp position(long position) {
                return (crelu_bp)super.position(position);
            }
            @Override public crelu_bp getPointer(long i) {
                return new crelu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public crelu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
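        // Illustrative sketch (not generated code): the Concatenated RELU behaviour described
        // above, for a flat vector: concatenate x and -x, then apply RELU, so the output has
        // twice as many features as the input. Treating the {-1} above as the last (and here
        // only) concatenation axis is an assumption; this helper is hypothetical.
        public static double[] creluRef(double[] x) {
            double[] out = new double[x.length * 2];
            for (int i = 0; i < x.length; i++) {
                out[i] = Math.max(0.0, x[i]);              // RELU(x)
                out[x.length + i] = Math.max(0.0, -x[i]);  // RELU(-x)
            }
            return out;
        }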

        /**
         * This is the RELU6 activation function implementation.
         */
//         #if NOT_EXCLUDED(OP_relu6)
        @Namespace("sd::ops") public static class relu6 extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public relu6(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public relu6(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public relu6 position(long position) {
                return (relu6)super.position(position);
            }
            @Override public relu6 getPointer(long i) {
                return new relu6((Pointer)this).position(position + i);
            }
        
                                                                                    public relu6() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class relu6_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public relu6_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public relu6_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public relu6_bp position(long position) {
                return (relu6_bp)super.position(position);
            }
            @Override public relu6_bp getPointer(long i) {
                return new relu6_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public relu6_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * Parametric Rectified Linear Unit
         * f(x) = alpha * x for x < 0, f(x) = x for x >= 0
         */
//         #if NOT_EXCLUDED(OP_prelu)
        @Namespace("sd::ops") public static class prelu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public prelu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public prelu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public prelu position(long position) {
                return (prelu)super.position(position);
            }
            @Override public prelu getPointer(long i) {
                return new prelu((Pointer)this).position(position + i);
            }
        
                                                                                    public prelu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class prelu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public prelu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public prelu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public prelu_bp position(long position) {
                return (prelu_bp)super.position(position);
            }
            @Override public prelu_bp getPointer(long i) {
                return new prelu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public prelu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
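        // Illustrative sketch (not generated code): the Parametric RELU math documented above,
        // f(x) = alpha * x for x < 0 and f(x) = x for x >= 0. How the native op broadcasts the
        // learned alpha across dimensions is not shown here and is assumed; hypothetical helpers.
        public static double preluRef(double x, double alpha) {
            return x < 0 ? alpha * x : x;
        }
        public static double preluGradWrtAlphaRef(double x, double gradOut) {
            // d(out)/d(alpha) is x on the negative side and 0 elsewhere; the _bp variant
            // presumably produces this gradient in addition to the gradient w.r.t. the input
            return x < 0 ? x * gradOut : 0.0;
        }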

        /**
         * Thresholded Rectified Linear Unit
         * f(x) = x for x > theta, f(x) = 0 otherwise
         * theta must be >= 0
         */
//         #if NOT_EXCLUDED(OP_thresholdedrelu)
        @Namespace("sd::ops") public static class thresholdedrelu extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public thresholdedrelu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public thresholdedrelu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public thresholdedrelu position(long position) {
                return (thresholdedrelu)super.position(position);
            }
            @Override public thresholdedrelu getPointer(long i) {
                return new thresholdedrelu((Pointer)this).position(position + i);
            }
        
                                                                                    public thresholdedrelu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class thresholdedrelu_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public thresholdedrelu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public thresholdedrelu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public thresholdedrelu_bp position(long position) {
                return (thresholdedrelu_bp)super.position(position);
            }
            @Override public thresholdedrelu_bp getPointer(long i) {
                return new thresholdedrelu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public thresholdedrelu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
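        // Illustrative sketch (not generated code): the Thresholded RELU math documented above,
        // f(x) = x for x > theta and 0 otherwise, with theta >= 0; hypothetical helper names.
        public static double thresholdedReluRef(double x, double theta) {
            return x > theta ? x : 0.0;
        }
        public static double thresholdedReluGradRef(double x, double theta, double gradOut) {
            return x > theta ? gradOut : 0.0;
        }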


    


// #endif

// Parsed from ops/declarable/headers/boolean.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_BOOLEAN_H
// #define LIBND4J_HEADERS_BOOLEAN_H

// #include 

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if x < y
         */
//         #if NOT_EXCLUDED(OP_lt_scalar)
        @Namespace("sd::ops") public static class lt_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lt_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lt_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lt_scalar position(long position) {
                return (lt_scalar)super.position(position);
            }
            @Override public lt_scalar getPointer(long i) {
                return new lt_scalar((Pointer)this).position(position + i);
            }
        
                                                    public lt_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif
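        // Illustrative sketch (not generated code): the scalar comparison semantics documented
        // for lt_scalar and its sibling ops declared below (gt/lte/gte/eq/neq), expressed in
        // plain Java. How the native BooleanOp receives its two scalar operands is not modelled
        // here; these helpers are hypothetical.
        public static boolean ltScalarRef(double x, double y)  { return x < y;  }
        public static boolean gtScalarRef(double x, double y)  { return x > y;  }
        public static boolean lteScalarRef(double x, double y) { return x <= y; }
        public static boolean gteScalarRef(double x, double y) { return x >= y; }
        public static boolean eqScalarRef(double x, double y)  { return x == y; }
        public static boolean neqScalarRef(double x, double y) { return x != y; }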

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if x > y
         */
//         #if NOT_EXCLUDED(OP_gt_scalar)
        @Namespace("sd::ops") public static class gt_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gt_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gt_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gt_scalar position(long position) {
                return (gt_scalar)super.position(position);
            }
            @Override public gt_scalar getPointer(long i) {
                return new gt_scalar((Pointer)this).position(position + i);
            }
        
                                                    public gt_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if x <= y
         */
//         #if NOT_EXCLUDED(OP_lte_scalar)
        @Namespace("sd::ops") public static class lte_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lte_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lte_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lte_scalar position(long position) {
                return (lte_scalar)super.position(position);
            }
            @Override public lte_scalar getPointer(long i) {
                return new lte_scalar((Pointer)this).position(position + i);
            }
        
                                                    public lte_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if x >= y
         */
//         #if NOT_EXCLUDED(OP_gte_scalar)
        @Namespace("sd::ops") public static class gte_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gte_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gte_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gte_scalar position(long position) {
                return (gte_scalar)super.position(position);
            }
            @Override public gte_scalar getPointer(long i) {
                return new gte_scalar((Pointer)this).position(position + i);
            }
        
                                                    public gte_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if both operands are equal.
         */
//         #if NOT_EXCLUDED(OP_eq_scalar)
        @Namespace("sd::ops") public static class eq_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public eq_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public eq_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public eq_scalar position(long position) {
                return (eq_scalar)super.position(position);
            }
            @Override public eq_scalar getPointer(long i) {
                return new eq_scalar((Pointer)this).position(position + i);
            }
        
                                                    public eq_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This is a scalar boolean op.
         * Both operands should be scalars.
         * 
         * Returns true if x != y
         */
//         #if NOT_EXCLUDED(OP_neq_scalar)
        @Namespace("sd::ops") public static class neq_scalar extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public neq_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public neq_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public neq_scalar position(long position) {
                return (neq_scalar)super.position(position);
            }
            @Override public neq_scalar getPointer(long i) {
                return new neq_scalar((Pointer)this).position(position + i);
            }
        
                                                    public neq_scalar() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This op takes 2 n-dimensional arrays as input, and returns an
         * array of the same shape, with elements taken either from x or from y, depending on the condition.
         */
//         #if NOT_EXCLUDED(OP_where)
        @Namespace("sd::ops") public static class Where extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Where(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Where(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Where position(long position) {
                return (Where)super.position(position);
            }
            @Override public Where getPointer(long i) {
                return new Where((Pointer)this).position(position + i);
            }
        
                                                                                    public Where() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
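        // Illustrative sketch (not generated code): the element-wise selection described above,
        // for flat arrays of equal length: pick from x where the condition holds, otherwise
        // from y. The native op's broadcasting rules and any condition-only (single-input)
        // mode are not modelled here; this helper is hypothetical.
        public static double[] whereRef(boolean[] condition, double[] x, double[] y) {
            double[] out = new double[condition.length];
            for (int i = 0; i < condition.length; i++) {
                out[i] = condition[i] ? x[i] : y[i];
            }
            return out;
        }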

//         #if NOT_EXCLUDED(OP_where_np)
        @Namespace("sd::ops") public static class where_np extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public where_np(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public where_np(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public where_np position(long position) {
                return (where_np)super.position(position);
            }
            @Override public where_np getPointer(long i) {
                return new where_np((Pointer)this).position(position + i);
            }
        
                                                                                    public where_np() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op takes 2 n-dimensional arrays as input, and returns an
         * array of the same shape, with elements taken either from x or from y, depending on the condition.
         */
//         #if NOT_EXCLUDED(OP_select)
        @Namespace("sd::ops") public static class select extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public select(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public select(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public select position(long position) {
                return (select)super.position(position);
            }
            @Override public select getPointer(long i) {
                return new select((Pointer)this).position(position + i);
            }
        
                                                                                    public select() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op takes either 1 argument and 1 scalar,
         * or 1 argument and another comparison array,
         * and runs a predefined conditional op.
         *
         *  The output of the op is dynamic in size: it returns a flat vector of the elements
         *  that satisfy the given condition.
         *  In numpy parlance, this is roughly equivalent to:
         *  a[a > 2]
         *  where a is a numpy array and the condition is true when an element is
         *  > 2. Libnd4j already implements a number of predefined conditions.
         */
//         #if NOT_EXCLUDED(OP_choose)
        @Namespace("sd::ops") public static class choose extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public choose(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public choose(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public choose position(long position) {
                return (choose)super.position(position);
            }
            @Override public choose getPointer(long i) {
                return new choose((Pointer)this).position(position + i);
            }
        
                                                                                    public choose() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
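
        // Editor's note: a worked example of the semantics described above, under the assumption that
        // the chosen condition is "greater than 2". For input a = [1, 4, 2, 5, 0], the numpy expression
        // a[a > 2] keeps only the elements satisfying the predicate, so the op returns the flat,
        // dynamically sized vector [4, 5]. Which integer code selects which predefined condition is
        // defined by libnd4j and is not reproduced here.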

        /**
         * This op takes 1 n-dimensional array as input, and returns true if for every adjacent pair we have x[i] <= x[i+1]
         * (a worked example covering this op and is_strictly_increasing follows their definitions below).
         */
//         #if NOT_EXCLUDED(OP_is_non_decreasing)
        @Namespace("sd::ops") public static class is_non_decreasing extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public is_non_decreasing(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public is_non_decreasing(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public is_non_decreasing position(long position) {
                return (is_non_decreasing)super.position(position);
            }
            @Override public is_non_decreasing getPointer(long i) {
                return new is_non_decreasing((Pointer)this).position(position + i);
            }
        
                                                    public is_non_decreasing() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         * This op takes 1 n-dimensional array as input, and returns true if for every adjacent pair we have x[i] < x[i+1].
         */
//         #if NOT_EXCLUDED(OP_is_strictly_increasing)
        @Namespace("sd::ops") public static class is_strictly_increasing extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public is_strictly_increasing(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public is_strictly_increasing(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public is_strictly_increasing position(long position) {
                return (is_strictly_increasing)super.position(position);
            }
            @Override public is_strictly_increasing getPointer(long i) {
                return new is_strictly_increasing((Pointer)this).position(position + i);
            }
        
                                                    public is_strictly_increasing() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif
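
        // Editor's note: a worked example for the two ordering checks above. For x = [1, 2, 2, 3] every
        // adjacent pair satisfies x[i] <= x[i+1], so is_non_decreasing returns true, while
        // is_strictly_increasing returns false because of the repeated 2. For x = [1, 2, 3, 4] both
        // ops return true; for x = [3, 1, 2] both return false.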

        /**
         * This op takes 1 n-dimensional array as input, and returns true if the input is a numeric array.
         */
//         #if NOT_EXCLUDED(OP_is_numeric_tensor)
        @Namespace("sd::ops") public static class is_numeric_tensor extends BooleanOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public is_numeric_tensor(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public is_numeric_tensor(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public is_numeric_tensor position(long position) {
                return (is_numeric_tensor)super.position(position);
            }
            @Override public is_numeric_tensor getPointer(long i) {
                return new is_numeric_tensor((Pointer)this).position(position + i);
            }
        
                                                    public is_numeric_tensor() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                }
//         #endif

        /**
         *
         */
//         #if NOT_EXCLUDED(OP_boolean_not)
        @Namespace("sd::ops") public static class boolean_not extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public boolean_not(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public boolean_not(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public boolean_not position(long position) {
                return (boolean_not)super.position(position);
            }
            @Override public boolean_not getPointer(long i) {
                return new boolean_not((Pointer)this).position(position + i);
            }
        
                                                    public boolean_not() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
    


// #endif

// Parsed from ops/declarable/headers/broadcastable.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_BROADCASTABLE_H
// #define LIBND4J_HEADERS_BROADCASTABLE_H

// #include 
// #include 
// #include 
// #include 
        // TODO: make broadcastables separate class

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Max(X, Y)
         * (a worked shape example follows the maximum ops below).
         */
//         #if NOT_EXCLUDED(OP_maximum)
        @Namespace("sd::ops") public static class maximum extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maximum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maximum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maximum position(long position) {
                return (maximum)super.position(position);
            }
            @Override public maximum getPointer(long i) {
                return new maximum((Pointer)this).position(position + i);
            }
        
                                                                                    public maximum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class maximum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maximum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maximum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maximum_bp position(long position) {
                return (maximum_bp)super.position(position);
            }
            @Override public maximum_bp getPointer(long i) {
                return new maximum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public maximum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
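
        // Editor's note: a worked shape example for the broadcasting rules above, applicable to any of
        // the broadcastable ops in this header. With X and Y both of shape [3, 4] the result is [3, 4]
        // (case 1); with X a scalar and Y of shape [3, 4] the result is [3, 4] (case 2), and symmetrically
        // for case 3; with X of shape [3, 1] and Y of shape [1, 4] the shapes differ, so the result takes
        // the broadcast shape [3, 4] (case 4). A hedged invocation through ND4J's DynamicCustomOp API
        // (assuming org.nd4j.linalg.factory.Nd4j, INDArray and DynamicCustomOp are on the classpath):
        //
        //     INDArray x = Nd4j.createFromArray(new float[][]{{1}, {2}, {3}});       // shape [3, 1]
        //     INDArray y = Nd4j.createFromArray(new float[][]{{10, 20, 30, 40}});    // shape [1, 4]
        //     INDArray z = Nd4j.exec(DynamicCustomOp.builder("maximum")
        //             .addInputs(x, y)
        //             .build())[0];                                                  // shape [3, 4]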

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Min(X, Y)
         */
//         #if NOT_EXCLUDED(OP_minimum)
        @Namespace("sd::ops") public static class minimum extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public minimum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public minimum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public minimum position(long position) {
                return (minimum)super.position(position);
            }
            @Override public minimum getPointer(long i) {
                return new minimum((Pointer)this).position(position + i);
            }
        
                                                                                    public minimum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class minimum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public minimum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public minimum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public minimum_bp position(long position) {
                return (minimum_bp)super.position(position);
            }
            @Override public minimum_bp getPointer(long i) {
                return new minimum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public minimum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Add(X, Y)
         */
//         #if NOT_EXCLUDED(OP_add)
        @Namespace("sd::ops") public static class add extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public add(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public add(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public add position(long position) {
                return (add)super.position(position);
            }
            @Override public add getPointer(long i) {
                return new add((Pointer)this).position(position + i);
            }
        
                                                                                    public add() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class add_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public add_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public add_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public add_bp position(long position) {
                return (add_bp)super.position(position);
            }
            @Override public add_bp getPointer(long i) {
                return new add_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public add_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Subtract(X, Y)
         */
//         #if NOT_EXCLUDED(OP_subtract)
        @Namespace("sd::ops") public static class subtract extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public subtract(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public subtract(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public subtract position(long position) {
                return (subtract)super.position(position);
            }
            @Override public subtract getPointer(long i) {
                return new subtract((Pointer)this).position(position + i);
            }
        
                                                                                    public subtract() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class subtract_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public subtract_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public subtract_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public subtract_bp position(long position) {
                return (subtract_bp)super.position(position);
            }
            @Override public subtract_bp getPointer(long i) {
                return new subtract_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public subtract_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Subtract(Y, X)
         */
//         #if NOT_EXCLUDED(OP_reversesubtract)
        @Namespace("sd::ops") public static class reversesubtract extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversesubtract(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversesubtract(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversesubtract position(long position) {
                return (reversesubtract)super.position(position);
            }
            @Override public reversesubtract getPointer(long i) {
                return new reversesubtract((Pointer)this).position(position + i);
            }
        
                                                                                    public reversesubtract() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class reversesubtract_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversesubtract_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversesubtract_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversesubtract_bp position(long position) {
                return (reversesubtract_bp)super.position(position);
            }
            @Override public reversesubtract_bp getPointer(long i) {
                return new reversesubtract_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reversesubtract_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = ReverseMod(X, Y) == Mod(Y, X)
         */
//         #if NOT_EXCLUDED(OP_reversemod)
        @Namespace("sd::ops") public static class reversemod extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversemod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversemod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversemod position(long position) {
                return (reversemod)super.position(position);
            }
            @Override public reversemod getPointer(long i) {
                return new reversemod((Pointer)this).position(position + i);
            }
        
                                                                                    public reversemod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class reversemod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversemod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversemod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversemod_bp position(long position) {
                return (reversemod_bp)super.position(position);
            }
            @Override public reversemod_bp getPointer(long i) {
                return new reversemod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reversemod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Subtract(X, Y) * Subtract(X, Y)
         */
//         #if NOT_EXCLUDED(OP_squaredsubtract)
        @Namespace("sd::ops") public static class squaredsubtract extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public squaredsubtract(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public squaredsubtract(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public squaredsubtract position(long position) {
                return (squaredsubtract)super.position(position);
            }
            @Override public squaredsubtract getPointer(long i) {
                return new squaredsubtract((Pointer)this).position(position + i);
            }
        
                                                                                    public squaredsubtract() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
                                                                                @Namespace("sd::ops") public static class squaredsubtract_bp extends DeclarableCustomOp {
                                                                                    static { Loader.load(); }
                                                                                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                                                                                    public squaredsubtract_bp(Pointer p) { super(p); }
                                                                                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                                                                                    public squaredsubtract_bp(long size) { super((Pointer)null); allocateArray(size); }
                                                                                    private native void allocateArray(long size);
                                                                                    @Override public squaredsubtract_bp position(long position) {
                                                                                        return (squaredsubtract_bp)super.position(position);
                                                                                    }
                                                                                    @Override public squaredsubtract_bp getPointer(long i) {
                                                                                        return new squaredsubtract_bp((Pointer)this).position(position + i);
                                                                                    }
                                                                                
                                                                                    public squaredsubtract_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Multiply(X, Y)
         */
//         #if NOT_EXCLUDED(OP_multiply)
        @Namespace("sd::ops") public static class multiply extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public multiply(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public multiply(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public multiply position(long position) {
                return (multiply)super.position(position);
            }
            @Override public multiply getPointer(long i) {
                return new multiply((Pointer)this).position(position + i);
            }
        
                                                                                    public multiply() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class multiply_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public multiply_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public multiply_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public multiply_bp position(long position) {
                return (multiply_bp)super.position(position);
            }
            @Override public multiply_bp getPointer(long i) {
                return new multiply_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public multiply_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Divide(X, Y)
         */
//         #if NOT_EXCLUDED(OP_divide)
        @Namespace("sd::ops") public static class divide extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public divide(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public divide(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public divide position(long position) {
                return (divide)super.position(position);
            }
            @Override public divide getPointer(long i) {
                return new divide((Pointer)this).position(position + i);
            }
        
                                                                                    public divide() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class divide_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public divide_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public divide_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public divide_bp position(long position) {
                return (divide_bp)super.position(position);
            }
            @Override public divide_bp getPointer(long i) {
                return new divide_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public divide_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Divide(X, Y), with the exception that Z = 0 wherever Y = 0
         * (a worked example follows this op's definition below).
         */
//         #if NOT_EXCLUDED(OP_divide_no_nan)
        @Namespace("sd::ops") public static class divide_no_nan extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public divide_no_nan(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public divide_no_nan(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public divide_no_nan position(long position) {
                return (divide_no_nan)super.position(position);
            }
            @Override public divide_no_nan getPointer(long i) {
                return new divide_no_nan((Pointer)this).position(position + i);
            }
        
                                                                                    public divide_no_nan() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
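
        // Editor's note: a worked example of the divide_no_nan behaviour described above. For
        // X = [4, 0, 5] and Y = [2, 0, 0], ordinary division would produce [2, NaN, inf], whereas
        // divide_no_nan returns [2, 0, 0]: every position where Y = 0 yields 0 instead of NaN or inf.
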
        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Divide(Y, X)
         */
//         #if NOT_EXCLUDED(OP_reversedivide)
        @Namespace("sd::ops") public static class reversedivide extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversedivide(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversedivide(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversedivide position(long position) {
                return (reversedivide)super.position(position);
            }
            @Override public reversedivide getPointer(long i) {
                return new reversedivide((Pointer)this).position(position + i);
            }
        
                                                                                    public reversedivide() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class reversedivide_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reversedivide_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reversedivide_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reversedivide_bp position(long position) {
                return (reversedivide_bp)super.position(position);
            }
            @Override public reversedivide_bp getPointer(long i) {
                return new reversedivide_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reversedivide_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = FloorMod(X, Y)
         * (a worked example follows the floormod ops below).
         */
//         #if NOT_EXCLUDED(OP_floormod)
        @Namespace("sd::ops") public static class floormod extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public floormod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public floormod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public floormod position(long position) {
                return (floormod)super.position(position);
            }
            @Override public floormod getPointer(long i) {
                return new floormod((Pointer)this).position(position + i);
            }
        
                                                                                    public floormod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class floormod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public floormod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public floormod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public floormod_bp position(long position) {
                return (floormod_bp)super.position(position);
            }
            @Override public floormod_bp getPointer(long i) {
                return new floormod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public floormod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
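
        // Editor's note: a worked example of FloorMod, which follows the sign of the divisor Y
        // (FloorMod(x, y) = x - floor(x / y) * y). FloorMod(7, 3) = 1 and FloorMod(-7, 3) = 2, since
        // floor(-7 / 3) = -3 and -7 - 3 * (-3) = 2; likewise FloorMod(7, -3) = -2. A truncating
        // remainder would instead give -1 for (-7, 3).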

//         #if NOT_EXCLUDED(OP_mod)
        @Namespace("sd::ops") public static class mod extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mod position(long position) {
                return (mod)super.position(position);
            }
            @Override public mod getPointer(long i) {
                return new mod((Pointer)this).position(position + i);
            }
        
                                                                                    public mod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class mod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mod_bp position(long position) {
                return (mod_bp)super.position(position);
            }
            @Override public mod_bp getPointer(long i) {
                return new mod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public mod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = FloorDiv(X, Y)
         * (a worked example follows the floordiv ops below).
         */
//         #if NOT_EXCLUDED(OP_floordiv)
        @Namespace("sd::ops") public static class floordiv extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public floordiv(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public floordiv(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public floordiv position(long position) {
                return (floordiv)super.position(position);
            }
            @Override public floordiv getPointer(long i) {
                return new floordiv((Pointer)this).position(position + i);
            }
        
                                                                                    public floordiv() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
                                                                                @Namespace("sd::ops") public static class floordiv_bp extends DeclarableCustomOp {
                                                                                    static { Loader.load(); }
                                                                                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                                                                                    public floordiv_bp(Pointer p) { super(p); }
                                                                                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                                                                                    public floordiv_bp(long size) { super((Pointer)null); allocateArray(size); }
                                                                                    private native void allocateArray(long size);
                                                                                    @Override public floordiv_bp position(long position) {
                                                                                        return (floordiv_bp)super.position(position);
                                                                                    }
                                                                                    @Override public floordiv_bp getPointer(long i) {
                                                                                        return new floordiv_bp((Pointer)this).position(position + i);
                                                                                    }
                                                                                
                                                                                    public floordiv_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                                                                                //         #endif
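
        // Editor's note: a worked example of FloorDiv, which rounds the quotient toward negative
        // infinity rather than toward zero. FloorDiv(7, 3) = 2, FloorDiv(-7, 3) = -3 (the floor of
        // -2.33), whereas truncating integer division would give -2; FloorDiv(-8, 4) = -2 exactly,
        // since the quotient is already an integer.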

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Divide(X, Y)
         */
//         #if NOT_EXCLUDED(OP_realdiv)
        @Namespace("sd::ops") public static class realdiv extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public realdiv(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public realdiv(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public realdiv position(long position) {
                return (realdiv)super.position(position);
            }
            @Override public realdiv getPointer(long i) {
                return new realdiv((Pointer)this).position(position + i);
            }
        
                                                                                    public realdiv() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class realdiv_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public realdiv_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public realdiv_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public realdiv_bp position(long position) {
                return (realdiv_bp)super.position(position);
            }
            @Override public realdiv_bp getPointer(long i) {
                return new realdiv_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public realdiv_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         *
         *
         * \tparam T
         */
        @Namespace("sd::ops") public static class truncatediv extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public truncatediv(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public truncatediv(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public truncatediv position(long position) {
                return (truncatediv)super.position(position);
            }
            @Override public truncatediv getPointer(long i) {
                return new truncatediv((Pointer)this).position(position + i);
            }
        
                                                                                    public truncatediv() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }

        /**
         * This is one of the auto-broadcastable operations. It accepts 2 operands, and the operation is applied based on their shapes:
         * 1) if the shapes are equal, this is a pairwise operation and the result will have the same shape.
         * 2) if shape X is a scalar and shape Y is an array, the result will have a shape equal to Y.
         * 3) if shape X is an array and shape Y is a scalar, the result will have a shape equal to X.
         * 4) if shapes X and Y are both arrays but are not equal, the result shape will be the broadcast of the two.
         * 
         * This operation returns Z = Assign(X, Y)
         */
//         #if NOT_EXCLUDED(OP_assign)
        @Namespace("sd::ops") public static class assign extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public assign(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public assign(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public assign position(long position) {
                return (assign)super.position(position);
            }
            @Override public assign getPointer(long i) {
                return new assign((Pointer)this).position(position + i);
            }
        
                                                                                    public assign() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class assign_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public assign_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public assign_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public assign_bp position(long position) {
                return (assign_bp)super.position(position);
            }
            @Override public assign_bp getPointer(long i) {
                return new assign_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public assign_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
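
        // Illustrative sketch (not part of the generated bindings): the four broadcast-shape rules
        // documented above for auto-broadcastable ops such as assign, written out in plain Java.
        // The helper name, the long[] shape representation, and the use of an empty array for a
        // scalar are assumptions made for this example only.
        private static long[] exampleBroadcastShape(long[] shapeX, long[] shapeY) {
            if (java.util.Arrays.equals(shapeX, shapeY)) return shapeX.clone();   // case 1: pairwise
            if (shapeX.length == 0) return shapeY.clone();                        // case 2: X is a scalar
            if (shapeY.length == 0) return shapeX.clone();                        // case 3: Y is a scalar
            // case 4: generic broadcast - align trailing dimensions, size-1 dimensions stretch
            int rank = Math.max(shapeX.length, shapeY.length);
            long[] out = new long[rank];
            for (int i = 0; i < rank; i++) {
                long dx = i < rank - shapeX.length ? 1 : shapeX[i - (rank - shapeX.length)];
                long dy = i < rank - shapeY.length ? 1 : shapeY[i - (rank - shapeY.length)];
                if (dx != dy && dx != 1 && dy != 1)
                    throw new IllegalArgumentException("Shapes are not broadcastable");
                out[i] = Math.max(dx, dy);
            }
            return out;   // e.g. {4, 3} and {3} -> {4, 3}; {2, 3} and {} -> {2, 3}
        }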

//         #if NOT_EXCLUDED(OP_meshgrid)
        @Namespace("sd::ops") public static class meshgrid extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public meshgrid(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public meshgrid(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public meshgrid position(long position) {
                return (meshgrid)super.position(position);
            }
            @Override public meshgrid getPointer(long i) {
                return new meshgrid((Pointer)this).position(position + i);
            }
        
                                                                                    public meshgrid() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x == _y ? (T) 1.0f : (T) 0.0f;
         *
         */
//         #if NOT_EXCLUDED(OP_equals)
        @Namespace("sd::ops") public static class equals extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public equals(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public equals(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public equals position(long position) {
                return (equals)super.position(position);
            }
            @Override public equals getPointer(long i) {
                return new equals((Pointer)this).position(position + i);
            }
        
                                                                                    public equals() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
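
        // Illustrative sketch (not part of the generated bindings): the element-wise rule quoted
        // above for equals, applied to two equally shaped float arrays. The method name and the
        // flat float[] representation are assumptions made for this example only.
        private static float[] exampleEquals(float[] x, float[] y) {
            float[] z = new float[x.length];
            for (int i = 0; i < x.length; i++)
                z[i] = x[i] == y[i] ? 1.0f : 0.0f;   // _x == _y ? (T) 1.0f : (T) 0.0f
            return z;
        }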

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x != _y ? (T) 1.0f : (T) 0.0f;
         */
//         #if NOT_EXCLUDED(OP_not_equals)
        @Namespace("sd::ops") public static class not_equals extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public not_equals(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public not_equals(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public not_equals position(long position) {
                return (not_equals)super.position(position);
            }
            @Override public not_equals getPointer(long i) {
                return new not_equals((Pointer)this).position(position + i);
            }
        
                                                                                    public not_equals() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x <= _y ? (T) 1.0f : (T) 0.0f;
         */
//         #if NOT_EXCLUDED(OP_less_equal)
        @Namespace("sd::ops") public static class less_equal extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public less_equal(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public less_equal(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public less_equal position(long position) {
                return (less_equal)super.position(position);
            }
            @Override public less_equal getPointer(long i) {
                return new less_equal((Pointer)this).position(position + i);
            }
        
                                                                                    public less_equal() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x >= _y ? (T) 1.0f : (T) 0.0f;
         */
//         #if NOT_EXCLUDED(OP_greater_equal)
        @Namespace("sd::ops") public static class greater_equal extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public greater_equal(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public greater_equal(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public greater_equal position(long position) {
                return (greater_equal)super.position(position);
            }
            @Override public greater_equal getPointer(long i) {
                return new greater_equal((Pointer)this).position(position + i);
            }
        
                                                                                    public greater_equal() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x < _y ? (T) 1.0f : (T) 0.0f;
         */
//         #if NOT_EXCLUDED(OP_less)
        @Namespace("sd::ops") public static class less extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public less(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public less(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public less position(long position) {
                return (less)super.position(position);
            }
            @Override public less getPointer(long i) {
                return new less((Pointer)this).position(position + i);
            }
        
                                                                                    public less() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This op takes two equally shaped arrays as input and produces a binary matrix as output.
         * Math is: _x > _y ? (T) 1.0f : (T) 0.0f;
         */
//         #if NOT_EXCLUDED(OP_greater)
        @Namespace("sd::ops") public static class greater extends BroadcastableBoolOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public greater(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public greater(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public greater position(long position) {
                return (greater)super.position(position);
            }
            @Override public greater getPointer(long i) {
                return new greater((Pointer)this).position(position + i);
            }
        
                                                                                    public greater() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         *
         */
//         #if NOT_EXCLUDED(OP_boolean_and)
        @Namespace("sd::ops") public static class boolean_and extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public boolean_and(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public boolean_and(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public boolean_and position(long position) {
                return (boolean_and)super.position(position);
            }
            @Override public boolean_and getPointer(long i) {
                return new boolean_and((Pointer)this).position(position + i);
            }
        
                                                                                    public boolean_and() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         *
         */
//         #if NOT_EXCLUDED(OP_boolean_or)
        @Namespace("sd::ops") public static class boolean_or extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public boolean_or(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public boolean_or(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public boolean_or position(long position) {
                return (boolean_or)super.position(position);
            }
            @Override public boolean_or getPointer(long i) {
                return new boolean_or((Pointer)this).position(position + i);
            }
        
                                                                                    public boolean_or() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         *
         */
//         #if NOT_EXCLUDED(OP_boolean_xor)
        @Namespace("sd::ops") public static class boolean_xor extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public boolean_xor(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public boolean_xor(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public boolean_xor position(long position) {
                return (boolean_xor)super.position(position);
            }
            @Override public boolean_xor getPointer(long i) {
                return new boolean_xor((Pointer)this).position(position + i);
            }
        
                                                                                    public boolean_xor() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This operation computes the percentile of the input array along the given axes.
         *
         * Input - tensor with rank N > 0
         * Output - tensor with rank (N - length(axis)), or a scalar if no integer arguments are given
         * Float arguments:
         *   0: percentile (scalar) in the range [0, 100] (inclusive)
         *   1: interpolation (optional), possible values are 0 - "lower", 1 - "higher", 2 - "nearest" (default)
         *   2: keepDims (optional), if non-zero the reduced dimensions are kept as size-1 dimensions in the output shape, default is 0
         * Integer arguments - axes - the sequence of axes to compute the percentile along; if the sequence is empty, the percentile is computed over the whole input tensor and returned as a scalar (see the sketch after this block)
         * 
         */
//         #if NOT_EXCLUDED(OP_percentile)
        @Namespace("sd::ops") public static class percentile extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public percentile(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public percentile(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public percentile position(long position) {
                return (percentile)super.position(position);
            }
            @Override public percentile getPointer(long i) {
                return new percentile((Pointer)this).position(position + i);
            }
        
                                                                                    public percentile() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
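
        // Illustrative sketch (not part of the generated bindings): computing a percentile of a flat
        // array with the three interpolation modes listed above (0 - "lower", 1 - "higher",
        // 2 - "nearest"). The index arithmetic (rank = percentile / 100 * (n - 1)) is an assumption
        // of this example and may differ from the native implementation.
        private static float examplePercentile(float[] values, double percentile, int interpolation) {
            float[] sorted = values.clone();
            java.util.Arrays.sort(sorted);
            double rank = percentile / 100.0 * (sorted.length - 1);
            int idx;
            switch (interpolation) {
                case 0:  idx = (int) Math.floor(rank); break;   // "lower"
                case 1:  idx = (int) Math.ceil(rank);  break;   // "higher"
                default: idx = (int) Math.round(rank); break;   // "nearest" (default)
            }
            return sorted[idx];   // e.g. examplePercentile(new float[]{1f, 2f, 3f, 4f}, 50.0, 0) -> 2.0f
        }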


        /**
         * Special atan2 op implementation matching TensorFlow's argument order
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_tf_atan2)
        @Namespace("sd::ops") public static class tf_atan2 extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tf_atan2(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tf_atan2(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tf_atan2 position(long position) {
                return (tf_atan2)super.position(position);
            }
            @Override public tf_atan2 getPointer(long i) {
                return new tf_atan2((Pointer)this).position(position + i);
            }
        
                                                                                    public tf_atan2() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * Broadcastable pow implementation
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_Pow)
        @Namespace("sd::ops") public static class Pow extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Pow(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Pow(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Pow position(long position) {
                return (Pow)super.position(position);
            }
            @Override public Pow getPointer(long i) {
                return new Pow((Pointer)this).position(position + i);
            }
        
                                                                                    public Pow() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
        @Namespace("sd::ops") public static class Pow_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Pow_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Pow_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Pow_bp position(long position) {
                return (Pow_bp)super.position(position);
            }
            @Override public Pow_bp getPointer(long i) {
                return new Pow_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public Pow_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * Broadcastable igamma implementation
         *
         * igamma(a, x) = gamma(a, x) / Gamma(a) - Gamma distribution function P(a,x)
         * Gamma(a) = int from 0 to infinity { t ^ {a - 1} e^{-t}dt }
         * gamma(a, x) = int from 0 to x { t ^ {a - 1} e^{-t}dt }
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_igamma)
                @Namespace("sd::ops") public static class igamma extends BroadcastableOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public igamma(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public igamma(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public igamma position(long position) {
                        return (igamma)super.position(position);
                    }
                    @Override public igamma getPointer(long i) {
                        return new igamma((Pointer)this).position(position + i);
                    }
                
                                                                                    public igamma() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
        /**
         * Broadcastable igammac implementation
         * igammac(a, x) = Gamma(a,x)/Gamma(a) - Gamma distribution function Q(a,x)
         * Gamma(a) = int from 0 to infinity { t ^ {a - 1} e^{-t}dt }
         * Gamma(a, x) = int from x to infinity { t ^ {a - 1} e^{-t}dt }
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_igammac)
                @Namespace("sd::ops") public static class igammac extends BroadcastableOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public igammac(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public igammac(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public igammac position(long position) {
                        return (igammac)super.position(position);
                    }
                    @Override public igammac getPointer(long i) {
                        return new igammac((Pointer)this).position(position + i);
                    }
                
                                                                                    public igammac() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
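
        // Illustrative sketch (not part of the generated bindings): evaluating the incomplete gamma
        // functions exactly as defined in the comments above, via a crude trapezoidal rule. This is
        // only meant to make the formulas concrete; it assumes a >= 1 and a moderate a, and is not
        // the numerical routine the native ops use. Note that P(a, x) + Q(a, x) = 1 by definition.
        private static double exampleLowerIncompleteGamma(double a, double upper) {
            int steps = 200000;
            double h = upper / steps, sum = 0.0;
            for (int i = 1; i < steps; i++) {
                double t = i * h;
                sum += Math.pow(t, a - 1) * Math.exp(-t);   // integrand t^(a - 1) * e^(-t)
            }
            // the t = 0 endpoint is finite for a >= 1 and its contribution is negligible here
            return h * (sum + 0.5 * Math.pow(upper, a - 1) * Math.exp(-upper));
        }
        private static double exampleIgamma(double a, double x) {
            double gammaA = exampleLowerIncompleteGamma(a, Math.max(a, x) + 50.0);   // ~ Gamma(a)
            return exampleLowerIncompleteGamma(a, x) / gammaA;                       // P(a, x)
        }
        private static double exampleIgammac(double a, double x) {
            return 1.0 - exampleIgamma(a, x);                                        // Q(a, x)
        }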
    


// #endif

// Parsed from ops/declarable/headers/convo.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//
//

// #ifndef LIBND4J_HEADERS_CONVOL_H
// #define LIBND4J_HEADERS_CONVOL_H

// #include 

        /**
         * 1D temporal convolution implementation
         * Expected input:
         * x: 3D array
         * weight: 3D array
         * bias: optional vector
         *
         * Int args:
         * 0: kernel
         * 1: stride
         * 2: padding
         */
//         #if NOT_EXCLUDED(OP_conv1d)
        @Namespace("sd::ops") public static class conv1d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv1d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv1d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv1d position(long position) {
                return (conv1d)super.position(position);
            }
            @Override public conv1d getPointer(long i) {
                return new conv1d((Pointer)this).position(position + i);
            }
        
                                                                                    public conv1d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class conv1d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv1d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv1d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv1d_bp position(long position) {
                return (conv1d_bp)super.position(position);
            }
            @Override public conv1d_bp getPointer(long i) {
                return new conv1d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public conv1d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
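
        // Illustrative sketch (not part of the generated bindings): the standard output-length
        // arithmetic implied by the int args documented above (kernel, stride, padding). Textbook
        // convolution arithmetic, assumed here for illustration; dilation and "same" mode are not
        // covered by this short helper.
        private static long exampleConv1dOutputLength(long inLength, long kernel, long stride, long padding) {
            // floor((inLength + 2 * padding - kernel) / stride) + 1, via integer division
            return (inLength + 2 * padding - kernel) / stride + 1;   // e.g. (10, 3, 1, 0) -> 8
        }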

        /**
         * 2D convolution implementation
         * Expected input:
         * x: 4D array
         * weight: 4D Array
         * bias: optional vector, length of outputChannels
         *
         * IntArgs:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: same mode:   1 true, 0 false
         * 9: data format: 1 NHWC, 0 NCHW
         */
//         #if NOT_EXCLUDED(OP_conv2d)
        @Namespace("sd::ops") public static class conv2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv2d position(long position) {
                return (conv2d)super.position(position);
            }
            @Override public conv2d getPointer(long i) {
                return new conv2d((Pointer)this).position(position + i);
            }
        
                                                                                    public conv2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class conv2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv2d_bp position(long position) {
                return (conv2d_bp)super.position(position);
            }
            @Override public conv2d_bp getPointer(long i) {
                return new conv2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public conv2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class conv2d_input_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv2d_input_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv2d_input_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv2d_input_bp position(long position) {
                return (conv2d_input_bp)super.position(position);
            }
            @Override public conv2d_input_bp getPointer(long i) {
                return new conv2d_input_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public conv2d_input_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
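
        // Illustrative sketch (not part of the generated bindings): output spatial size for one
        // dimension of conv2d, using the int args documented above (kernel, stride, padding,
        // dilation, same mode). Standard convolution arithmetic, shown only to make the argument
        // list concrete; it is not a quote of the native implementation.
        private static long exampleConv2dOutputSize(long in, long kernel, long stride, long padding,
                                                    long dilation, boolean sameMode) {
            if (sameMode)
                return (in + stride - 1) / stride;                    // ceil(in / stride)
            long effectiveKernel = (kernel - 1) * dilation + 1;       // dilated kernel extent
            return (in + 2 * padding - effectiveKernel) / stride + 1; // e.g. (224, 3, 1, 1, 1, false) -> 224
        }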

        /**
         * Depthwise convolution2d op:
         * Expected inputs:
         * x: 4D array, NCHW format
         * weightsDepth: 4D array,
         * weightsPointwise: optional, 4D array
         * bias: optional, vector
         */
//         #if NOT_EXCLUDED(OP_sconv2d)
        @Namespace("sd::ops") public static class sconv2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sconv2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sconv2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sconv2d position(long position) {
                return (sconv2d)super.position(position);
            }
            @Override public sconv2d getPointer(long i) {
                return new sconv2d((Pointer)this).position(position + i);
            }
        
                                                                                    public sconv2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class sconv2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sconv2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sconv2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sconv2d_bp position(long position) {
                return (sconv2d_bp)super.position(position);
            }
            @Override public sconv2d_bp getPointer(long i) {
                return new sconv2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public sconv2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * 2D deconvolution implementation
         *
         * IntArgs:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: same mode: 0 false, 1 true
         */
//         #if NOT_EXCLUDED(OP_deconv2d)
        @Namespace("sd::ops") public static class deconv2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public deconv2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public deconv2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public deconv2d position(long position) {
                return (deconv2d)super.position(position);
            }
            @Override public deconv2d getPointer(long i) {
                return new deconv2d((Pointer)this).position(position + i);
            }
        
                                                                                    public deconv2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class deconv2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public deconv2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public deconv2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public deconv2d_bp position(long position) {
                return (deconv2d_bp)super.position(position);
            }
            @Override public deconv2d_bp getPointer(long i) {
                return new deconv2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public deconv2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
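
        // Illustrative sketch (not part of the generated bindings): output spatial size for one
        // dimension of a 2D deconvolution (transposed convolution), based on the int args above.
        // Standard transposed-convolution arithmetic, assumed here for illustration only.
        private static long exampleDeconv2dOutputSize(long in, long kernel, long stride, long padding,
                                                      long dilation, boolean sameMode) {
            if (sameMode)
                return in * stride;                                    // "same" mode scales by the stride
            long effectiveKernel = (kernel - 1) * dilation + 1;        // dilated kernel extent
            return (in - 1) * stride - 2 * padding + effectiveKernel;  // e.g. (112, 2, 2, 0, 1, false) -> 224
        }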

        /**
         * 3D deconvolution implementation
         *
         * IntArgs:
         * 0:  filter(kernel) depth
         * 1:  filter(kernel) height
         * 2:  filter(kernel) width
         * 3:  strides depth
         * 4:  strides height
         * 5:  strides width
         * 6:  paddings depth
         * 7:  paddings height
         * 8:  paddings width
         * 9:  dilations depth
         * 10: dilations height
         * 11: dilations width
         * 12: same mode: 0 false, 1 true
         * 13: data format (optional): 0-NDHWC, 1-NCDHW, default is 1
         */

//         #if NOT_EXCLUDED(OP_deconv3d)
        @Namespace("sd::ops") public static class deconv3d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public deconv3d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public deconv3d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public deconv3d position(long position) {
                return (deconv3d)super.position(position);
            }
            @Override public deconv3d getPointer(long i) {
                return new deconv3d((Pointer)this).position(position + i);
            }
        
                                                                                    public deconv3d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class deconv3d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public deconv3d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public deconv3d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public deconv3d_bp position(long position) {
                return (deconv3d_bp)super.position(position);
            }
            @Override public deconv3d_bp getPointer(long i) {
                return new deconv3d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public deconv3d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * This op implements max pooling for convolution networks.
         * Expected Input: 4D array, NCHW format.
         *
         * IntArgs:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: same mode: 0 false, 1 true
         */
//         #if NOT_EXCLUDED(OP_maxpool2d)
        @Namespace("sd::ops") public static class maxpool2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maxpool2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maxpool2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maxpool2d position(long position) {
                return (maxpool2d)super.position(position);
            }
            @Override public maxpool2d getPointer(long i) {
                return new maxpool2d((Pointer)this).position(position + i);
            }
        
                                                                                    public maxpool2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class maxpool2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maxpool2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maxpool2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maxpool2d_bp position(long position) {
                return (maxpool2d_bp)super.position(position);
            }
            @Override public maxpool2d_bp getPointer(long i) {
                return new maxpool2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public maxpool2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
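
        // Illustrative sketch (not part of the generated bindings): max pooling over a single H x W
        // plane with the kernel/stride args documented above; padding and dilation are omitted to
        // keep the example short. The float[][] layout is an assumption of this sketch.
        private static float[][] exampleMaxPool2d(float[][] input, int kh, int kw, int sh, int sw) {
            int outH = (input.length - kh) / sh + 1;
            int outW = (input[0].length - kw) / sw + 1;
            float[][] out = new float[outH][outW];
            for (int oh = 0; oh < outH; oh++)
                for (int ow = 0; ow < outW; ow++) {
                    float max = Float.NEGATIVE_INFINITY;
                    for (int i = 0; i < kh; i++)
                        for (int j = 0; j < kw; j++)
                            max = Math.max(max, input[oh * sh + i][ow * sw + j]);
                    out[oh][ow] = max;   // maximum over the kh x kw window
                }
            return out;
        }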

        /**
         * This op implements average pooling for convolution networks.
         * Expected Input: 4D array, NCHW format.
         *
         * IntArgs:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: same mode: 0 false, 1 true
         */
//         #if NOT_EXCLUDED(OP_avgpool2d)
        @Namespace("sd::ops") public static class avgpool2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public avgpool2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public avgpool2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public avgpool2d position(long position) {
                return (avgpool2d)super.position(position);
            }
            @Override public avgpool2d getPointer(long i) {
                return new avgpool2d((Pointer)this).position(position + i);
            }
        
                                                                                    public avgpool2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class avgpool2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public avgpool2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public avgpool2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public avgpool2d_bp position(long position) {
                return (avgpool2d_bp)super.position(position);
            }
            @Override public avgpool2d_bp getPointer(long i) {
                return new avgpool2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public avgpool2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op implements pnorm pooling for convolution networks.
         * Expected Input: 4D array, NCHW format.
         *
         * IntArgs:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: same mode: 0 false, 1 true
         * 9: p for p-norm
         */
//         #if NOT_EXCLUDED(OP_pnormpool2d)
        @Namespace("sd::ops") public static class pnormpool2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public pnormpool2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public pnormpool2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public pnormpool2d position(long position) {
                return (pnormpool2d)super.position(position);
            }
            @Override public pnormpool2d getPointer(long i) {
                return new pnormpool2d((Pointer)this).position(position + i);
            }
        
                                                                                    public pnormpool2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class pnormpool2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public pnormpool2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public pnormpool2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public pnormpool2d_bp position(long position) {
                return (pnormpool2d_bp)super.position(position);
            }
            @Override public pnormpool2d_bp getPointer(long i) {
                return new pnormpool2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public pnormpool2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op implements the im2col algorithm, widely used in convolutional neural networks
         * Input: 4D input expected
         *
         * Int args:
         * 0: kernel height
         * 1: kernel width
         * 2: stride height
         * 3: stride width
         * 4: padding height
         * 5: padding width
         * 6: dilation height
         * 7: dilation width
         * 8: isSameMode
         */
//         #if NOT_EXCLUDED(OP_im2col)
        @Namespace("sd::ops") public static class im2col extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public im2col(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public im2col(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public im2col position(long position) {
                return (im2col)super.position(position);
            }
            @Override public im2col getPointer(long i) {
                return new im2col((Pointer)this).position(position + i);
            }
        
                                                                                    public im2col() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
		@Namespace("sd::ops") public static class im2col_bp extends DeclarableCustomOp {
		    static { Loader.load(); }
		    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
		    public im2col_bp(Pointer p) { super(p); }
		    /** Native array allocator. Access with {@link Pointer#position(long)}. */
		    public im2col_bp(long size) { super((Pointer)null); allocateArray(size); }
		    private native void allocateArray(long size);
		    @Override public im2col_bp position(long position) {
		        return (im2col_bp)super.position(position);
		    }
		    @Override public im2col_bp getPointer(long i) {
		        return new im2col_bp((Pointer)this).position(position + i);
		    }
		
                                                                                    public im2col_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
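
        // Illustrative sketch (not part of the generated bindings): im2col for a single H x W channel
        // with stride and no padding, producing a (kh * kw) x (outH * outW) patch matrix that a
        // convolution can then apply as a matrix multiply. The layout and the omission of
        // padding/dilation are simplifications made for this example only.
        private static float[][] exampleIm2col(float[][] image, int kh, int kw, int sh, int sw) {
            int outH = (image.length - kh) / sh + 1;
            int outW = (image[0].length - kw) / sw + 1;
            float[][] columns = new float[kh * kw][outH * outW];
            for (int oh = 0; oh < outH; oh++)
                for (int ow = 0; ow < outW; ow++)
                    for (int i = 0; i < kh; i++)
                        for (int j = 0; j < kw; j++)
                            // each output position becomes one column holding its flattened patch
                            columns[i * kw + j][oh * outW + ow] = image[oh * sh + i][ow * sw + j];
            return columns;
        }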

        /**
         * This op implements the col2im algorithm (the inverse of im2col), widely used in convolutional neural networks
         * Input: 6D input expected (like output of im2col op)
         *
         * Int args:
         * 0: stride height
         * 1: stride width
         * 2: padding height
         * 3: padding width
         * 4: image height
         * 5: image width
         * 6: dilation height
         * 7: dilation width
         */
//         #if NOT_EXCLUDED(OP_col2im)
        @Namespace("sd::ops") public static class col2im extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public col2im(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public col2im(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public col2im position(long position) {
                return (col2im)super.position(position);
            }
            @Override public col2im getPointer(long i) {
                return new col2im((Pointer)this).position(position + i);
            }
        
                                                                                    public col2im() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * Expected input: 4D array
         *
         * IntArgs:
         * 0: scale factor for rows (height)
         * 1: scale factor for columns (width)
         * 2: data format: 0 NHWC (default), 1 NCHW
         */
//         #if NOT_EXCLUDED(OP_upsampling2d)
        @Namespace("sd::ops") public static class upsampling2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public upsampling2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public upsampling2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public upsampling2d position(long position) {
                return (upsampling2d)super.position(position);
            }
            @Override public upsampling2d getPointer(long i) {
                return new upsampling2d((Pointer)this).position(position + i);
            }
        
                                                                                    public upsampling2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class upsampling2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public upsampling2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public upsampling2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public upsampling2d_bp position(long position) {
                return (upsampling2d_bp)super.position(position);
            }
            @Override public upsampling2d_bp getPointer(long i) {
                return new upsampling2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public upsampling2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
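        // Hedged usage sketch (editorial addition): upsampling2d as described above, repeating rows
        // and columns by the given scale factors. The Nd4j/INDArray/DynamicCustomOp calls are assumed
        // from the nd4j-api module and are not part of this generated file.
        private static org.nd4j.linalg.api.ndarray.INDArray upsampling2dUsageSketch() {
            // NHWC input [bS=1, H=8, W=8, C=3]
            final org.nd4j.linalg.api.ndarray.INDArray x =
                    org.nd4j.linalg.factory.Nd4j.rand(org.nd4j.linalg.api.buffer.DataType.FLOAT, 1, 8, 8, 3);
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("upsampling2d")
                            .addInputs(x)
                            // IntArgs as documented above: scaleH = 2, scaleW = 2, data format 0 = NHWC
                            .addIntegerArguments(2, 2, 0)
                            .build();
            return org.nd4j.linalg.factory.Nd4j.exec(op)[0]; // [1, 16, 16, 3]
        }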

        /**
         * Expected input: 4D array
         *
         * IntArgs:
         * 0: scale factor for depth
         * 1: scale factor for rows (height)
         * 2: scale factor for columns (width)
         * 3: data format: 0 NDHWC (default), 1 NCDHW
         */
//         #if NOT_EXCLUDED(OP_upsampling3d)
        @Namespace("sd::ops") public static class upsampling3d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public upsampling3d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public upsampling3d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public upsampling3d position(long position) {
                return (upsampling3d)super.position(position);
            }
            @Override public upsampling3d getPointer(long i) {
                return new upsampling3d((Pointer)this).position(position + i);
            }
        
                                                                                    public upsampling3d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class upsampling3d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public upsampling3d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public upsampling3d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public upsampling3d_bp position(long position) {
                return (upsampling3d_bp)super.position(position);
            }
            @Override public upsampling3d_bp getPointer(long i) {
                return new upsampling3d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public upsampling3d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op produces a binary matrix with respect to the target dimension.
         * The maximum value within each TAD is replaced with 1, all other values are set to 0.
         *
         * Int args:
         * 0: axis
         */
//         #if NOT_EXCLUDED(OP_ismax)
        @Namespace("sd::ops") public static class ismax extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ismax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ismax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ismax position(long position) {
                return (ismax)super.position(position);
            }
            @Override public ismax getPointer(long i) {
                return new ismax((Pointer)this).position(position + i);
            }
        
                                                                                    public ismax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
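        // Hedged usage sketch (editorial addition): ismax along a given axis, producing 1 at the
        // position of the maximum within each TAD and 0 elsewhere, as described above. Routing the
        // op through DynamicCustomOp/Nd4j.exec is an assumption about the nd4j-api module.
        private static org.nd4j.linalg.api.ndarray.INDArray ismaxUsageSketch() {
            // e.g. row [3, 7, 5] along axis 1 becomes [0, 1, 0]
            final org.nd4j.linalg.api.ndarray.INDArray x =
                    org.nd4j.linalg.factory.Nd4j.create(new double[][]{{3, 7, 5}, {9, 1, 2}});
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("ismax")
                            .addInputs(x)
                            .addIntegerArguments(1)   // Int arg 0: axis
                            .build();
            return org.nd4j.linalg.factory.Nd4j.exec(op)[0]; // [[0, 1, 0], [1, 0, 0]]
        }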

        /**
         * Dilation2D op
         *
         * Int args:
         * 0: isSameMode
         */
//         #if NOT_EXCLUDED(OP_dilation2d)
        @Namespace("sd::ops") public static class dilation2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dilation2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dilation2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dilation2d position(long position) {
                return (dilation2d)super.position(position);
            }
            @Override public dilation2d getPointer(long i) {
                return new dilation2d((Pointer)this).position(position + i);
            }
        
                                                                                    public dilation2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_conv3dnew)
        @Namespace("sd::ops") public static class conv3dnew extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv3dnew(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv3dnew(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv3dnew position(long position) {
                return (conv3dnew)super.position(position);
            }
            @Override public conv3dnew getPointer(long i) {
                return new conv3dnew((Pointer)this).position(position + i);
            }
        
                                                                                    public conv3dnew() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class conv3dnew_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public conv3dnew_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public conv3dnew_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public conv3dnew_bp position(long position) {
                return (conv3dnew_bp)super.position(position);
            }
            @Override public conv3dnew_bp getPointer(long i) {
                return new conv3dnew_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public conv3dnew_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_avgpool3dnew)
        @Namespace("sd::ops") public static class avgpool3dnew extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public avgpool3dnew(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public avgpool3dnew(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public avgpool3dnew position(long position) {
                return (avgpool3dnew)super.position(position);
            }
            @Override public avgpool3dnew getPointer(long i) {
                return new avgpool3dnew((Pointer)this).position(position + i);
            }
        
                                                                                    public avgpool3dnew() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class avgpool3dnew_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public avgpool3dnew_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public avgpool3dnew_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public avgpool3dnew_bp position(long position) {
                return (avgpool3dnew_bp)super.position(position);
            }
            @Override public avgpool3dnew_bp getPointer(long i) {
                return new avgpool3dnew_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public avgpool3dnew_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_maxpool3dnew)
        @Namespace("sd::ops") public static class maxpool3dnew extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maxpool3dnew(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maxpool3dnew(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maxpool3dnew position(long position) {
                return (maxpool3dnew)super.position(position);
            }
            @Override public maxpool3dnew getPointer(long i) {
                return new maxpool3dnew((Pointer)this).position(position + i);
            }
        
                                                                                    public maxpool3dnew() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class maxpool3dnew_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public maxpool3dnew_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public maxpool3dnew_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public maxpool3dnew_bp position(long position) {
                return (maxpool3dnew_bp)super.position(position);
            }
            @Override public maxpool3dnew_bp getPointer(long i) {
                return new maxpool3dnew_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public maxpool3dnew_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op is the same as maxpool2d, with a variant that also returns a matrix of indexes of the max values
         *
         * Input - 4D tensor
         * Output:
         *     0 - 4D tensor as input
         *     1 - 4D tensor with max value indexes
         *
         * Int params:
         *   9 ints: 2x4 vector values and 1 boolean value
         */
//         #if NOT_EXCLUDED(OP_max_pool_with_argmax)
        @Namespace("sd::ops") public static class max_pool_with_argmax extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public max_pool_with_argmax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public max_pool_with_argmax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public max_pool_with_argmax position(long position) {
                return (max_pool_with_argmax)super.position(position);
            }
            @Override public max_pool_with_argmax getPointer(long i) {
                return new max_pool_with_argmax((Pointer)this).position(position + i);
            }
        
                                                                                    public max_pool_with_argmax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
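        // Hedged usage sketch (editorial addition): max_pool_with_argmax returning both the pooled
        // values and the indexes of the max values, per the docs above. The nine int args are assumed
        // to follow the usual pooling layout (kernel, stride, padding, dilation pairs plus a sameMode
        // flag); Nd4j/DynamicCustomOp come from the nd4j-api module, not from this file.
        private static org.nd4j.linalg.api.ndarray.INDArray[] maxPoolWithArgmaxUsageSketch() {
            final org.nd4j.linalg.api.ndarray.INDArray x =
                    org.nd4j.linalg.factory.Nd4j.rand(org.nd4j.linalg.api.buffer.DataType.FLOAT, 1, 1, 4, 4);
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("max_pool_with_argmax")
                            .addInputs(x)
                            // kH, kW, sH, sW, pH, pW, dH, dW, sameMode -- ordering assumed, see note above
                            .addIntegerArguments(2, 2, 2, 2, 0, 0, 1, 1, 0)
                            .build();
            // [0]: pooled values, [1]: indexes of the max values (both 4D, per the docs above)
            return org.nd4j.linalg.factory.Nd4j.exec(op);
        }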


//         #if NOT_EXCLUDED(OP_depthwise_conv2d)
        @Namespace("sd::ops") public static class depthwise_conv2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public depthwise_conv2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public depthwise_conv2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public depthwise_conv2d position(long position) {
                return (depthwise_conv2d)super.position(position);
            }
            @Override public depthwise_conv2d getPointer(long i) {
                return new depthwise_conv2d((Pointer)this).position(position + i);
            }
        
                                                                                    public depthwise_conv2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class depthwise_conv2d_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public depthwise_conv2d_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public depthwise_conv2d_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public depthwise_conv2d_bp position(long position) {
                return (depthwise_conv2d_bp)super.position(position);
            }
            @Override public depthwise_conv2d_bp getPointer(long i) {
                return new depthwise_conv2d_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public depthwise_conv2d_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * point-wise 2D convolution
         * Expected input:
         * x: 4D array
         * weight: 4D Array [1,  1,  iC, oC] (NHWC) or [oC, iC,  1,  1] (NCHW)
         * bias: optional vector of length oC
         *
         * IntArgs:
         * 0: data format: 1 NHWC, 0 NCHW (optional, by default = NHWC)
         */
        @Namespace("sd::ops") public static class pointwise_conv2d extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public pointwise_conv2d(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public pointwise_conv2d(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public pointwise_conv2d position(long position) {
                return (pointwise_conv2d)super.position(position);
            }
            @Override public pointwise_conv2d getPointer(long i) {
                return new pointwise_conv2d((Pointer)this).position(position + i);
            }
        
                                                                                    public pointwise_conv2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
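        // Hedged usage sketch (editorial addition): point-wise (1x1) 2D convolution with the NHWC
        // shapes documented above. Nd4j/INDArray/DynamicCustomOp come from the nd4j-api module and
        // are assumptions here, not declarations from this file.
        private static org.nd4j.linalg.api.ndarray.INDArray pointwiseConv2dUsageSketch() {
            final int bS = 1, iH = 8, iW = 8, iC = 3, oC = 5;
            final org.nd4j.linalg.api.buffer.DataType f = org.nd4j.linalg.api.buffer.DataType.FLOAT;
            final org.nd4j.linalg.api.ndarray.INDArray x = org.nd4j.linalg.factory.Nd4j.rand(f, bS, iH, iW, iC); // NHWC input
            final org.nd4j.linalg.api.ndarray.INDArray w = org.nd4j.linalg.factory.Nd4j.rand(f, 1, 1, iC, oC);   // [1, 1, iC, oC]
            final org.nd4j.linalg.api.ndarray.INDArray b = org.nd4j.linalg.factory.Nd4j.rand(f, oC);             // optional bias
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("pointwise_conv2d")
                            .addInputs(x, w, b)   // data format int arg omitted -> NHWC by default, per the docs above
                            .build();
            return org.nd4j.linalg.factory.Nd4j.exec(op)[0]; // [bS, iH, iW, oC]
        }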

        @Namespace("sd::ops") public static class deconv2d_tf extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public deconv2d_tf(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public deconv2d_tf(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public deconv2d_tf position(long position) {
                return (deconv2d_tf)super.position(position);
            }
            @Override public deconv2d_tf getPointer(long i) {
                return new deconv2d_tf((Pointer)this).position(position + i);
            }
        
                                                                                    public deconv2d_tf() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }

    



// #endif

// Parsed from ops/declarable/headers/list.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_LIST_H
// #define LIBND4J_HEADERS_LIST_H

// #include 
        // list operations, basically all around NDArrayList

        /**
         * This operation puts the given NDArray into an (optionally) given NDArrayList.
         * If no NDArrayList is provided, a new one will be created
         */
//         #if NOT_EXCLUDED(OP_write_list)
        @Namespace("sd::ops") public static class write_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public write_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public write_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public write_list position(long position) {
                return (write_list)super.position(position);
            }
            @Override public write_list getPointer(long i) {
                return new write_list((Pointer)this).position(position + i);
            }
        
                                                                public write_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif
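        // Editorial note and minimal sketch: write_list and the other list ops in this header extend
        // DeclarableListOp rather than DeclarableCustomOp, so their generated bindings expose only the
        // constructors above and no calculateOutputShape(). The sketch below uses nothing but this
        // file's declarations; actual list execution is normally driven through the higher-level
        // NDArrayList/graph APIs rather than by calling these descriptors directly.
        private static void listOpBindingSketch() {
            final write_list writeOp = new write_list();    // native descriptor for OP_write_list
            final create_list createOp = new create_list(); // native descriptor for OP_create_list
            // Both are plain op descriptors backed by native allocations (see allocate() above).
        }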

        /**
         * This operation concatenates the given NDArrayList and returns the result as an NDArray
         */
//         #if NOT_EXCLUDED(OP_stack_list)
        @Namespace("sd::ops") public static class stack_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public stack_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public stack_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public stack_list position(long position) {
                return (stack_list)super.position(position);
            }
            @Override public stack_list getPointer(long i) {
                return new stack_list((Pointer)this).position(position + i);
            }
        
                                                                public stack_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation selects the specified index from the NDArrayList and returns it as an NDArray
         * Expected arguments:
         * x: non-empty list
         * indices: optional, scalar with index
         * 
         * Int args:
         * optional, index
         */
//         #if NOT_EXCLUDED(OP_read_list)
        @Namespace("sd::ops") public static class read_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public read_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public read_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public read_list position(long position) {
                return (read_list)super.position(position);
            }
            @Override public read_list getPointer(long i) {
                return new read_list((Pointer)this).position(position + i);
            }
        
                                                                public read_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation selects the specified indices from the NDArrayList and returns them as an NDArray
         * Expected arguments:
         * x: non-empty list
         * indices: optional, vector with indices
         * 
         * Int args:
         * optional, indices
         */
//         #if NOT_EXCLUDED(OP_pick_list)
        @Namespace("sd::ops") public static class pick_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public pick_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public pick_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public pick_list position(long position) {
                return (pick_list)super.position(position);
            }
            @Override public pick_list getPointer(long i) {
                return new pick_list((Pointer)this).position(position + i);
            }
        
                                                                public pick_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation returns a scalar with the number of existing arrays within the given NDArrayList
         * Expected arguments:
         * x: list
         */
//         #if NOT_EXCLUDED(OP_size_list)
        @Namespace("sd::ops") public static class size_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public size_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public size_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public size_list position(long position) {
                return (size_list)super.position(position);
            }
            @Override public size_list getPointer(long i) {
                return new size_list((Pointer)this).position(position + i);
            }
        
                                                                public size_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation creates a new empty NDArrayList
         */
//         #if NOT_EXCLUDED(OP_create_list)
        @Namespace("sd::ops") public static class create_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public create_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public create_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public create_list position(long position) {
                return (create_list)super.position(position);
            }
            @Override public create_list getPointer(long i) {
                return new create_list((Pointer)this).position(position + i);
            }
        
                                                                public create_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation unpacks the given NDArray into the specified NDArrayList wrt the specified indices
         */
//         #if NOT_EXCLUDED(OP_scatter_list)
        @Namespace("sd::ops") public static class scatter_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_list position(long position) {
                return (scatter_list)super.position(position);
            }
            @Override public scatter_list getPointer(long i) {
                return new scatter_list((Pointer)this).position(position + i);
            }
        
                                                                public scatter_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation splits the given NDArray into chunks and stores them into the given NDArrayList wrt the given sizes
         * Expected arguments:
         * list: optional, NDArrayList; if not provided, a new NDArrayList will be created
         * array: array to be split
         * sizes: vector with sizes for each chunk
         */
//         #if NOT_EXCLUDED(OP_split_list)
        @Namespace("sd::ops") public static class split_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public split_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public split_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public split_list position(long position) {
                return (split_list)super.position(position);
            }
            @Override public split_list getPointer(long i) {
                return new split_list((Pointer)this).position(position + i);
            }
        
                                                                public split_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation builds NDArray from NDArrayList using indices
         * Expected arguments:
         * x: non-empty list
         * indices: vector with indices for gather operation
         */
//         #if NOT_EXCLUDED(OP_gather_list)
        @Namespace("sd::ops") public static class gather_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gather_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gather_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gather_list position(long position) {
                return (gather_list)super.position(position);
            }
            @Override public gather_list getPointer(long i) {
                return new gather_list((Pointer)this).position(position + i);
            }
        
                                                                public gather_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation clones the given NDArrayList
         */
//         #if NOT_EXCLUDED(OP_clone_list)
        @Namespace("sd::ops") public static class clone_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clone_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clone_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clone_list position(long position) {
                return (clone_list)super.position(position);
            }
            @Override public clone_list getPointer(long i) {
                return new clone_list((Pointer)this).position(position + i);
            }
        
                                                                public clone_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif

        /**
         * This operation unstacks the given NDArray into an NDArrayList along the first dimension
         */
//         #if NOT_EXCLUDED(OP_unstack_list)
        @Namespace("sd::ops") public static class unstack_list extends DeclarableListOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unstack_list(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unstack_list(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unstack_list position(long position) {
                return (unstack_list)super.position(position);
            }
            @Override public unstack_list getPointer(long i) {
                return new unstack_list((Pointer)this).position(position + i);
            }
        
                                                                public unstack_list() { super((Pointer)null); allocate(); }
                                                                private native void allocate();
                                                            }
//         #endif
    


// #endif

// Parsed from ops/declarable/headers/recurrent.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_RECURRENT_H
// #define LIBND4J_HEADERS_RECURRENT_H

// #include 

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for Simple Recurrent Unit: "Training RNNs as Fast as CNNs" Tao Lei, Yu Zhang, Yoav Artzi
       *
       * Input arrays:
       *    0: input 3d tensor with shape [bS x K x N], N - number of time steps, bS - batch size, K - number of features
       *    1: 2d tensor of weights [3K x K]
       *    2: row of biases of length 2K, i.e. [1 x 2K]
       *    3: 2d tensor of previous cell state [bS x K]
       *    4: optional, 2d tensor of dropout mask [bS x K]
       *
       * Output arrays:
       *    0: 3d tensor of cell output [bS x K x N]
       *    1: 3d tensor of cell state [bS x K x N]
       */
//         #if NOT_EXCLUDED(OP_sru)
        @Namespace("sd::ops") public static class sru extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sru(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sru(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sru position(long position) {
                return (sru)super.position(position);
            }
            @Override public sru getPointer(long i) {
                return new sru((Pointer)this).position(position + i);
            }
        
                                                                                    public sru() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
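        // Hedged usage sketch (editorial addition): the Simple Recurrent Unit forward op with the
        // input shapes listed above ([bS x K x N] input, [3K x K] weights, [1 x 2K] biases,
        // [bS x K] initial state). Nd4j/DynamicCustomOp usage is assumed from the nd4j-api module.
        private static org.nd4j.linalg.api.ndarray.INDArray[] sruUsageSketch() {
            final int bS = 2, K = 3, N = 5;
            final org.nd4j.linalg.api.buffer.DataType f = org.nd4j.linalg.api.buffer.DataType.FLOAT;
            final org.nd4j.linalg.api.ndarray.INDArray x  = org.nd4j.linalg.factory.Nd4j.rand(f, bS, K, N);
            final org.nd4j.linalg.api.ndarray.INDArray w  = org.nd4j.linalg.factory.Nd4j.rand(f, 3 * K, K);
            final org.nd4j.linalg.api.ndarray.INDArray b  = org.nd4j.linalg.factory.Nd4j.rand(f, 1, 2 * K);
            final org.nd4j.linalg.api.ndarray.INDArray c0 = org.nd4j.linalg.factory.Nd4j.rand(f, bS, K);
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("sru")
                            .addInputs(x, w, b, c0)   // optional dropout mask (input 4) omitted
                            .build();
            return org.nd4j.linalg.factory.Nd4j.exec(op); // [0] = cell output, [1] = cell state, both [bS x K x N]
        }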

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for Simple Recurrent Unit (bidirectional case): "Training RNNs as Fast as CNNs" Tao Lei, Yu Zhang, Yoav Artzi
       *
       * Input arrays:
       *    0: input 3d tensor with shape [N x bS x 2K], N - number of time steps, bS - batch size, K - number of features
       *    1: 2d tensor of weights [2K x 6K]
       *    2: row of biases of length 4K, i.e. [1 x 4K]
       *    3: 2d tensor of previous cell state [bS x 2K]
       *    4: optional, 2d tensor of dropout mask [bS x 2K]
       *
       * Output arrays:
       *    0: 3d tensor of cell output [N x bS x 2K]
       *    1: 3d tensor of cell state [N x bS x 2K]
       */
//         #if NOT_EXCLUDED(OP_sru_bi)
        @Namespace("sd::ops") public static class sru_bi extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sru_bi(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sru_bi(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sru_bi position(long position) {
                return (sru_bi)super.position(position);
            }
            @Override public sru_bi getPointer(long i) {
                return new sru_bi((Pointer)this).position(position + i);
            }
        
                                                                                    public sru_bi() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for back propagation in Simple Recurrent Unit: "Training RNNs as Fast as CNNs" Tao Lei, Yu Zhang, Yoav Artzi
       *
       * Input arrays:
       *    0: input 3d tensor with shape [bS x K x N], N - number of time steps, bS - batch size, K - number of features
       *    1: 2d tensor of weights [3K x K]
       *    2: row of biases of length 2K, i.e. [1 x 2K]
       *    3: 2d tensor of previous cell state [bS x K]
       *    4: 3d tensor of cell state [bS x K x N]
       *    5: 2d tensor of cell state gradients [bS x K]
       *    6: 3d tensor of state output gradients [bS x K x N]
       *    7: optional, 2d tensor of dropout mask [bS x K]
       *
       * Output arrays:
       *    0: 3d tensor of input gradients [bS x K x N]
       *    1: 3d tensor of weights gradients [bS x 3K x K]
       *    2: 2d, row of biases gradients [1 x 2K]
       *    3: 2d, tensor of state gradients [bS x K]
       */
//         #if NOT_EXCLUDED(OP_sru)
        @Namespace("sd::ops") public static class sru_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sru_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sru_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sru_bp position(long position) {
                return (sru_bp)super.position(position);
            }
            @Override public sru_bp getPointer(long i) {
                return new sru_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public sru_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for back propagation in Simple Recurrent Unit (bidirectional case): "Training RNNs as Fast as CNNs" Tao Lei, Yu Zhang, Yoav Artzi
       *
       * Input arrays:
       *    0: input 3d tensor with shape [N x bS x 2K], N - number of time steps, bS - batch size, K - number of features
       *    1: 2d tensor of weights [2K x 6K]
       *    2: row of biases of length 4K, i.e. [1 x 4K]
       *    3: 2d tensor of previous cell state [bS x 2K]
       *    4: 3d tensor of cell state [N x bS x 2K]
       *    5: 2d tensor of cell state gradients [bS x 2K]
       *    6: 3d tensor of state output gradients [N x bS x 2K]
       *    7: optional, 2d tensor of dropout mask [bS x 2K]
       *
       * Output arrays:
       *    0: 3d tensor of input gradients [N x bS x 2K]
       *    1: 3d tensor of weights gradients [N x 2K x 6K]
       *    2: 2d, row of biases gradients [1 x 4K]
       *    3: 2d, tensor of state gradients [bS x 2K]
       */
//         #if NOT_EXCLUDED(OP_sru_bi)
        @Namespace("sd::ops") public static class sru_bi_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sru_bi_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sru_bi_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sru_bi_bp position(long position) {
                return (sru_bi_bp)super.position(position);
            }
            @Override public sru_bi_bp getPointer(long i) {
                return new sru_bi_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public sru_bi_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for LSTM cell with peephole connections:
       *    S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural Computation
       *    and
       *    https://research.google.com/pubs/archive/43905.pdf
       *    Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014.
       *
       * Input arrays:
       *    0: input with shape [batchSize x inSize], batchSize - batch size, inSize - number of features
       *    1: previous cell output [batchSize x numProj], that is, at previous time step t-1; in case of projection=false, numProj=numUnits
       *    2: previous cell state  [batchSize x numUnits], that is at previous time step t-1
       *    3: input-to-hidden  weights, [inSize  x 4*numUnits]
       *    4: hidden-to-hidden weights, [numProj x 4*numUnits]
       *    5: diagonal weights for peephole connections [3*numUnits]
       *    6: projection weights [numUnits x numProj]
       *    7: biases, [4*numUnits]
       *
       *  Input integer arguments:
       *    0: if not zero, provide peephole connections
       *    1: if not zero, then projection is performed, if zero then numProj==numUnits is mandatory!
       *
       *  Input float arguments:
       *    0: clipping value for cell state, if it is not equal to zero, then cell state is clipped
       *    1: clipping value for projected cell output, if it is not equal to zero, then projected cell output is clipped
       *    2: the bias added to forget gates in order to reduce the scale of forgetting in the beginning of the training
       *
       * Output arrays:
       *    0: current cell output [batchSize x numProj], that is at current time step t
       *    1: current cell state  [batchSize x numUnits], that is at current time step t
       */
//         #if NOT_EXCLUDED(OP_lstmCell)
        @Namespace("sd::ops") public static class lstmCell extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmCell(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmCell(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmCell position(long position) {
                return (lstmCell)super.position(position);
            }
            @Override public lstmCell getPointer(long i) {
                return new lstmCell((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmCell() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
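        // Hedged usage sketch (editorial addition): a single lstmCell step with the shapes and
        // argument layout documented above; projection and peepholes are switched off via the two
        // int args, so numProj == numUnits. Nd4j/DynamicCustomOp are assumed from the nd4j-api module.
        private static org.nd4j.linalg.api.ndarray.INDArray[] lstmCellUsageSketch() {
            final int bS = 2, inSize = 4, numUnits = 3, numProj = numUnits; // projection off -> numProj == numUnits
            final org.nd4j.linalg.api.buffer.DataType f = org.nd4j.linalg.api.buffer.DataType.FLOAT;
            final org.nd4j.linalg.api.ndarray.INDArray xt  = org.nd4j.linalg.factory.Nd4j.rand(f, bS, inSize);
            final org.nd4j.linalg.api.ndarray.INDArray ht1 = org.nd4j.linalg.factory.Nd4j.rand(f, bS, numProj);
            final org.nd4j.linalg.api.ndarray.INDArray ct1 = org.nd4j.linalg.factory.Nd4j.rand(f, bS, numUnits);
            final org.nd4j.linalg.api.ndarray.INDArray Wx  = org.nd4j.linalg.factory.Nd4j.rand(f, inSize, 4 * numUnits);
            final org.nd4j.linalg.api.ndarray.INDArray Wh  = org.nd4j.linalg.factory.Nd4j.rand(f, numProj, 4 * numUnits);
            final org.nd4j.linalg.api.ndarray.INDArray Wp  = org.nd4j.linalg.factory.Nd4j.rand(f, 3 * numUnits);
            final org.nd4j.linalg.api.ndarray.INDArray Wr  = org.nd4j.linalg.factory.Nd4j.rand(f, numUnits, numProj);
            final org.nd4j.linalg.api.ndarray.INDArray b   = org.nd4j.linalg.factory.Nd4j.rand(f, 4 * numUnits);
            final org.nd4j.linalg.api.ops.DynamicCustomOp op =
                    org.nd4j.linalg.api.ops.DynamicCustomOp.builder("lstmCell")
                            .addInputs(xt, ht1, ct1, Wx, Wh, Wp, Wr, b)
                            .addIntegerArguments(0, 0)                 // peepholes off, projection off
                            .addFloatingPointArguments(0.0, 0.0, 1.0)  // cell clip, projection clip, forget bias
                            .build();
            return org.nd4j.linalg.factory.Nd4j.exec(op); // [0] = ht [bS x numProj], [1] = ct [bS x numUnits]
        }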

//         #if NOT_EXCLUDED(OP_lstmLayerCell)
        @Namespace("sd::ops") public static class lstmLayerCell extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmLayerCell(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmLayerCell(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmLayerCell position(long position) {
                return (lstmLayerCell)super.position(position);
            }
            @Override public lstmLayerCell getPointer(long i) {
                return new lstmLayerCell((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmLayerCell() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_lstmLayerCell)
        @Namespace("sd::ops") public static class lstmLayerCellBp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmLayerCellBp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmLayerCellBp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmLayerCellBp position(long position) {
                return (lstmLayerCellBp)super.position(position);
            }
            @Override public lstmLayerCellBp getPointer(long i) {
                return new lstmLayerCellBp((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmLayerCellBp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for LSTM cell with optional peephole connections:
       *    S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural Computation
       *    and
       *    https://research.google.com/pubs/archive/43905.pdf
       *    Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014.
	   * See also: https://arxiv.org/pdf/1503.04069.pdf
       *
       * Input arrays:
       *    0: input [bS, inSize] at time t
       *    1: previous cell state  [bS, numUnits], time t-1
       *    2: previous output [bS, numUnits], time t-1
       *    3: Weights - concatenated (input-to-hidden, hidden-to-hidden weights)  weights, [(inSize+numUnits), 4*numUnits]
       *    4: weights - cell peephole (t-1) connections to input modulation gate, [numUnits]
       *    5: weights - cell peephole (t-1) connections to forget gate, [numUnits]
       *    6: weights - cell peephole (t) connections to output gate, [numUnits]
       *    7: biases, shape [4*numUnits]
       *
       *  Input integer arguments:
       *    0: if not zero, provide peephole connections
       *
       *  Input float arguments:
       *    0: the bias added to forget gates in order to reduce the scale of forgetting in the beginning of the training
	   *    1: clipping value for cell state, if it is not equal to zero, then cell state is clipped
       *
       * Output arrays:
       *    0: i      - Input modulation gate activations [bS, numUnits]
       *    1: c (cs) - Cell state (pre tanh) [bs, numUnits] (cs)
       *    2: f      - Output - forget gate activations [bs, numUnits]
       *    3: o      - Output - output gate activations [bs, numUnits]
       *    4: z (ci) - Output - block input [bs, numUnits]
       *    5: h (co) - Cell state, post tanh [bs, numUnits]
       *    6: y (h)  - Current cell output [bS, numUnits], time t
       */
//         #if NOT_EXCLUDED(OP_lstmBlockCell)
        @Namespace("sd::ops") public static class lstmBlockCell extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmBlockCell(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmBlockCell(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmBlockCell position(long position) {
                return (lstmBlockCell)super.position(position);
            }
            @Override public lstmBlockCell getPointer(long i) {
                return new lstmBlockCell((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmBlockCell() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
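
        // Usage sketch for lstmBlockCell (not part of the generated bindings): libnd4j ops like this one
        // are normally invoked through ND4J's DynamicCustomOp machinery rather than by instantiating the
        // JavaCPP proxy class directly. A minimal, hedged example, assuming the op is registered under the
        // name "lstmBlockCell" and the usual org.nd4j.linalg API (Nd4j, DataType, DynamicCustomOp, INDArray);
        // all sizes are illustrative:
        //
        //     int bS = 2, inSize = 3, numUnits = 4;
        //     INDArray xt     = Nd4j.rand(DataType.FLOAT, bS, inSize);                      // input at time t
        //     INDArray csPrev = Nd4j.zeros(DataType.FLOAT, bS, numUnits);                   // previous cell state
        //     INDArray hPrev  = Nd4j.zeros(DataType.FLOAT, bS, numUnits);                   // previous output
        //     INDArray w      = Nd4j.rand(DataType.FLOAT, inSize + numUnits, 4 * numUnits); // concatenated weights
        //     INDArray wci    = Nd4j.zeros(DataType.FLOAT, numUnits);                       // peephole weights (unused here)
        //     INDArray wcf    = Nd4j.zeros(DataType.FLOAT, numUnits);
        //     INDArray wco    = Nd4j.zeros(DataType.FLOAT, numUnits);
        //     INDArray b      = Nd4j.zeros(DataType.FLOAT, 4 * numUnits);                   // biases
        //     INDArray[] out  = Nd4j.exec(DynamicCustomOp.builder("lstmBlockCell")
        //             .addInputs(xt, csPrev, hPrev, w, wci, wcf, wco, b)
        //             .addIntegerArguments(0)                  // 0 = no peephole connections
        //             .addFloatingPointArguments(0.0, 0.0)     // forget-gate bias, cell-state clipping (0 = off)
        //             .build());
        //     // out[6] holds the cell output y with shape [bS, numUnits]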

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation for an LSTM layer with optional peephole connections.
       * See lstmBlockCell for details; lstmBlockCell is used internally for the computation.
       * This op expects as input (and returns as output) sequences in one of 3 formats, depending on the data format arg:
       * dataFormat = 0 -> TNS: shape [timeLength, numExamples, inOutSize] - sometimes referred to as "time major"
       * dataFormat = 1 -> NST: shape [numExamples, inOutSize, timeLength]
       * dataFormat = 2 -> NTS: shape [numExamples, timeLength, inOutSize] - TF "time_major=false" layout
       *
       * Input arrays:
       *    0: max sequence length; long/int64 scalar
       *    1: input sequence, shape [seqLength, bS, inSize] for dataFormat = 0 (or permuted accordingly)
       *    2: previous/initial cell state [bS, numUnits]
       *    3: previous/initial output [bS, numUnits]
       *    4: weights - concatenated input-to-hidden and hidden-to-hidden weights, [(inSize+numUnits), 4*numUnits]
       *    5: weights - cell peephole (t-1) connections to input modulation gate, [numUnits]
       *    6: weights - cell peephole (t-1) connections to forget gate, [numUnits]
       *    7: weights - cell peephole (t) connections to output gate, [numUnits]
       *    8: biases, shape [4*numUnits]
       *
       *  Input integer arguments:
       *    0: if not zero, peephole connections are used
       *    1: data format - 0=TNS=[seqLen,mb,size]; 1=NST=[mb,size,seqLen]; 2=NTS=[mb,seqLen,size]
       *
       *  Input float arguments:
       *    0: bias added to the forget gate, used to reduce the scale of forgetting at the beginning of training
       *    1: clipping value for the cell state; if not equal to zero, the cell state is clipped to this value
       *
       * Output arrays:
       *    0: i      - input modulation gate activations, rank 3, shape as per dataFormat
       *    1: c (cs) - cell state (pre tanh), rank 3, shape as per dataFormat
       *    2: f      - forget gate activations, rank 3, shape as per dataFormat
       *    3: o      - output gate activations, rank 3, shape as per dataFormat
       *    4: z (ci) - block input, rank 3, shape as per dataFormat
       *    5: h (co) - cell state, post tanh, rank 3, shape as per dataFormat
       *    6: y (h)  - current cell output, rank 3, shape as per dataFormat
       *
       * A usage sketch follows the op declaration below.
       */
//         #if NOT_EXCLUDED(OP_lstmBlock)
        @Namespace("sd::ops") public static class lstmBlock extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmBlock(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmBlock(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmBlock position(long position) {
                return (lstmBlock)super.position(position);
            }
            @Override public lstmBlock getPointer(long i) {
                return new lstmBlock((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmBlock() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
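
        // Usage sketch for lstmBlock (assumption, same DynamicCustomOp pattern as shown for lstmBlockCell
        // above); the extra leading input is the max sequence length scalar and the second integer argument
        // selects the data format. Names and sizes are illustrative:
        //
        //     INDArray maxLen = Nd4j.scalar(DataType.INT64, (long) seqLength);
        //     INDArray x      = Nd4j.rand(DataType.FLOAT, seqLength, bS, inSize);  // dataFormat = 0 (TNS, "time major")
        //     INDArray[] out  = Nd4j.exec(DynamicCustomOp.builder("lstmBlock")
        //             .addInputs(maxLen, x, cs0, h0, w, wci, wcf, wco, b)
        //             .addIntegerArguments(0, 0)               // peephole off, dataFormat = 0 (TNS)
        //             .addFloatingPointArguments(0.0, 0.0)     // forget-gate bias, cell-state clipping
        //             .build());
        //     // out[6]: y with shape [seqLength, bS, numUnits] for dataFormat = 0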

        //////////////////////////////////////////////////////////////////////////
//         #if NOT_EXCLUDED(OP_lstmLayer)
        @Namespace("sd::ops") public static class lstmLayer extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmLayer(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmLayer(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmLayer position(long position) {
                return (lstmLayer)super.position(position);
            }
            @Override public lstmLayer getPointer(long i) {
                return new lstmLayer((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmLayer() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        //////////////////////////////////////////////////////////////////////////
//         #if NOT_EXCLUDED(OP_lstmLayer)
        @Namespace("sd::ops") public static class lstmLayer_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstmLayer_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstmLayer_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstmLayer_bp position(long position) {
                return (lstmLayer_bp)super.position(position);
            }
            @Override public lstmLayer_bp getPointer(long i) {
                return new lstmLayer_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public lstmLayer_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of the Simple Recurrent Unit (SRU) cell: Tao Lei, Yu Zhang, Yoav Artzi, "Training RNNs as Fast as CNNs"
       *
       * Input arrays:
       *    0: input with shape [batchSize x inSize], batchSize - batch size, inSize - number of features
       *    1: previous cell state [batchSize x inSize], i.e. at previous time step t-1
       *    2: weights [inSize x 3*inSize]
       *    3: biases [1 x 2*inSize]
       *
       * Output arrays:
       *    0: current cell output [batchSize x inSize], i.e. at current time step t
       *    1: current cell state  [batchSize x inSize], i.e. at current time step t
       *
       * A usage sketch follows the op declaration below.
       */
//         #if NOT_EXCLUDED(OP_sruCell)
        @Namespace("sd::ops") public static class sruCell extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sruCell(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sruCell(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sruCell position(long position) {
                return (sruCell)super.position(position);
            }
            @Override public sruCell getPointer(long i) {
                return new sruCell((Pointer)this).position(position + i);
            }
        
                                                                                    public sruCell() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
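
        // Shape sketch for sruCell (assumption, illustrative sizes): the SRU cell keeps the feature
        // dimension, so the weights are [inSize, 3*inSize], the biases [1, 2*inSize], and both outputs
        // are [batchSize, inSize]. Same hedged DynamicCustomOp pattern as above:
        //
        //     INDArray xt = Nd4j.rand(DataType.FLOAT, batchSize, inSize);
        //     INDArray c0 = Nd4j.zeros(DataType.FLOAT, batchSize, inSize);
        //     INDArray w  = Nd4j.rand(DataType.FLOAT, inSize, 3 * inSize);
        //     INDArray b  = Nd4j.zeros(DataType.FLOAT, 1, 2 * inSize);
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("sruCell")
        //             .addInputs(xt, c0, w, b)
        //             .build());
        //     // out[0] = h_t, out[1] = c_t, both [batchSize, inSize]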


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of the Gated Recurrent Unit (GRU) cell:
       *    Kyunghyun Cho, Bart van Merrienboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, Yoshua Bengio,
       *    "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation"
       *
       * Input arrays:
       *    0: input with shape [bS x inSize], bS - batch size, inSize - number of features
       *    1: previous cell output [bS x numUnits], i.e. at previous time step t-1
       *    2: RU weights - [(inSize+numUnits), 2*numUnits] - reset and update gates (input/recurrent weights)
       *    3: C weights - [(inSize+numUnits), numUnits] - cell gate (input/recurrent weights)
       *    4: reset and update gate biases, [2*numUnits]
       *    5: cell gate biases, [numUnits]
       *
       * Output arrays:
       *    0: reset gate output [bS, numUnits]
       *    1: update gate output [bS, numUnits]
       *    2: cell gate output [bS, numUnits]
       *    3: current cell output [bS, numUnits]
       *
       * A usage sketch follows the op declaration below.
       */
//         #if NOT_EXCLUDED(OP_gruCell)
        @Namespace("sd::ops") public static class gruCell extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gruCell(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gruCell(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gruCell position(long position) {
                return (gruCell)super.position(position);
            }
            @Override public gruCell getPointer(long i) {
                return new gruCell((Pointer)this).position(position + i);
            }
        
                                                                                    public gruCell() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
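
        // Usage sketch for gruCell (assumption, same hedged DynamicCustomOp pattern): six inputs,
        // four outputs, with the current cell output in out[3]. Names and sizes are illustrative:
        //
        //     INDArray wRU = Nd4j.rand(DataType.FLOAT, inSize + numUnits, 2 * numUnits); // reset/update gate weights
        //     INDArray wC  = Nd4j.rand(DataType.FLOAT, inSize + numUnits, numUnits);     // cell gate weights
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("gruCell")
        //             .addInputs(xt, hPrev, wRU, wC, bRU, bC)
        //             .build());
        //     // out[0..2]: reset/update/cell gate outputs, out[3]: current cell output, all [bS, numUnits]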

//         #if NOT_EXCLUDED(OP_gruCell)
        @Namespace("sd::ops") public static class gruCell_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gruCell_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gruCell_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gruCell_bp position(long position) {
                return (gruCell_bp)super.position(position);
            }
            @Override public gruCell_bp getPointer(long i) {
                return new gruCell_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public gruCell_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation "LSTM time sequences" with peephole connections:
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: initial cell output [batchSize x numProj], i.e. at time step = 0; if projection is disabled then numProj = numUnits
       *    2: initial cell state  [batchSize x numUnits], i.e. at time step = 0
       *    3: input-to-hidden  weights, [inSize  x 4*numUnits]
       *    4: hidden-to-hidden weights, [numProj x 4*numUnits]
       *    5: diagonal weights for peephole connections, [3*numUnits]
       *    6: projection weights, [numUnits x numProj]
       *    7: biases, [4*numUnits]
       *
       *  Input integer arguments:
       *    0: if not zero, peephole connections are used
       *    1: if not zero, the projection is applied; if zero, then numProj == numUnits is mandatory
       *
       *  Input float arguments:
       *    0: clipping value for the cell state; if not equal to zero, the cell state is clipped to this value
       *    1: clipping value for the projected cell output; if not equal to zero, the projected cell output is clipped to this value
       *    2: bias added to the forget gate, used to reduce the scale of forgetting at the beginning of training
       *
       * Output arrays:
       *    0: cell outputs [time x batchSize x numProj], i.e. per each time step
       *    1: cell states  [time x batchSize x numUnits], i.e. per each time step
       *
       * A usage sketch follows the op declaration below.
       */
//         #if NOT_EXCLUDED(OP_lstm)
        @Namespace("sd::ops") public static class lstm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstm position(long position) {
                return (lstm)super.position(position);
            }
            @Override public lstm getPointer(long i) {
                return new lstm((Pointer)this).position(position + i);
            }
        
                                                                                    public lstm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
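
        // Usage sketch for lstm (assumption, same hedged DynamicCustomOp pattern): with projection
        // disabled (second integer argument = 0), numProj must equal numUnits, so h0, the hidden-to-hidden
        // weights and the projection weights are all sized with numUnits. Names and sizes are illustrative:
        //
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("lstm")
        //             .addInputs(x, h0, c0, wX, wH, wPeep, wProj, b)
        //             .addIntegerArguments(0, 0)                 // no peepholes, no projection
        //             .addFloatingPointArguments(0.0, 0.0, 1.0)  // cell clip, output clip, forget-gate bias
        //             .build());
        //     // out[0]: cell outputs [time, batchSize, numProj], out[1]: cell states [time, batchSize, numUnits]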

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of the Gated Recurrent Unit (GRU) over time sequences:
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: initial cell output [batchSize x numUnits], i.e. at time step = 0
       *    2: input-to-hidden  weights, [inSize   x 3*numUnits]
       *    3: hidden-to-hidden weights, [numUnits x 3*numUnits]
       *    4: biases, [3*numUnits]
       *
       * Output arrays:
       *    0: cell outputs [time x batchSize x numUnits], i.e. per each time step
       *
       * A usage sketch follows the op declaration below.
       */
//         #if NOT_EXCLUDED(OP_gru)
        @Namespace("sd::ops") public static class gru extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gru(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gru(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gru position(long position) {
                return (gru)super.position(position);
            }
            @Override public gru getPointer(long i) {
                return new gru((Pointer)this).position(position + i);
            }
        
                                                                                    public gru() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
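
        // Usage sketch for gru (assumption, same hedged DynamicCustomOp pattern): a single forward pass
        // over the whole sequence, returning the per-time-step cell outputs as one rank-3 array:
        //
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("gru")
        //             .addInputs(x, h0, wX, wH, b)
        //             .build());
        //     // out[0]: [time, batchSize, numUnits]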

//         #if NOT_EXCLUDED(OP_gru)
        @Namespace("sd::ops") public static class gru_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gru_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gru_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gru_bp position(long position) {
                return (gru_bp)super.position(position);
            }
            @Override public gru_bp getPointer(long i) {
                return new gru_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public gru_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation "static RNN time sequences" (a simple RNN unrolled over a fixed number of time steps):
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: input-to-hidden  weights, [inSize   x numUnits]
       *    2: hidden-to-hidden weights, [numUnits x numUnits]
       *    3: biases, [2*numUnits]
       *    4: (optional) initial cell output [batchSize x numUnits], i.e. at time step = 0
       *    5: (optional) vector with shape [batchSize] containing integer values within [0, time); each element sets the max time step for the corresponding input in the batch, and no calculations are performed for time >= maxTimeStep
       *
       * Output arrays:
       *    0: cell outputs [time x batchSize x numUnits]
       *    1: cell final non-zero output [batchSize x numUnits]
       *
       * A usage sketch follows the op declaration below.
       */
        @Namespace("sd::ops") public static class static_rnn extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public static_rnn(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public static_rnn(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public static_rnn position(long position) {
                return (static_rnn)super.position(position);
            }
            @Override public static_rnn getPointer(long i) {
                return new static_rnn((Pointer)this).position(position + i);
            }
        
                                                                                    public static_rnn() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
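
        // Usage sketch for static_rnn (assumption, same hedged DynamicCustomOp pattern): inputs 4 and 5
        // are optional; when the per-example max time step vector is supplied, no calculations are
        // performed for time >= maxTimeStep and out[1] holds the final non-zero output per example:
        //
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("static_rnn")
        //             .addInputs(x, wX, wH, b, h0, maxTimeSteps)
        //             .build());
        //     // out[0]: [time, batchSize, numUnits], out[1]: [batchSize, numUnits]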

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation "dynamic RNN time sequences" (a simple RNN with selectable time-major or batch-major layout):
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize] or [batchSize x time x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: input-to-hidden  weights, [inSize   x numUnits]
       *    2: hidden-to-hidden weights, [numUnits x numUnits]
       *    3: biases, [2*numUnits]
       *    4: (optional) initial cell output [batchSize x numUnits], i.e. at time step = 0
       *    5: (optional) vector with shape [batchSize] containing integer values within [0, time); each element sets the max time step for the corresponding input in the batch, and no calculations are performed for time >= maxTimeStep
       *
       *  Input integer arguments:
       *    0: (optional) timeMajor - if non-zero, the input shape is [time, batchSize, ...], otherwise [batchSize, time, ...]
       *
       * Output arrays:
       *    0: cell outputs [time x batchSize x numUnits] or [batchSize x time x numUnits]
       *    1: cell final non-zero output [batchSize x numUnits]
       *
       * A usage sketch follows the op declaration below.
       */
        @Namespace("sd::ops") public static class dynamic_rnn extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dynamic_rnn(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dynamic_rnn(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dynamic_rnn position(long position) {
                return (dynamic_rnn)super.position(position);
            }
            @Override public dynamic_rnn getPointer(long i) {
                return new dynamic_rnn((Pointer)this).position(position + i);
            }
        
                                                                                    public dynamic_rnn() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
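
        // Usage sketch for dynamic_rnn (assumption, same hedged DynamicCustomOp pattern): identical inputs
        // to static_rnn, plus an optional integer argument selecting time-major (non-zero) vs batch-major
        // (zero) layout:
        //
        //     INDArray x = Nd4j.rand(DataType.FLOAT, batchSize, time, inSize);  // batch-major layout
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("dynamic_rnn")
        //             .addInputs(x, wX, wH, b, h0, maxTimeSteps)
        //             .addIntegerArguments(0)                   // 0 = [batchSize, time, ...]
        //             .build());
        //     // out[0]: [batchSize, time, numUnits], out[1]: [batchSize, numUnits]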

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation "static bidirectional RNN time sequences" (forward and backward simple RNNs unrolled over a fixed number of time steps):
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: input-to-hidden  weights for the forward RNN, [inSize   x numUnitsFW]
       *    2: hidden-to-hidden weights for the forward RNN, [numUnitsFW x numUnitsFW]
       *    3: biases for the forward RNN, [2*numUnitsFW]
       *    4: input-to-hidden  weights for the backward RNN, [inSize   x numUnitsBW]
       *    5: hidden-to-hidden weights for the backward RNN, [numUnitsBW x numUnitsBW]
       *    6: biases for the backward RNN, [2*numUnitsBW]
       *    7: (optional) initial cell output for the forward RNN [batchSize x numUnitsFW], i.e. at time step = 0
       *    8: (optional) initial cell output for the backward RNN [batchSize x numUnitsBW], i.e. at time step = 0
       *    9: (optional) vector with shape [batchSize] containing integer values within [0, time); each element sets the max time step for the corresponding input in the batch, and no calculations are performed for time >= maxTimeStep
       *
       * Output arrays:
       *    0: cell outputs [time x batchSize x (numUnitsFW + numUnitsBW)]
       *    1: cell final non-zero output for the forward RNN  [batchSize x numUnitsFW]
       *    2: cell final non-zero output for the backward RNN [batchSize x numUnitsBW]
       *
       * A usage sketch follows the op declaration below.
       */
        @Namespace("sd::ops") public static class static_bidirectional_rnn extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public static_bidirectional_rnn(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public static_bidirectional_rnn(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public static_bidirectional_rnn position(long position) {
                return (static_bidirectional_rnn)super.position(position);
            }
            @Override public static_bidirectional_rnn getPointer(long i) {
                return new static_bidirectional_rnn((Pointer)this).position(position + i);
            }
        
                                                                                    public static_bidirectional_rnn() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
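
        // Usage sketch for static_bidirectional_rnn (assumption, same hedged DynamicCustomOp pattern):
        // forward and backward passes use separate weight sets and may use different hidden sizes; the
        // per-step outputs are concatenated along the feature dimension:
        //
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("static_bidirectional_rnn")
        //             .addInputs(x, wxFW, whFW, bFW, wxBW, whBW, bBW)
        //             .build());
        //     // out[0]: [time, batchSize, numUnitsFW + numUnitsBW], out[1]/out[2]: final forward/backward outputs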

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of operation "dynamic bidirectional RNN time sequences" (forward and backward simple RNNs with selectable time-major or batch-major layout):
       *
       * Input arrays:
       *    0: input with shape [time x batchSize x inSize] or [batchSize x time x inSize], time - number of time steps, batchSize - batch size, inSize - number of features
       *    1: input-to-hidden  weights for the forward RNN, [inSize   x numUnitsFW]
       *    2: hidden-to-hidden weights for the forward RNN, [numUnitsFW x numUnitsFW]
       *    3: biases for the forward RNN, [2*numUnitsFW]
       *    4: input-to-hidden  weights for the backward RNN, [inSize   x numUnitsBW]
       *    5: hidden-to-hidden weights for the backward RNN, [numUnitsBW x numUnitsBW]
       *    6: biases for the backward RNN, [2*numUnitsBW]
       *    7: (optional) initial cell output for the forward RNN [batchSize x numUnitsFW], i.e. at time step = 0
       *    8: (optional) initial cell output for the backward RNN [batchSize x numUnitsBW], i.e. at time step = 0
       *    9: (optional) vector with shape [batchSize] containing integer values within [0, time); each element sets the max time step for the corresponding input in the batch, and no calculations are performed for time >= maxTimeStep
       *
       *  Input integer arguments:
       *    0: (optional) timeMajor - if non-zero, the input shape is [time, batchSize, ...], otherwise [batchSize, time, ...]
       *
       * Output arrays:
       *    0: cell outputs for the forward  RNN [time x batchSize x numUnitsFW] or [batchSize x time x numUnitsFW]
       *    1: cell outputs for the backward RNN [time x batchSize x numUnitsBW] or [batchSize x time x numUnitsBW]
       *    2: cell final non-zero output for the forward  RNN [batchSize x numUnitsFW]
       *    3: cell final non-zero output for the backward RNN [batchSize x numUnitsBW]
       *
       * A usage sketch follows the op declaration below.
       */
        @Namespace("sd::ops") public static class dynamic_bidirectional_rnn extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dynamic_bidirectional_rnn(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dynamic_bidirectional_rnn(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dynamic_bidirectional_rnn position(long position) {
                return (dynamic_bidirectional_rnn)super.position(position);
            }
            @Override public dynamic_bidirectional_rnn getPointer(long i) {
                return new dynamic_bidirectional_rnn((Pointer)this).position(position + i);
            }
        
                                                                                    public dynamic_bidirectional_rnn() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
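
        // Usage sketch for dynamic_bidirectional_rnn (assumption, same hedged DynamicCustomOp pattern):
        // unlike static_bidirectional_rnn, the forward and backward per-step outputs come back as two
        // separate rank-3 arrays (out[0], out[1]), and the optional integer argument selects time-major
        // vs batch-major layout as for dynamic_rnn:
        //
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("dynamic_bidirectional_rnn")
        //             .addInputs(x, wxFW, whFW, bFW, wxBW, whBW, bBW, h0FW, h0BW, maxTimeSteps)
        //             .addIntegerArguments(1)                   // 1 = time-major [time, batchSize, ...]
        //             .build());
        //     // out[2]/out[3]: final non-zero forward/backward outputs, [batchSize, numUnitsFW] / [batchSize, numUnitsBW]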



// #endif

// Parsed from ops/declarable/headers/transforms.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_TRANSFORMS_H
// #define LIBND4J_HEADERS_TRANSFORMS_H

// #include 
//         #if NOT_EXCLUDED(OP_clipbyvalue)
        @Namespace("sd::ops") public static class clipbyvalue extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clipbyvalue(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clipbyvalue(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clipbyvalue position(long position) {
                return (clipbyvalue)super.position(position);
            }
            @Override public clipbyvalue getPointer(long i) {
                return new clipbyvalue((Pointer)this).position(position + i);
            }
        
                                                                                    public clipbyvalue() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_clipbynorm)
        @Namespace("sd::ops") public static class clipbynorm extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clipbynorm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clipbynorm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clipbynorm position(long position) {
                return (clipbynorm)super.position(position);
            }
            @Override public clipbynorm getPointer(long i) {
                return new clipbynorm((Pointer)this).position(position + i);
            }
        
                                                                                    public clipbynorm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class clipbynorm_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clipbynorm_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clipbynorm_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clipbynorm_bp position(long position) {
                return (clipbynorm_bp)super.position(position);
            }
            @Override public clipbynorm_bp getPointer(long i) {
                return new clipbynorm_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public clipbynorm_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_clipbyavgnorm)
        @Namespace("sd::ops") public static class clipbyavgnorm extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clipbyavgnorm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clipbyavgnorm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clipbyavgnorm position(long position) {
                return (clipbyavgnorm)super.position(position);
            }
            @Override public clipbyavgnorm getPointer(long i) {
                return new clipbyavgnorm((Pointer)this).position(position + i);
            }
        
                                                                                    public clipbyavgnorm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class clipbyavgnorm_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clipbyavgnorm_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clipbyavgnorm_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clipbyavgnorm_bp position(long position) {
                return (clipbyavgnorm_bp)super.position(position);
            }
            @Override public clipbyavgnorm_bp getPointer(long i) {
                return new clipbyavgnorm_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public clipbyavgnorm_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_cumsum)
        @Namespace("sd::ops") public static class cumsum extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cumsum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cumsum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cumsum position(long position) {
                return (cumsum)super.position(position);
            }
            @Override public cumsum getPointer(long i) {
                return new cumsum((Pointer)this).position(position + i);
            }
        
                                                                                    public cumsum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_cumprod)
        @Namespace("sd::ops") public static class cumprod extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cumprod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cumprod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cumprod position(long position) {
                return (cumprod)super.position(position);
            }
            @Override public cumprod getPointer(long i) {
                return new cumprod((Pointer)this).position(position + i);
            }
        
                                                                                    public cumprod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_tile)
        @Namespace("sd::ops") public static class tile extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tile(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tile(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tile position(long position) {
                return (tile)super.position(position);
            }
            @Override public tile getPointer(long i) {
                return new tile((Pointer)this).position(position + i);
            }
        
                                                                                    public tile() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class tile_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tile_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tile_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tile_bp position(long position) {
                return (tile_bp)super.position(position);
            }
            @Override public tile_bp getPointer(long i) {
                return new tile_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public tile_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_repeat)
        @Namespace("sd::ops") public static class repeat extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public repeat(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public repeat(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public repeat position(long position) {
                return (repeat)super.position(position);
            }
            @Override public repeat getPointer(long i) {
                return new repeat((Pointer)this).position(position + i);
            }
        
                                                                                    public repeat() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_invert_permutation)
        @Namespace("sd::ops") public static class invert_permutation extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public invert_permutation(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public invert_permutation(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public invert_permutation position(long position) {
                return (invert_permutation)super.position(position);
            }
            @Override public invert_permutation getPointer(long i) {
                return new invert_permutation((Pointer)this).position(position + i);
            }
        
                                                                                    public invert_permutation() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        @Namespace("sd::ops") public static class concat extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public concat(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public concat(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public concat position(long position) {
                return (concat)super.position(position);
            }
            @Override public concat getPointer(long i) {
                return new concat((Pointer)this).position(position + i);
            }
        
                                                                                    public concat() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class concat_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public concat_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public concat_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public concat_bp position(long position) {
                return (concat_bp)super.position(position);
            }
            @Override public concat_bp getPointer(long i) {
                return new concat_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public concat_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }

//         #if NOT_EXCLUDED(OP_mergemax)
        @Namespace("sd::ops") public static class mergemax extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergemax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergemax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergemax position(long position) {
                return (mergemax)super.position(position);
            }
            @Override public mergemax getPointer(long i) {
                return new mergemax((Pointer)this).position(position + i);
            }
        
                                                    public mergemax() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
        @Namespace("sd::ops") public static class mergemax_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergemax_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergemax_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergemax_bp position(long position) {
                return (mergemax_bp)super.position(position);
            }
            @Override public mergemax_bp getPointer(long i) {
                return new mergemax_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public mergemax_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
        /*
         * Builds a tensor of indices: for each element position, the index of the input tensor that holds the maximum value at that position
         *
         * INPUT: a list of tensors with the same shape
         * OUTPUT: an integer tensor with the same shape
         * INT_ARG 0: (optional) output integer data type, INT32 by default
         */
//         #if NOT_EXCLUDED(OP_mergemaxindex)
        @Namespace("sd::ops") public static class mergemaxindex extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergemaxindex(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergemaxindex(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergemaxindex position(long position) {
                return (mergemaxindex)super.position(position);
            }
            @Override public mergemaxindex getPointer(long i) {
                return new mergemaxindex((Pointer)this).position(position + i);
            }
        
                                                                                    public mergemaxindex() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
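
        // Usage sketch for mergemaxindex (assumption, same hedged DynamicCustomOp pattern): given N
        // same-shaped inputs, each output element is the index (0..N-1) of the input holding the maximum
        // value at that position:
        //
        //     INDArray a = Nd4j.createFromArray(new float[]{1, 5});
        //     INDArray b = Nd4j.createFromArray(new float[]{4, 2});
        //     INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("mergemaxindex")
        //             .addInputs(a, b)
        //             .build());
        //     // out[0] -> [1, 0] as an INT32 array (b has the max at position 0, a at position 1)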

//         #if NOT_EXCLUDED(OP_mergeadd)
        @Namespace("sd::ops") public static class mergeadd extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergeadd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergeadd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergeadd position(long position) {
                return (mergeadd)super.position(position);
            }
            @Override public mergeadd getPointer(long i) {
                return new mergeadd((Pointer)this).position(position + i);
            }
        
                                                    public mergeadd() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
        @Namespace("sd::ops") public static class mergeadd_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergeadd_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergeadd_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergeadd_bp position(long position) {
                return (mergeadd_bp)super.position(position);
            }
            @Override public mergeadd_bp getPointer(long i) {
                return new mergeadd_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public mergeadd_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_mergeavg)
        @Namespace("sd::ops") public static class mergeavg extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergeavg(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergeavg(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergeavg position(long position) {
                return (mergeavg)super.position(position);
            }
            @Override public mergeavg getPointer(long i) {
                return new mergeavg((Pointer)this).position(position + i);
            }
        
                                                    public mergeavg() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
        @Namespace("sd::ops") public static class mergeavg_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mergeavg_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mergeavg_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mergeavg_bp position(long position) {
                return (mergeavg_bp)super.position(position);
            }
            @Override public mergeavg_bp getPointer(long i) {
                return new mergeavg_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public mergeavg_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_scatter_update)
        @Namespace("sd::ops") public static class scatter_update extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_update(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_update(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_update position(long position) {
                return (scatter_update)super.position(position);
            }
            @Override public scatter_update getPointer(long i) {
                return new scatter_update((Pointer)this).position(position + i);
            }
        
                                                                                    public scatter_update() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_Floor)
        @Namespace("sd::ops") public static class Floor extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Floor(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Floor(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Floor position(long position) {
                return (Floor)super.position(position);
            }
            @Override public Floor getPointer(long i) {
                return new Floor((Pointer)this).position(position + i);
            }
        
                                                    public Floor() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_Log1p)
        @Namespace("sd::ops") public static class Log1p extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Log1p(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Log1p(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Log1p position(long position) {
                return (Log1p)super.position(position);
            }
            @Override public Log1p getPointer(long i) {
                return new Log1p((Pointer)this).position(position + i);
            }
        
                                                    public Log1p() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reverse)
        @Namespace("sd::ops") public static class reverse extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reverse(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reverse(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reverse position(long position) {
                return (reverse)super.position(position);
            }
            @Override public reverse getPointer(long i) {
                return new reverse((Pointer)this).position(position + i);
            }
        
                                                                                    public reverse() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class reverse_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reverse_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reverse_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reverse_bp position(long position) {
                return (reverse_bp)super.position(position);
            }
            @Override public reverse_bp getPointer(long i) {
                return new reverse_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reverse_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_gather)
        @Namespace("sd::ops") public static class gather extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gather(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gather(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gather position(long position) {
                return (gather)super.position(position);
            }
            @Override public gather getPointer(long i) {
                return new gather((Pointer)this).position(position + i);
            }
        
                                                                                    public gather() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_pad)
        @Namespace("sd::ops") public static class pad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public pad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public pad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public pad position(long position) {
                return (pad)super.position(position);
            }
            @Override public pad getPointer(long i) {
                return new pad((Pointer)this).position(position + i);
            }
        
                                                                                    public pad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * creates a 2D identity matrix, or a batch of identical 2D identity matrices
         *
         * Input array:
         * any array may be provided - the operation simply ignores it
         *
         * Input float argument (if passed):
         * TArgs[0] - data type of the output array elements, default value is 5 (float)
         *
         * Input integer arguments:
         * IArgs[0]       - order of the output identity matrix, 99 -> 'c'-order, 102 -> 'f'-order
         * IArgs[1]       - number of rows in the inner-most 2D identity matrix
         * IArgs[2]       - optional, number of columns in the inner-most 2D identity matrix; if not provided it is taken to be equal to the number of rows
         * IArgs[3,4,...] - optional, shape of the batch; the output matrix will have leading batch dimensions of this shape
         */
//         #if NOT_EXCLUDED(OP_eye)
        @Namespace("sd::ops") public static class eye extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public eye(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public eye(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public eye position(long position) {
                return (eye)super.position(position);
            }
            @Override public eye getPointer(long i) {
                return new eye((Pointer)this).position(position + i);
            }
        
                                                                                    public eye() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
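        /**
         * Plain-Java sketch of the 2D output the eye op documented above produces for
         * IArgs = [order, numRows, numCols], ignoring order and batch dimensions; this is not
         * the native implementation and the method name is an illustrative assumption.
         * For example, eyeSketch(2, 3) yields {{1,0,0},{0,1,0}}.
         */
        public static float[][] eyeSketch(int numRows, int numCols) {
            float[][] out = new float[numRows][numCols];
            for (int r = 0; r < numRows; r++) {
                for (int c = 0; c < numCols; c++) {
                    out[r][c] = (r == c) ? 1.0f : 0.0f;    // ones on the main diagonal, zeros elsewhere
                }
            }
            return out;
        }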

//         #if NOT_EXCLUDED(OP_gather_nd)
        @Namespace("sd::ops") public static class gather_nd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public gather_nd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public gather_nd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public gather_nd position(long position) {
                return (gather_nd)super.position(position);
            }
            @Override public gather_nd getPointer(long i) {
                return new gather_nd((Pointer)this).position(position + i);
            }
        
                                                                                    public gather_nd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reverse_sequence)
        @Namespace("sd::ops") public static class reverse_sequence extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reverse_sequence(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reverse_sequence(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reverse_sequence position(long position) {
                return (reverse_sequence)super.position(position);
            }
            @Override public reverse_sequence getPointer(long i) {
                return new reverse_sequence((Pointer)this).position(position + i);
            }
        
                                                                                    public reverse_sequence() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_trace)
        @Namespace("sd::ops") public static class trace extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public trace(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public trace(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public trace position(long position) {
                return (trace)super.position(position);
            }
            @Override public trace getPointer(long i) {
                return new trace((Pointer)this).position(position + i);
            }
        
                                                                                    public trace() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_random_shuffle)
        @Namespace("sd::ops") public static class random_shuffle extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_shuffle(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_shuffle(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_shuffle position(long position) {
                return (random_shuffle)super.position(position);
            }
            @Override public random_shuffle getPointer(long i) {
                return new random_shuffle((Pointer)this).position(position + i);
            }
        
                                                    public random_shuffle() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * clips a list of given tensors so that their global (joint) norm does not exceed a given threshold
         *
         * Input:
         *    a list of tensors (at least one)
         *
         * Input floating point argument:
         *    clip_norm - the value used both as the threshold and as the norm the tensors are rescaled to
         *
         * returns the list of clipped tensors,
         * followed by global_norm as a scalar tensor
         */
//         #if NOT_EXCLUDED(OP_clip_by_global_norm)
        @Namespace("sd::ops") public static class clip_by_global_norm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public clip_by_global_norm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public clip_by_global_norm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public clip_by_global_norm position(long position) {
                return (clip_by_global_norm)super.position(position);
            }
            @Override public clip_by_global_norm getPointer(long i) {
                return new clip_by_global_norm((Pointer)this).position(position + i);
            }
        
                                                                                    public clip_by_global_norm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
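        /**
         * Plain-Java sketch of the clip_by_global_norm semantics documented above, not the
         * native implementation: the global norm is the L2 norm over all elements of all
         * tensors, and each tensor is rescaled by clipNorm / globalNorm only when the global
         * norm exceeds clipNorm. Method and parameter names are illustrative assumptions.
         */
        public static double clipByGlobalNormSketch(double[][] tensors, double clipNorm) {
            double sumSq = 0.0;
            for (double[] t : tensors)
                for (double v : t)
                    sumSq += v * v;
            double globalNorm = Math.sqrt(sumSq);          // joint L2 norm of all inputs
            if (globalNorm > clipNorm) {
                double scale = clipNorm / globalNorm;      // shrink all tensors by the same factor
                for (double[] t : tensors)
                    for (int i = 0; i < t.length; i++)
                        t[i] *= scale;                     // clip in place
            }
            return globalNorm;                             // corresponds to the trailing scalar output
        }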

        @Namespace("sd::ops") public static class tri extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tri(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tri(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tri position(long position) {
                return (tri)super.position(position);
            }
            @Override public tri getPointer(long i) {
                return new tri((Pointer)this).position(position + i);
            }
        
                                                                                    public tri() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }

        @Namespace("sd::ops") public static class triu extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public triu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public triu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public triu position(long position) {
                return (triu)super.position(position);
            }
            @Override public triu getPointer(long i) {
                return new triu((Pointer)this).position(position + i);
            }
        
                                                                                    public triu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }

        @Namespace("sd::ops") public static class triu_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public triu_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public triu_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public triu_bp position(long position) {
                return (triu_bp)super.position(position);
            }
            @Override public triu_bp getPointer(long i) {
                return new triu_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public triu_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }

//         #if NOT_EXCLUDED(OP_mirror_pad)
        @Namespace("sd::ops") public static class mirror_pad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mirror_pad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mirror_pad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mirror_pad position(long position) {
                return (mirror_pad)super.position(position);
            }
            @Override public mirror_pad getPointer(long i) {
                return new mirror_pad((Pointer)this).position(position + i);
            }
        
                                                                                    public mirror_pad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_cumsum)
        @Namespace("sd::ops") public static class cumsum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cumsum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cumsum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cumsum_bp position(long position) {
                return (cumsum_bp)super.position(position);
            }
            @Override public cumsum_bp getPointer(long i) {
                return new cumsum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public cumsum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_cumprod)
        @Namespace("sd::ops") public static class cumprod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cumprod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cumprod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cumprod_bp position(long position) {
                return (cumprod_bp)super.position(position);
            }
            @Override public cumprod_bp getPointer(long i) {
                return new cumprod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public cumprod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


//         #if NOT_EXCLUDED(OP_flatten)
        @Namespace("sd::ops") public static class flatten extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public flatten(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public flatten(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public flatten position(long position) {
                return (flatten)super.position(position);
            }
            @Override public flatten getPointer(long i) {
                return new flatten((Pointer)this).position(position + i);
            }
        
                                                                                    public flatten() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * returns a histogram (as a 1D array) with fixed bin width
         *
         * Input arrays:
         * - input array with elements to be binned into the output histogram
         * - range array, whose first element is the bottom limit and second element is the top limit of the histogram;
         *   note that input_value <= range[0] is mapped to histogram[0] and input_value >= range[1] is mapped to the last bin, histogram[-1]
         *
         * Input integer arguments:
         *    nbins (optional) - number of histogram bins, default value is 100
         */
//         #if NOT_EXCLUDED(OP_histogram_fixed_width)
        @Namespace("sd::ops") public static class histogram_fixed_width extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public histogram_fixed_width(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public histogram_fixed_width(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public histogram_fixed_width position(long position) {
                return (histogram_fixed_width)super.position(position);
            }
            @Override public histogram_fixed_width getPointer(long i) {
                return new histogram_fixed_width((Pointer)this).position(position + i);
            }
        
                                                                                    public histogram_fixed_width() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
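        /**
         * Plain-Java sketch of the histogram_fixed_width semantics documented above, not the
         * native implementation: values at or below range[0] fall into the first bin, values
         * at or above range[1] fall into the last bin, and everything in between is binned
         * with a fixed width of (right - left) / nbins. Names are illustrative assumptions.
         */
        public static long[] histogramFixedWidthSketch(float[] input, float left, float right, int nbins) {
            long[] bins = new long[nbins];
            double binWidth = (double) (right - left) / nbins;
            for (float v : input) {
                int idx;
                if (v <= left) {
                    idx = 0;                               // at or below the bottom limit -> first bin
                } else if (v >= right) {
                    idx = nbins - 1;                       // at or above the top limit -> last bin
                } else {
                    idx = (int) ((v - left) / binWidth);
                    if (idx >= nbins) idx = nbins - 1;     // guard against floating-point edge cases
                }
                bins[idx]++;
            }
            return bins;
        }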


        /**
         * standardizes the input array to zero mean and unit variance along the given axis
         */
//         #if NOT_EXCLUDED(OP_standardize)
                @Namespace("sd::ops") public static class standardize extends DeclarableOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public standardize(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public standardize(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public standardize position(long position) {
                        return (standardize)super.position(position);
                    }
                    @Override public standardize getPointer(long i) {
                        return new standardize((Pointer)this).position(position + i);
                    }
                
                                                                                    public standardize() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                @Namespace("sd::ops") public static class standardize_bp extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public standardize_bp(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public standardize_bp(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public standardize_bp position(long position) {
                        return (standardize_bp)super.position(position);
                    }
                    @Override public standardize_bp getPointer(long i) {
                        return new standardize_bp((Pointer)this).position(position + i);
                    }
                
                                                                                    public standardize_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
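        /**
         * Plain-Java sketch of the standardize semantics documented above for a 1-D input,
         * not the native implementation: subtract the mean and divide by the (population)
         * standard deviation so the result has zero mean and unit variance. The method name
         * and the choice of double[] are illustrative assumptions.
         */
        public static double[] standardizeSketch(double[] input) {
            double mean = 0.0;
            for (double v : input) mean += v;
            mean /= input.length;
            double variance = 0.0;
            for (double v : input) variance += (v - mean) * (v - mean);
            double std = Math.sqrt(variance / input.length);   // population standard deviation
            double[] out = new double[input.length];
            for (int i = 0; i < input.length; i++)
                out[i] = (input[i] - mean) / std;              // zero mean, unit variance
            return out;
        }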

        /**
         * This operation calculates a hash code over the input array, optionally along the given dimension
         */
//         #if NOT_EXCLUDED(OP_hashcode)
            @Namespace("sd::ops") public static class hashcode extends DeclarableCustomOp {
                static { Loader.load(); }
                /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                public hashcode(Pointer p) { super(p); }
                /** Native array allocator. Access with {@link Pointer#position(long)}. */
                public hashcode(long size) { super((Pointer)null); allocateArray(size); }
                private native void allocateArray(long size);
                @Override public hashcode position(long position) {
                    return (hashcode)super.position(position);
                }
                @Override public hashcode getPointer(long i) {
                    return new hashcode((Pointer)this).position(position + i);
                }
            
                                                                                    public hashcode() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation calculates the number of input entries falling into each histogram bin
         */
//         #if NOT_EXCLUDED(OP_histogram)
        @Namespace("sd::ops") public static class histogram extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public histogram(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public histogram(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public histogram position(long position) {
                return (histogram)super.position(position);
            }
            @Override public histogram getPointer(long i) {
                return new histogram((Pointer)this).position(position + i);
            }
        
                                                                                    public histogram() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
    


// #endif

// Parsed from ops/declarable/headers/parity_ops.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_PARITY_H
// #define LIBND4J_HEADERS_PARITY_H

// #include 
        /**
         * This operation returns the index of the max element in a given NDArray (optionally along given dimension(s))
         * Expected input:
         * 0: N-dimensional array
         * 1: optional axis vector
         *
         * Int args:
         * 0: optional axis
         */
//         #if NOT_EXCLUDED(OP_argmax)
        @Namespace("sd::ops") public static class argmax extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public argmax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public argmax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public argmax position(long position) {
                return (argmax)super.position(position);
            }
            @Override public argmax getPointer(long i) {
                return new argmax((Pointer)this).position(position + i);
            }
        
                                                                                    public argmax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
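        /**
         * Plain-Java sketch of the argmax semantics documented above for a flat array with no
         * axis argument, not the native implementation: return the index of the largest
         * element (argmin is the same with the comparison reversed). For example,
         * argmaxSketch(new float[] {3f, 7f, 5f}) returns 1.
         */
        public static int argmaxSketch(float[] input) {
            int best = 0;
            for (int i = 1; i < input.length; i++)
                if (input[i] > input[best])
                    best = i;                              // strict comparison keeps the first maximum on ties
            return best;
        }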

        /**
         * This operation returns the index of the min element in a given NDArray (optionally along given dimension(s))
         * Expected input:
         * 0: N-dimensional array
         * 1: optional axis vector
         *
         * Int args:
         * 0: optional axis
         */
//         #if NOT_EXCLUDED(OP_argmin)
        @Namespace("sd::ops") public static class argmin extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public argmin(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public argmin(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public argmin position(long position) {
                return (argmin)super.position(position);
            }
            @Override public argmin getPointer(long i) {
                return new argmin((Pointer)this).position(position + i);
            }
        
                                                                                    public argmin() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation returns the index of the element with the largest absolute value in a given NDArray (optionally along given dimension(s))
         * Expected input:
         * 0: N-dimensional array
         * 1: optional axis vector
         *
         * Int args:
         * 0: optional axis
         */
//         #if NOT_EXCLUDED(OP_argamax)
        @Namespace("sd::ops") public static class argamax extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public argamax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public argamax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public argamax position(long position) {
                return (argamax)super.position(position);
            }
            @Override public argamax getPointer(long i) {
                return new argamax((Pointer)this).position(position + i);
            }
        
                                                                                    public argamax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation returns the index of the element with the smallest absolute value in a given NDArray (optionally along given dimension(s))
         * Expected input:
         * 0: N-dimensional array
         * 1: optional axis vector
         *
         * Int args:
         * 0: optional axis
         */
//         #if NOT_EXCLUDED(OP_argamin)
        @Namespace("sd::ops") public static class argamin extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public argamin(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public argamin(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public argamin position(long position) {
                return (argamin)super.position(position);
            }
            @Override public argamin getPointer(long i) {
                return new argamin((Pointer)this).position(position + i);
            }
        
                                                                                    public argamin() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation computes the norm of an array; supported norm modes:
         * 0: frobenius
         * 1: euclidean (norm2)
         * 2: norm1
         * 3: norm2
         * 4: inf-norm
         * 5: p-norm
         *
         * Expected arguments:
         * input: N-dimensional array
         *
         *
         * Int args:
         * 0...: axis
         *
         * T args:
         * 0: norm mode
         * 1: p for p-norm
         */
//         #if NOT_EXCLUDED(OP_norm)
        @Namespace("sd::ops") public static class norm extends DeclarableReductionOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public norm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public norm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public norm position(long position) {
                return (norm)super.position(position);
            }
            @Override public norm getPointer(long i) {
                return new norm((Pointer)this).position(position + i);
            }
        
                                                                                    public norm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
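        /**
         * Plain-Java sketch of the norm modes listed above for a flat array, not the native
         * implementation; the mode numbering follows the comment (for a flat array the
         * Frobenius norm coincides with the Euclidean norm). Names are illustrative assumptions.
         */
        public static double normSketch(double[] x, int mode, double p) {
            double acc = 0.0;
            switch (mode) {
                case 0:                                    // frobenius (same as L2 for a flat array)
                case 1:                                    // euclidean (norm2)
                case 3:                                    // norm2
                    for (double v : x) acc += v * v;
                    return Math.sqrt(acc);
                case 2:                                    // norm1
                    for (double v : x) acc += Math.abs(v);
                    return acc;
                case 4:                                    // inf-norm
                    for (double v : x) acc = Math.max(acc, Math.abs(v));
                    return acc;
                case 5:                                    // p-norm
                    for (double v : x) acc += Math.pow(Math.abs(v), p);
                    return Math.pow(acc, 1.0 / p);
                default:
                    throw new IllegalArgumentException("Unknown norm mode: " + mode);
            }
        }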

        /**
        * Inserts elements provided by diagonal array into the main diagonal of innermost matrices of input array
        *
        * Input arrays:
        *  0: input array, considered as batch of matrices
        *  1: diagonal array containing elements to be inserted into input array,
        *     the following rank condition must be satisfied: diagonal_rank = input_rank - 1;
        *     the shape of the diagonal array equals the shape of the input array with its last dimension dropped,
        *     except that the last dimension of the diagonal array must equal the smaller of the last two input dimensions,
        *     that is: diagonal_shape[-1] = min(input_shape[-1], input_shape[-2]);
        *     for example, if input_shape = [A,B,C,D] then diagonal_shape = [A,B,min(C,D)]
        *
        * Output array:
        *  0: has the same shape as input, corresponding diagonal elements are substituted
        */
//         #if NOT_EXCLUDED(OP_matrix_set_diag)
        @Namespace("sd::ops") public static class matrix_set_diag extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_set_diag(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_set_diag(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_set_diag position(long position) {
                return (matrix_set_diag)super.position(position);
            }
            @Override public matrix_set_diag getPointer(long i) {
                return new matrix_set_diag((Pointer)this).position(position + i);
            }
        
                                                                                    public matrix_set_diag() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
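        /**
         * Plain-Java sketch of the matrix_set_diag semantics documented above for a single 2-D
         * matrix, not the native implementation: copy the input and overwrite its main diagonal
         * with the supplied values, where diagonal.length == min(rows, cols). The method name
         * is an illustrative assumption.
         */
        public static float[][] matrixSetDiagSketch(float[][] input, float[] diagonal) {
            int rows = input.length;
            int cols = input[0].length;
            float[][] out = new float[rows][];
            for (int r = 0; r < rows; r++)
                out[r] = input[r].clone();                 // off-diagonal elements stay unchanged
            int d = Math.min(rows, cols);
            for (int i = 0; i < d; i++)
                out[i][i] = diagonal[i];                   // substitute the main diagonal
            return out;
        }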

        /**
        * Inserts the elements of the diagonal array into the main diagonal of the innermost matrices of the output array;
        * the remaining output elements are set to zero
        *
        * Input array:
        *    diagonal: array containing elements to be inserted into output array,
        *              the following rank condition holds: diagonal_rank = output_rank - 1
        *
        * Output array:
        *   0: considered as a batch of matrices; for example, if the diagonal array has shape [A,B,C] then the output array has shape [A,B,C,C]
        */
        @Namespace("sd::ops") public static class matrix_diag extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_diag(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_diag(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_diag position(long position) {
                return (matrix_diag)super.position(position);
            }
            @Override public matrix_diag getPointer(long i) {
                return new matrix_diag((Pointer)this).position(position + i);
            }
        
                                                                                    public matrix_diag() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
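        /**
         * Plain-Java sketch of the matrix_diag semantics documented above for a 1-D diagonal,
         * not the native implementation: the output is a square matrix with the given values
         * on its main diagonal and zeros everywhere else.
         */
        public static float[][] matrixDiagSketch(float[] diagonal) {
            int n = diagonal.length;
            float[][] out = new float[n][n];               // zero-initialized by default
            for (int i = 0; i < n; i++)
                out[i][i] = diagonal[i];
            return out;
        }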

        /**
        * This op calculates regularized incomplete beta integral Ix(a, b).
        * Implementation is based on two algorithms depending on input values of a and b:
        * - when a and b are both > maxValue (3000.), the Gauss-Legendre quadrature method is applied
        * - when a and b are both <= maxValue (3000.), the modified Lentz algorithm for continued fractions is applied
        *
        * Input arrays:
        *    a: defines power t^{a-1}, must be > 0, type float.
        *    b: defines power (1-t)^{b-1}, must be > 0, type float.
        *    x: defines upper limit of integration, must be within (0 <= x <= 1) range, type float.
        *
        * Output array:
        *    0: values of the regularized incomplete beta integral corresponding to the variable upper limit x, type float
        *
        * All three input arrays and the output array must have the same shape
        */
//         #if NOT_EXCLUDED(OP_betainc)
        @Namespace("sd::ops") public static class betainc extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public betainc(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public betainc(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public betainc position(long position) {
                return (betainc)super.position(position);
            }
            @Override public betainc getPointer(long i) {
                return new betainc((Pointer)this).position(position + i);
            }
        
                                                                                    public betainc() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
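        /**
         * Plain-Java sketch of the regularized incomplete beta integral Ix(a, b) documented
         * above, evaluated by crude midpoint quadrature of t^(a-1) * (1-t)^(b-1) over [0, x]
         * and normalized by the same integral over [0, 1]. This only illustrates the
         * definition; it is not the Gauss-Legendre / Lentz algorithms used natively, and the
         * step count is an arbitrary illustrative choice.
         */
        public static double betaincSketch(double a, double b, double x) {
            int steps = 100000;
            double numer = 0.0;
            double denom = 0.0;
            for (int i = 0; i < steps; i++) {
                double t = (i + 0.5) / steps;              // midpoint of the i-th subinterval of [0, 1]
                double f = Math.pow(t, a - 1.0) * Math.pow(1.0 - t, b - 1.0);
                denom += f;                                // accumulates B(a, b)
                if (t <= x) numer += f;                    // accumulates the incomplete integral up to x
            }
            return numer / denom;                          // the common 1/steps factor cancels
        }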

        /**
         * This operation is provided mostly for compatibility purposes.
         * PLEASE NOTE: consider using Add instead
         * Expected arguments:
         * 0: N-dimensional input
         * 1: bias vector
         */
//         #if NOT_EXCLUDED(OP_biasadd)
        @Namespace("sd::ops") public static class biasadd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public biasadd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public biasadd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public biasadd position(long position) {
                return (biasadd)super.position(position);
            }
            @Override public biasadd getPointer(long i) {
                return new biasadd((Pointer)this).position(position + i);
            }
        
                                                                                    public biasadd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class biasadd_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public biasadd_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public biasadd_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public biasadd_bp position(long position) {
                return (biasadd_bp)super.position(position);
            }
            @Override public biasadd_bp getPointer(long i) {
                return new biasadd_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public biasadd_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
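        // Illustrative sketch (not part of the generated binding): the bias-add semantics
        // documented above for the 2-D case, with the bias vector broadcast along the last
        // dimension. The helper name is hypothetical.
        private static void biasAddReferenceSketch(float[][] input, float[] bias) {
            for (int i = 0; i < input.length; i++) {
                for (int j = 0; j < input[i].length; j++) {
                    input[i][j] += bias[j];                       // in-place add of the broadcast bias
                }
            }
        }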

        /**
         * Returns a diagonal tensor with the given diagonal values. Given a diagonal, this operation returns a tensor with that diagonal and everything else padded with zeros.
         */
//         #if NOT_EXCLUDED(OP_diag)
        @Namespace("sd::ops") public static class diag extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public diag(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public diag(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public diag position(long position) {
                return (diag)super.position(position);
            }
            @Override public diag getPointer(long i) {
                return new diag((Pointer)this).position(position + i);
            }
        
                                                                                    public diag() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
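        // Illustrative sketch (not part of the generated binding): the diag behaviour documented
        // above for a 1-D diagonal, i.e. a square matrix with the given values on the diagonal
        // and zeros everywhere else. The helper name is hypothetical.
        private static float[][] diagReferenceSketch(float[] diagonal) {
            float[][] out = new float[diagonal.length][diagonal.length];  // zero-initialized by Java
            for (int i = 0; i < diagonal.length; i++) {
                out[i][i] = diagonal[i];
            }
            return out;
        }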

        /**
         * Returns the diagonal of a given tensor. This operation is the inverse of diag: it extracts the diagonal values and discards the zero padding.
         */
//         #if NOT_EXCLUDED(OP_diag_part)
        @Namespace("sd::ops") public static class diag_part extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public diag_part(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public diag_part(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public diag_part position(long position) {
                return (diag_part)super.position(position);
            }
            @Override public diag_part getPointer(long i) {
                return new diag_part((Pointer)this).position(position + i);
            }
        
                                                                                    public diag_part() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * Returns the diagonal vectors of the submatrices within a given tensor.
         * It is the inverse of matrix_set_diag.
         * Treating the input tensor as batched 2D matrices, it flattens their diagonals into vectors (1D) of diagonal values.
         *
         * Input : batched tensor with rank >= 2
         * Output: tensor with rank one less than the input
         */
//         #if NOT_EXCLUDED(OP_matrix_diag_part)
        @Namespace("sd::ops") public static class matrix_diag_part extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_diag_part(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_diag_part(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_diag_part position(long position) {
                return (matrix_diag_part)super.position(position);
            }
            @Override public matrix_diag_part getPointer(long i) {
                return new matrix_diag_part((Pointer)this).position(position + i);
            }
        
                                                                                    public matrix_diag_part() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * QR decomposition: A = QR, where Q is orthogonal (Q * QT = I) and R is upper triangular.
         * For A (MxN) Q is M x M and R is (NxN).
         *
         * Input :
         *    0 - float (or complex float) tensor with shape {.,..,...,M,N} - batch of float matrices
         *
         * Output:
         *    0 - float tensor with shape {.,..,...,MxN} - batch of orthogonal matrices {Qs}
         *    1 - float tensor with shape {.,..,...,NxN} - batch of upper triangular matrices {Rs}
         */
//         #if NOT_EXCLUDED(OP_qr)
        @Namespace("sd::ops") public static class qr extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public qr(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public qr(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public qr position(long position) {
                return (qr)super.position(position);
            }
            @Override public qr getPointer(long i) {
                return new qr((Pointer)this).position(position + i);
            }
        
                                                                                    public qr() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
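        // Illustrative sketch (not part of the generated binding): a thin QR factorization of a
        // single M x N matrix via classical Gram-Schmidt, matching the Q (M x N) and R (N x N)
        // output shapes listed above. It is only a reference for the documented semantics, not
        // the native implementation; the helper name is hypothetical.
        private static double[][][] qrReferenceSketch(double[][] a) {
            int m = a.length, n = a[0].length;
            double[][] q = new double[m][n];
            double[][] r = new double[n][n];
            for (int j = 0; j < n; j++) {
                double[] v = new double[m];
                for (int i = 0; i < m; i++) v[i] = a[i][j];
                for (int k = 0; k < j; k++) {
                    double dot = 0.0;
                    for (int i = 0; i < m; i++) dot += q[i][k] * a[i][j];
                    r[k][j] = dot;
                    for (int i = 0; i < m; i++) v[i] -= dot * q[i][k];  // remove projection onto q_k
                }
                double norm = 0.0;
                for (int i = 0; i < m; i++) norm += v[i] * v[i];
                norm = Math.sqrt(norm);
                r[j][j] = norm;
                for (int i = 0; i < m; i++) q[i][j] = norm == 0.0 ? 0.0 : v[i] / norm;
            }
            return new double[][][] { q, r };
        }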

        /**
         * This operation takes 2 arrays: original values and values to be excluded. It returns 2 arrays: the values left after exclusion, and their indices in the original array.
         * Expected arguments:
         * 0: vector with original values
         * 1: vector with values to exclude
         */
//         #if NOT_EXCLUDED(OP_listdiff)
        @Namespace("sd::ops") public static class listdiff extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public listdiff(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public listdiff(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public listdiff position(long position) {
                return (listdiff)super.position(position);
            }
            @Override public listdiff getPointer(long i) {
                return new listdiff((Pointer)this).position(position + i);
            }
        
                                                                                    public listdiff() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
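        // Illustrative sketch (not part of the generated binding): the listdiff semantics
        // documented above - values of the first vector that do not occur in the second,
        // plus their positions in the first vector. The helper name is hypothetical.
        private static int[][] listdiffReferenceSketch(int[] values, int[] toExclude) {
            java.util.Set<Integer> excluded = new java.util.HashSet<Integer>();
            for (int e : toExclude) excluded.add(e);
            java.util.List<Integer> kept = new java.util.ArrayList<Integer>();
            java.util.List<Integer> indices = new java.util.ArrayList<Integer>();
            for (int i = 0; i < values.length; i++) {
                if (!excluded.contains(values[i])) { kept.add(values[i]); indices.add(i); }
            }
            int[] outValues = new int[kept.size()];
            int[] outIndices = new int[indices.size()];
            for (int i = 0; i < outValues.length; i++) {
                outValues[i] = kept.get(i);
                outIndices[i] = indices.get(i);
            }
            return new int[][] { outValues, outIndices };         // survivors and their indices
        }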

        /**
         * This operation applies the Add operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements to be added to the input at the given indices
         */
//         #if NOT_EXCLUDED(OP_scatter_add)
        @Namespace("sd::ops") public static class scatter_add extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_add(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_add(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_add position(long position) {
                return (scatter_add)super.position(position);
            }
            @Override public scatter_add getPointer(long i) {
                return new scatter_add((Pointer)this).position(position + i);
            }
        
                                                    public scatter_add() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
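        // Illustrative sketch (not part of the generated binding): scatter_add semantics for a
        // 2-D input, where indices select rows (the first dimension) and the matching rows of
        // "updates" are added in place. The sibling scatter ops below differ only in the
        // combining function (subtract, multiply, divide, assign, max, min). The helper name
        // is hypothetical.
        private static void scatterAddReferenceSketch(float[][] input, int[] indices, float[][] updates) {
            for (int i = 0; i < indices.length; i++) {
                for (int j = 0; j < input[indices[i]].length; j++) {
                    input[indices[i]][j] += updates[i][j];        // Add; the other scatter ops swap this operator
                }
            }
        }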

        /**
         * This operation applies the Subtract operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements to be subtracted from the input at the given indices
         */
//         #if NOT_EXCLUDED(OP_scatter_sub)
        @Namespace("sd::ops") public static class scatter_sub extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_sub(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_sub(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_sub position(long position) {
                return (scatter_sub)super.position(position);
            }
            @Override public scatter_sub getPointer(long i) {
                return new scatter_sub((Pointer)this).position(position + i);
            }
        
                                                    public scatter_sub() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation applies the Multiply operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements to multiply the input by at the given indices
         */
//         #if NOT_EXCLUDED(OP_scatter_mul)
        @Namespace("sd::ops") public static class scatter_mul extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_mul(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_mul(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_mul position(long position) {
                return (scatter_mul)super.position(position);
            }
            @Override public scatter_mul getPointer(long i) {
                return new scatter_mul((Pointer)this).position(position + i);
            }
        
                                                    public scatter_mul() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation applies the Divide operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements to divide the input by at the given indices
         */
//         #if NOT_EXCLUDED(OP_scatter_div)
        @Namespace("sd::ops") public static class scatter_div extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_div(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_div(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_div position(long position) {
                return (scatter_div)super.position(position);
            }
            @Override public scatter_div getPointer(long i) {
                return new scatter_div((Pointer)this).position(position + i);
            }
        
                                                    public scatter_div() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation applies the Assign operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements to be assigned to the input at the given indices
         */
//         #if NOT_EXCLUDED(OP_scatter_upd)
        @Namespace("sd::ops") public static class scatter_upd extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_upd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_upd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_upd position(long position) {
                return (scatter_upd)super.position(position);
            }
            @Override public scatter_upd getPointer(long i) {
                return new scatter_upd((Pointer)this).position(position + i);
            }
        
                                                    public scatter_upd() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation applies the Max operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements compared element-wise against the input at the given indices (the maximum is kept)
         */
//         #if NOT_EXCLUDED(OP_scatter_max)
        @Namespace("sd::ops") public static class scatter_max extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_max(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_max(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_max position(long position) {
                return (scatter_max)super.position(position);
            }
            @Override public scatter_max getPointer(long i) {
                return new scatter_max((Pointer)this).position(position + i);
            }
        
                                                    public scatter_max() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation applies the Min operation to specific input elements according to the given indices
         * Expected arguments:
         * input: array to be updated
         * indices: array containing indexes for the first dimension of input
         * updates: array containing elements compared element-wise against the input at the given indices (the minimum is kept)
         */
//         #if NOT_EXCLUDED(OP_scatter_min)
        @Namespace("sd::ops") public static class scatter_min extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_min(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_min(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_min position(long position) {
                return (scatter_min)super.position(position);
            }
            @Override public scatter_min getPointer(long i) {
                return new scatter_min((Pointer)this).position(position + i);
            }
        
                                                    public scatter_min() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation scatter "updates" elements into new output array according to given "indices"
         * Expected arguments:
         * indices: array containing elements/slices indexes of output array to put "updates" elements into, the rest output elements will be zeros
         * updates: array containing elements to be inserted into output array
         * shape: contains shape of output array
         */
//         #if NOT_EXCLUDED(OP_scatter_nd)
        @Namespace("sd::ops") public static class scatter_nd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_nd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_nd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_nd position(long position) {
                return (scatter_nd)super.position(position);
            }
            @Override public scatter_nd getPointer(long i) {
                return new scatter_nd((Pointer)this).position(position + i);
            }
        
                                                                                    public scatter_nd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
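        // Illustrative sketch (not part of the generated binding): scatter_nd semantics for the
        // simplest 1-D case - a fresh zero-filled output of the requested length, with "updates"
        // written at the given element indices. The helper name is hypothetical.
        private static float[] scatterNdReferenceSketch(int[] indices, float[] updates, int outputLength) {
            float[] out = new float[outputLength];                // remaining elements stay zero
            for (int i = 0; i < indices.length; i++) {
                out[indices[i]] = updates[i];
            }
            return out;
        }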

        /**
         * This operation scatter "updates" elements into input array along given "indices"
         * Expected arguments:
         * input: array to be updated
         * indices: array containing elements/slices indexes of input array to put "updates" elements into
         * updates: array containing elements to be inserted into input array
         */
//         #if NOT_EXCLUDED(OP_scatter_nd_update)
        @Namespace("sd::ops") public static class scatter_nd_update extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_nd_update(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_nd_update(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_nd_update position(long position) {
                return (scatter_nd_update)super.position(position);
            }
            @Override public scatter_nd_update getPointer(long i) {
                return new scatter_nd_update((Pointer)this).position(position + i);
            }
        
                                                    public scatter_nd_update() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation adds "updates" elements to input array along given "indices"
         * Expected arguments:
         * input: array to be updated
         * indices: array containing elements/slices indexes of input array to add "updates" elements to
         * updates: array containing elements to be interfered with input
         */
//         #if NOT_EXCLUDED(OP_scatter_nd_add)
        @Namespace("sd::ops") public static class scatter_nd_add extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_nd_add(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_nd_add(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_nd_add position(long position) {
                return (scatter_nd_add)super.position(position);
            }
            @Override public scatter_nd_add getPointer(long i) {
                return new scatter_nd_add((Pointer)this).position(position + i);
            }
        
                                                    public scatter_nd_add() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation subtract "updates" elements from input array along given "indices"
         * Expected arguments:
         * input: array to be updated
         * indices: array containing elements/slices indexes of input array to subtract "updates" elements from
         * updates: array containing elements to be interfered with input
         */
//         #if NOT_EXCLUDED(OP_scatter_nd_sub)
        @Namespace("sd::ops") public static class scatter_nd_sub extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public scatter_nd_sub(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public scatter_nd_sub(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public scatter_nd_sub position(long position) {
                return (scatter_nd_sub)super.position(position);
            }
            @Override public scatter_nd_sub getPointer(long i) {
                return new scatter_nd_sub((Pointer)this).position(position + i);
            }
        
                                                    public scatter_nd_sub() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation takes the input's shape and returns a new NDArray filled with the specified value
         * Expected arguments:
         * input: N-dimensional array
         *
         * T args:
         * 0: scalar value, used to fill NDArray
         */
//         #if NOT_EXCLUDED(OP_fill_as)
        @Namespace("sd::ops") public static class fill_as extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public fill_as(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public fill_as(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public fill_as position(long position) {
                return (fill_as)super.position(position);
            }
            @Override public fill_as getPointer(long i) {
                return new fill_as((Pointer)this).position(position + i);
            }
        
                                                                                    public fill_as() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation applies the element-wise rint (round to integral value) operation
         */
//         #if NOT_EXCLUDED(OP_rint)
        @Namespace("sd::ops") public static class rint extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rint(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rint(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rint position(long position) {
                return (rint)super.position(position);
            }
            @Override public rint getPointer(long i) {
                return new rint((Pointer)this).position(position + i);
            }
        
                                                    public rint() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation returns the unique elements of the input array as a vector, together with their original indices in the input array
         * Expected input:
         * input: N-dimensional array
         */
//         #if NOT_EXCLUDED(OP_unique)
        @Namespace("sd::ops") public static class unique extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unique(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unique(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unique position(long position) {
                return (unique)super.position(position);
            }
            @Override public unique getPointer(long i) {
                return new unique((Pointer)this).position(position + i);
            }
        
                                                                                    public unique() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation returns 3 1D arrays for a given 1D array: its unique values, their ids, and their counts
         * input:
         *     0 - 1D array
         *
         * output:
         *     0 - 1D array with unique values
         *     1 - 1D array with ids for values in array above
         *     2 - 1D array with counts for values in array above
         */
//         #if NOT_EXCLUDED(OP_unique_with_counts)
        @Namespace("sd::ops") public static class unique_with_counts extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unique_with_counts(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unique_with_counts(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unique_with_counts position(long position) {
                return (unique_with_counts)super.position(position);
            }
            @Override public unique_with_counts getPointer(long i) {
                return new unique_with_counts((Pointer)this).position(position + i);
            }
        
                                                                                    public unique_with_counts() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
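        // Illustrative sketch (not part of the generated binding): the three outputs documented
        // above for a 1-D int input - unique values in order of first appearance, per-element
        // ids into that list, and per-value counts. The helper name is hypothetical.
        private static int[][] uniqueWithCountsReferenceSketch(int[] input) {
            java.util.LinkedHashMap<Integer, Integer> firstSeen = new java.util.LinkedHashMap<Integer, Integer>();
            int[] ids = new int[input.length];
            for (int i = 0; i < input.length; i++) {
                Integer id = firstSeen.get(input[i]);
                if (id == null) { id = firstSeen.size(); firstSeen.put(input[i], id); }
                ids[i] = id;
            }
            int[] values = new int[firstSeen.size()];
            int[] counts = new int[firstSeen.size()];
            int k = 0;
            for (Integer v : firstSeen.keySet()) values[k++] = v;
            for (int id : ids) counts[id]++;
            return new int[][] { values, ids, counts };
        }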

        /**
         * This operation splits the input NDArray into multiple TADs along the given dimensions
         * Expected arguments:
         * input: N-dimensional array
         *
         * Int args:
         * 0..: TAD axis
         */
//         #if NOT_EXCLUDED(OP_tear)
        @Namespace("sd::ops") public static class tear extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tear(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tear(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tear position(long position) {
                return (tear)super.position(position);
            }
            @Override public tear getPointer(long i) {
                return new tear((Pointer)this).position(position + i);
            }
        
                                                                                    public tear() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op does the same as tear, it just uses a different input format.
         */
//         #if NOT_EXCLUDED(OP_unstack)
        @Namespace("sd::ops") public static class unstack extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unstack(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unstack(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unstack position(long position) {
                return (unstack)super.position(position);
            }
            @Override public unstack getPointer(long i) {
                return new unstack((Pointer)this).position(position + i);
            }
        
                                                                                    public unstack() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
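        // Illustrative sketch (not part of the generated binding): unstacking a 2-D array along
        // dimension 0, which simply yields its rows as independent 1-D arrays. The helper name
        // is hypothetical.
        private static float[][] unstackReferenceSketch(float[][] input) {
            float[][] parts = new float[input.length][];
            for (int i = 0; i < input.length; i++) {
                parts[i] = input[i].clone();                      // each row becomes one output array
            }
            return parts;
        }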

        /**
         * This operation extracts an (optionally strided) slice from a tensor.
         */
//         #if NOT_EXCLUDED(OP_strided_slice)
        @Namespace("sd::ops") public static class strided_slice extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public strided_slice(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public strided_slice(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public strided_slice position(long position) {
                return (strided_slice)super.position(position);
            }
            @Override public strided_slice getPointer(long i) {
                return new strided_slice((Pointer)this).position(position + i);
            }
        
                                                                                    public strided_slice() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                } // TODO: new op type needed. that returns VIEW
        @Namespace("sd::ops") public static class strided_slice_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public strided_slice_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public strided_slice_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public strided_slice_bp position(long position) {
                return (strided_slice_bp)super.position(position);
            }
            @Override public strided_slice_bp getPointer(long i) {
                return new strided_slice_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public strided_slice_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
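        // Illustrative sketch (not part of the generated binding): a 1-D strided slice with a
        // positive stride and begin < end, gathering input[begin], input[begin + stride], ...
        // The native op additionally supports masks, negative strides and multi-dimensional
        // slicing; the helper name is hypothetical.
        private static float[] stridedSliceReferenceSketch(float[] input, int begin, int end, int stride) {
            int count = Math.max(0, (end - begin + stride - 1) / stride);
            float[] out = new float[count];
            for (int i = 0, idx = begin; i < count; i++, idx += stride) {
                out[i] = input[idx];
            }
            return out;
        }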

        /**
         * This operation extracts a slice from a tensor.
         *
         */
//         #if NOT_EXCLUDED(OP_slice)
        @Namespace("sd::ops") public static class slice extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public slice(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public slice(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public slice position(long position) {
                return (slice)super.position(position);
            }
            @Override public slice getPointer(long i) {
                return new slice((Pointer)this).position(position + i);
            }
        
                                                                                    public slice() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class slice_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public slice_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public slice_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public slice_bp position(long position) {
                return (slice_bp)super.position(position);
            }
            @Override public slice_bp getPointer(long i) {
                return new slice_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public slice_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation generates sequences: basically from ... to, with step used as the increment.
         * Expected arguments:
         * start: optional scalar with starting value
         * stop: optional scalar with end value
         * step: optional scalar with step value
         *
         * Int args: (optional)
         * 0: optional scalar with starting value
         * 1: optional scalar with end value
         * 2: optional scalar with step value
         *
         * T args: (optional)
         * 0: optional scalar with starting value
         * 1: optional scalar with end value
         * 2: optional scalar with step value
         */
//         #if NOT_EXCLUDED(OP_range)
        @Namespace("sd::ops") public static class range extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public range(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public range(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public range position(long position) {
                return (range)super.position(position);
            }
            @Override public range getPointer(long i) {
                return new range((Pointer)this).position(position + i);
            }
        
                                                                                    public range() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
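        // Illustrative sketch (not part of the generated binding): the range semantics documented
        // above for a positive step - start, start + step, ... strictly below stop. The helper
        // name is hypothetical.
        private static double[] rangeReferenceSketch(double start, double stop, double step) {
            int count = (int) Math.max(0, Math.ceil((stop - start) / step));
            double[] out = new double[count];
            for (int i = 0; i < count; i++) {
                out[i] = start + i * step;
            }
            return out;
        }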

        /**
         * This operation returns a one-hot encoded n-dimensional array
         * Expected arguments:
         * input: N-dimensional array
         *
         * T args:
         * 0: 'on' value
         * 1: 'off' value
         *
         * Int args:
         * 0: depth
         * 1: axis
         */
//         #if NOT_EXCLUDED(OP_onehot)
        @Namespace("sd::ops") public static class onehot extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public onehot(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public onehot(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public onehot position(long position) {
                return (onehot)super.position(position);
            }
            @Override public onehot getPointer(long i) {
                return new onehot((Pointer)this).position(position + i);
            }
        
                                                                                    public onehot() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
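        // Illustrative sketch (not part of the generated binding): one-hot encoding of a 1-D
        // index vector into a [length, depth] matrix with the documented 'on'/'off' values along
        // the last axis. The helper name is hypothetical.
        private static float[][] oneHotReferenceSketch(int[] indices, int depth, float on, float off) {
            float[][] out = new float[indices.length][depth];
            for (float[] row : out) java.util.Arrays.fill(row, off);
            for (int i = 0; i < indices.length; i++) {
                if (indices[i] >= 0 && indices[i] < depth) {
                    out[i][indices[i]] = on;                      // out-of-range indices stay 'off'
                }
            }
            return out;
        }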


        /**
         * This operation calculates the confusion matrix for a
         * pair of prediction and label 1-D arrays.
         * Expected arguments:
         * Input arrays:
         *   0 - predictions: 1-D array
         *   1 - labels: 1-D array
         *   2 - weights : optional
         * Int args:
         *   0 - num_classes: optional
         *
         */
//         #if NOT_EXCLUDED(OP_confusion_matrix)
        @Namespace("sd::ops") public static class confusion_matrix extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public confusion_matrix(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public confusion_matrix(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public confusion_matrix position(long position) {
                return (confusion_matrix)super.position(position);
            }
            @Override public confusion_matrix getPointer(long i) {
                return new confusion_matrix((Pointer)this).position(position + i);
            }
        
                                                                                    public confusion_matrix() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
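        // Illustrative sketch (not part of the generated binding): a confusion matrix accumulated
        // as counts[label][prediction], optionally weighted. The row/column convention is an
        // assumption for the sketch; the helper name is hypothetical.
        private static double[][] confusionMatrixReferenceSketch(int[] labels, int[] predictions,
                                                                 double[] weights, int numClasses) {
            double[][] matrix = new double[numClasses][numClasses];
            for (int i = 0; i < labels.length; i++) {
                matrix[labels[i]][predictions[i]] += weights == null ? 1.0 : weights[i];
            }
            return matrix;
        }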

        /**
		 * This operation stacks a list of rank-R tensors into one rank-(R+1) tensor.
		 * Expected arguments:
		 * 0...: N-Dimensional arrays to stack
		 *
		 */
//         #if NOT_EXCLUDED(OP_stack)
        @Namespace("sd::ops") public static class stack extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public stack(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public stack(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public stack position(long position) {
                return (stack)super.position(position);
            }
            @Override public stack getPointer(long i) {
                return new stack((Pointer)this).position(position + i);
            }
        
                                                                                    public stack() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
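        // Illustrative sketch (not part of the generated binding): stacking N rank-1 tensors of
        // equal length along a new leading axis, producing an N x L rank-2 tensor. The helper
        // name is hypothetical.
        private static float[][] stackReferenceSketch(float[]... inputs) {
            float[][] out = new float[inputs.length][];
            for (int i = 0; i < inputs.length; i++) {
                out[i] = inputs[i].clone();                       // each input becomes one row
            }
            return out;
        }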

        /**
         * This operation returns the length of the input array
         * Expected arguments:
         * input: N-dimensional array
         *
         * TODO: make this operation reduction, to allow TAD -> size
         */
//         #if NOT_EXCLUDED(OP_size)
        @Namespace("sd::ops") public static class size extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public size(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public size(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public size position(long position) {
                return (size)super.position(position);
            }
            @Override public size getPointer(long i) {
                return new size((Pointer)this).position(position + i);
            }
        
                                                                                    public size() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                } // add DeclarableScalarOp?
//         #endif


        /**
         * This operation returns the rank of the input array as a scalar value.
         */
//         #if NOT_EXCLUDED(OP_rank)
        @Namespace("sd::ops") public static class rank extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rank(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rank(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rank position(long position) {
                return (rank)super.position(position);
            }
            @Override public rank getPointer(long i) {
                return new rank((Pointer)this).position(position + i);
            }
        
                                                                                    public rank() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                } // ^
//         #endif


//         #if NOT_EXCLUDED(OP_broadcastgradientargs)
        @Namespace("sd::ops") public static class broadcastgradientargs extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public broadcastgradientargs(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public broadcastgradientargs(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public broadcastgradientargs position(long position) {
                return (broadcastgradientargs)super.position(position);
            }
            @Override public broadcastgradientargs getPointer(long i) {
                return new broadcastgradientargs((Pointer)this).position(position + i);
            }
        
                                                    public broadcastgradientargs() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * This operation takes the input's shape and returns a new NDArray filled with zeros
         * Expected arguments:
         * input: N-dimensional array
         *
         */
//         #if NOT_EXCLUDED(OP_zeros_as)
        @Namespace("sd::ops") public static class zeros_as extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public zeros_as(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public zeros_as(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public zeros_as position(long position) {
                return (zeros_as)super.position(position);
            }
            @Override public zeros_as getPointer(long i) {
                return new zeros_as((Pointer)this).position(position + i);
            }
        
                                                                                    public zeros_as() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation takes the input's shape and returns a new NDArray filled with ones
         * Expected arguments:
         * input: N-dimensional array
         *
         */
//         #if NOT_EXCLUDED(OP_ones_as)
        @Namespace("sd::ops") public static class ones_as extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ones_as(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ones_as(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ones_as position(long position) {
                return (ones_as)super.position(position);
            }
            @Override public ones_as getPointer(long i) {
                return new ones_as((Pointer)this).position(position + i);
            }
        
                                                                                    public ones_as() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation applies element-wise pow(x, 2) to the given input
         * Expected arguments:
         * input: N-Dimensional array
         */
//         #if NOT_EXCLUDED(OP_square)
        @Namespace("sd::ops") public static class square extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public square(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public square(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public square position(long position) {
                return (square)super.position(position);
            }
            @Override public square getPointer(long i) {
                return new square((Pointer)this).position(position + i);
            }
        
                                                    public square() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
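
        /*
         * Usage sketch for square (editorial, not part of the generated bindings); it assumes the
         * higher-level Nd4j/DynamicCustomOp API rather than the raw classes declared here.
         *
         *   INDArray x  = Nd4j.createFromArray(new float[]{1f, 2f, 3f});
         *   INDArray x2 = Nd4j.exec(DynamicCustomOp.builder("square")
         *           .addInputs(x)
         *           .build())[0];          // [1, 4, 9]
         */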

        /**
        * This op calculates the Hurwitz zeta function zeta(x, q) = sum_{n=0}^{inf} (q + n)^{-x}.
        * The implementation is based on the Euler-Maclaurin summation formula.
        *
        *   Input arrays:
        *   x: defines the exponent {-x}, must be > 1, type float.
        *   q: defines the summand in the denominator, must be > 0, type float.
        *
        * Output array:
        *    0: corresponding values of the Hurwitz zeta function
        *
        * Both input arrays and the output array must have the same shape
        */
//         #if NOT_EXCLUDED(OP_zeta)
        @Namespace("sd::ops") public static class zeta extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public zeta(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public zeta(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public zeta position(long position) {
                return (zeta)super.position(position);
            }
            @Override public zeta getPointer(long i) {
                return new zeta((Pointer)this).position(position + i);
            }
        
                                                                                    public zeta() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
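
        /*
         * Usage sketch for zeta (editorial, not part of the generated bindings); the call chain
         * below assumes ND4J's DynamicCustomOp builder. As a sanity check, zeta(2, 1) equals the
         * Riemann zeta value pi^2/6, approximately 1.6449.
         *
         *   INDArray x = Nd4j.createFromArray(new float[]{2f});
         *   INDArray q = Nd4j.createFromArray(new float[]{1f});
         *   INDArray z = Nd4j.exec(DynamicCustomOp.builder("zeta")
         *           .addInputs(x, q)
         *           .build())[0];          // ~[1.6449]
         */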

        /**
        * This op calculates the polygamma function psi^(n)(x). The implementation is based on the series representation written in
        * terms of the Hurwitz zeta function: polygamma = (-1)^{n+1} * n! * zeta(n+1, x).
        *
        * Input arrays:
        *    0: n - defines the derivative order (n+1), type integer (currently implemented as a float cast to integer)
        *    1: x - abscissa points where the polygamma function is evaluated, type float
        *
        * Output array:
        *    0: values of the polygamma function at the corresponding x, type float
        *
        * Both input arrays and the output array have the same shape
        */
//         #if NOT_EXCLUDED(OP_polygamma)
        @Namespace("sd::ops") public static class polygamma extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public polygamma(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public polygamma(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public polygamma position(long position) {
                return (polygamma)super.position(position);
            }
            @Override public polygamma getPointer(long i) {
                return new polygamma((Pointer)this).position(position + i);
            }
        
                                                                                    public polygamma() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

       /**
        * This op calculates lgamma function lgamma(x) = log(Gamma(x))
        *
        * Input arrays:
        *    0: x - input matrix
        *
        * Output array:
        *    0: log of Gamma(x)
        *
        */
//         #if NOT_EXCLUDED(OP_lgamma)
        @Namespace("sd::ops") public static class lgamma extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lgamma(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lgamma(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lgamma position(long position) {
                return (lgamma)super.position(position);
            }
            @Override public lgamma getPointer(long i) {
                return new lgamma((Pointer)this).position(position + i);
            }
        
                                                    public lgamma() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
        * This op calculates the digamma function psi(x), the derivative of log(Gamma(x))
        *
        * Input arrays:
        *    0: x - abscissa points where the digamma function is evaluated, type float
        *
        * Output array:
        *    0: values of the digamma function at the corresponding x, type float
        *
        */
//         #if NOT_EXCLUDED(OP_digamma)
        @Namespace("sd::ops") public static class digamma extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public digamma(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public digamma(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public digamma position(long position) {
                return (digamma)super.position(position);
            }
            @Override public digamma getPointer(long i) {
                return new digamma((Pointer)this).position(position + i);
            }
        
                                                                                    public digamma() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation takes a shape as its first argument and returns a new NDArray of that shape, filled with a specific scalar value.
         * Input arrays:
         * 0 - shape vector
         * 1 - optional scalar NDArray
         *
         * T arguments:
         * 0 - optional scalar value
         *
         */
//         #if NOT_EXCLUDED(OP_fill)
        @Namespace("sd::ops") public static class fill extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public fill(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public fill(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public fill position(long position) {
                return (fill)super.position(position);
            }
            @Override public fill getPointer(long i) {
                return new fill((Pointer)this).position(position + i);
            }
        
                                                                                    public fill() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
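
        /*
         * Usage sketch for fill (editorial, not part of the generated bindings); it assumes ND4J's
         * DynamicCustomOp builder. The shape vector is the first input, the scalar value the second.
         *
         *   INDArray shape  = Nd4j.createFromArray(2, 3);
         *   INDArray value  = Nd4j.scalar(7.0f);
         *   INDArray filled = Nd4j.exec(DynamicCustomOp.builder("fill")
         *           .addInputs(shape, value)
         *           .build())[0];          // 2x3 array, every element equal to 7
         */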

        /**
         * This operation splits the given NDArray into chunks of the specified sizes, along the given dimension
         * Input arrays:
         * 0 - input array
         * 1 - array of sizes
         * 2 - optional axis
         *
         * Integer arguments:
         * 0 - optional axis
         *
         */
//         #if NOT_EXCLUDED(OP_split_v)
        @Namespace("sd::ops") public static class split_v extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public split_v(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public split_v(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public split_v position(long position) {
                return (split_v)super.position(position);
            }
            @Override public split_v getPointer(long i) {
                return new split_v((Pointer)this).position(position + i);
            }
        
                                                                                    public split_v() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation splits the given NDArray into a number of equal chunks, along the given dimension
         * Input arrays:
         * 0 - input array
         * 1 - optional axis
         *
         * Integer arguments:
         * 0 - number of splits
         * 1 - optional axis
         */
//         #if NOT_EXCLUDED(OP_split)
        @Namespace("sd::ops") public static class split extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public split(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public split(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public split position(long position) {
                return (split)super.position(position);
            }
            @Override public split getPointer(long i) {
                return new split((Pointer)this).position(position + i);
            }
        
                                                                                    public split() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
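
        /*
         * Usage sketch for split (editorial, not part of the generated bindings); it assumes ND4J's
         * DynamicCustomOp builder, with the number of splits and the axis passed as integer
         * arguments as documented above - treat that argument layout as an assumption.
         *
         *   INDArray in = Nd4j.createFromArray(new float[][]{{1f, 2f, 3f, 4f, 5f, 6f}});  // shape [1, 6]
         *   INDArray[] parts = Nd4j.exec(DynamicCustomOp.builder("split")
         *           .addInputs(in)
         *           .addIntegerArguments(3, 1)     // 3 equal chunks along axis 1
         *           .build());                     // three arrays of shape [1, 2]
         */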


        /**
         * This operation adjusts image hue by delta
         * Input arrays:
         * 0 - input array with rank >= 3, which must have at least one dimension equal to 3, i.e. the dimension containing channels.
         * 1 - optional argument, input scalar-array containing delta
         *
         * T arguments:
         * 0 - optional argument, delta value
         *
         * Int arguments:
         * 0 - optional argument, index of the dimension with the 3 channels
         */
//         #if NOT_EXCLUDED(OP_adjust_hue)
        @Namespace("sd::ops") public static class adjust_hue extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public adjust_hue(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public adjust_hue(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public adjust_hue position(long position) {
                return (adjust_hue)super.position(position);
            }
            @Override public adjust_hue getPointer(long i) {
                return new adjust_hue((Pointer)this).position(position + i);
            }
        
                                                                                    public adjust_hue() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation adjusts image saturation by the given saturation factor
         * Input arrays:
         * 0 - input array with rank >= 3, which must have at least one dimension equal to 3, i.e. the dimension containing channels.
         * 1 - optional argument, input scalar-array containing the saturation factor
         *
         * T arguments:
         * 0 - optional argument, saturation factor
         *
         * Int arguments:
         * 0 - optional argument, index of the dimension with the 3 channels
         */
//         #if NOT_EXCLUDED(OP_adjust_saturation)
        @Namespace("sd::ops") public static class adjust_saturation extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public adjust_saturation(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public adjust_saturation(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public adjust_saturation position(long position) {
                return (adjust_saturation)super.position(position);
            }
            @Override public adjust_saturation getPointer(long i) {
                return new adjust_saturation((Pointer)this).position(position + i);
            }
        
                                                                                    public adjust_saturation() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation adjusts image contrast by the given factor ( z = (x - mean) * factor + mean )
         * Input arrays:
         * 0 - input array with rank >= 3, whose last dimension must be equal to 3, i.e. the dimension containing channels.
         * 1 - optional argument, input scalar-array containing the contrast factor
         *
         * T arguments:
         * 0 - optional argument, contrast factor
         *
         */
//         #if NOT_EXCLUDED(OP_adjust_contrast)
        @Namespace("sd::ops") public static class adjust_contrast extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public adjust_contrast(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public adjust_contrast(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public adjust_contrast position(long position) {
                return (adjust_contrast)super.position(position);
            }
            @Override public adjust_contrast getPointer(long i) {
                return new adjust_contrast((Pointer)this).position(position + i);
            }
        
                                                                                    public adjust_contrast() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class adjust_contrast_v2 extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public adjust_contrast_v2(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public adjust_contrast_v2(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public adjust_contrast_v2 position(long position) {
                return (adjust_contrast_v2)super.position(position);
            }
            @Override public adjust_contrast_v2 getPointer(long i) {
                return new adjust_contrast_v2((Pointer)this).position(position + i);
            }
        
                                                                                    public adjust_contrast_v2() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
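
        /*
         * Worked example for the contrast formula above, z = (x - mean) * factor + mean
         * (plain Java, editorial, not part of the generated bindings): for one channel with
         * values x = [1, 2, 3], mean = 2 and factor = 2, the result is z = [0, 2, 4].
         *
         *   float mean = 2f, factor = 2f;
         *   float[] x = {1f, 2f, 3f};
         *   float[] z = new float[x.length];
         *   for (int i = 0; i < x.length; i++) {
         *       z[i] = (x[i] - mean) * factor + mean;   // -> {0f, 2f, 4f}
         *   }
         */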




        /**
         * This operation rearranges data from depth into blocks of spatial data. This is the reverse transformation
         * of the space_to_depth op. This op's output is a copy of the input tensor where values from the depth dimension
         * are moved in spatial blocks to the height and width dimensions. Int attr 0 indicates the input
         * block size and how the data is moved.
         * Input:
         *     0 - 4D tensor of the given type
         * Output:
         *     0 - 4D tensor of given type and proper shape
         *
         * Int arguments:
         *     0 - block size
         *     1 - output data format: 0 ("NHWC"): shape{ batch, height, width, channels }
         *                             1 ("NCHW"): shape{ batch, channels, height, width }
         *                             2 ("NCHW_VECT_C"): int8 shape{ batch, channels / 4, height, width, 4 }
         *                             optional (default 0)
         */
//         #if NOT_EXCLUDED(OP_depth_to_space)
        @Namespace("sd::ops") public static class depth_to_space extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public depth_to_space(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public depth_to_space(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public depth_to_space position(long position) {
                return (depth_to_space)super.position(position);
            }
            @Override public depth_to_space getPointer(long i) {
                return new depth_to_space((Pointer)this).position(position + i);
            }
        
                                                                                    public depth_to_space() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
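
        /*
         * Shape example for depth_to_space (editorial, not part of the generated bindings):
         * with block size 2 and NHWC layout, an input of shape {1, 2, 2, 12} becomes
         * {1, 4, 4, 3} - height and width are multiplied by the block size, channels are
         * divided by block_size^2. A hedged call via ND4J's DynamicCustomOp builder:
         *
         *   INDArray in  = Nd4j.rand(new int[]{1, 2, 2, 12});
         *   INDArray out = Nd4j.exec(DynamicCustomOp.builder("depth_to_space")
         *           .addInputs(in)
         *           .addIntegerArguments(2, 0)     // block size 2, data format 0 (NHWC)
         *           .build())[0];                  // shape {1, 4, 4, 3}
         */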

        /**
         * This operation rearranges blocks of spatial data into depth. This op's output is a copy of the input tensor
         * where values from the height and width dimensions are moved to the depth dimension. Int attr 0 indicates
         * the input block size.
         *
         * Input:
         *     - 4D tensor of given type
         * Output:
         *     - 4D tensor
         *
         * Int arguments:
         *     0 - block size
         *     1 - output data format: 0 ("NHWC"): shape{ batch, height, width, channels }
         *                             1 ("NCHW"): shape{ batch, channels, height, width }
         *                             2 ("NCHW_VECT_C"): int8 shape{ batch, channels / 4, height, width, 4 }
         *                             optional (default 0)
         *
         */
//         #if NOT_EXCLUDED(OP_space_to_depth)
        @Namespace("sd::ops") public static class space_to_depth extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public space_to_depth(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public space_to_depth(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public space_to_depth position(long position) {
                return (space_to_depth)super.position(position);
            }
            @Override public space_to_depth getPointer(long i) {
                return new space_to_depth((Pointer)this).position(position + i);
            }
        
                                                                                    public space_to_depth() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op calculates the cross-product of its input arguments
         * Input arguments
         * 0 - vector or tensor A
         * 1 - vector or tensor B
         */
//         #if NOT_EXCLUDED(OP_cross)
        @Namespace("sd::ops") public static class cross extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cross(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cross(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cross position(long position) {
                return (cross)super.position(position);
            }
            @Override public cross getPointer(long i) {
                return new cross((Pointer)this).position(position + i);
            }
        
                                                    public cross() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
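
        /*
         * Worked example for cross (editorial, not part of the generated bindings): the cross
         * product of the unit vectors [1, 0, 0] and [0, 1, 0] is [0, 0, 1]. A hedged call via
         * ND4J's DynamicCustomOp builder:
         *
         *   INDArray a = Nd4j.createFromArray(new float[]{1f, 0f, 0f});
         *   INDArray b = Nd4j.createFromArray(new float[]{0f, 1f, 0f});
         *   INDArray c = Nd4j.exec(DynamicCustomOp.builder("cross")
         *           .addInputs(a, b)
         *           .build())[0];          // [0, 0, 1]
         */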

        /**
         * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op
         * outputs a copy of the input tensor where values from the height and width dimensions are moved to the
         * batch dimension. After the zero-padding, both height and width of the input must be divisible by the block
         * size.
         *
         * Inputs:
         *  0 - input tensor
         *  1 - 2D paddings tensor (shape {M, 2})
         *
         *  Output:
         *    - result tensor
         *
         *  Int args:
         *      0 - block size (M)
         *
         */
//         #if NOT_EXCLUDED(OP_space_to_batch)
        @Namespace("sd::ops") public static class space_to_batch extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public space_to_batch(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public space_to_batch(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public space_to_batch position(long position) {
                return (space_to_batch)super.position(position);
            }
            @Override public space_to_batch getPointer(long i) {
                return new space_to_batch((Pointer)this).position(position + i);
            }
        
                                                                                    public space_to_batch() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /*
         * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks of shape
         * block_shape, and interleaves these blocks with the "batch" dimension (0) such that in the output,
         * the spatial dimensions [1, ..., M] correspond to the position within the grid, and the batch dimension
         * combines both the position within a spatial block and the original batch position. Prior to division into
         * blocks, the spatial dimensions of the input are optionally zero padded according to paddings.
         *
         * Inputs:
         *      0 - input (N-D tensor)
         *      1 - block_shape - int 1D tensor with M length
         *      2 - paddings - int 2D tensor with shape {M, 2}
         *
         * Output:
         *      - N-D tensor with the same type as input 0.
         *
         * */
//         #if NOT_EXCLUDED(OP_space_to_batch_nd)
        @Namespace("sd::ops") public static class space_to_batch_nd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public space_to_batch_nd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public space_to_batch_nd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public space_to_batch_nd position(long position) {
                return (space_to_batch_nd)super.position(position);
            }
            @Override public space_to_batch_nd getPointer(long i) {
                return new space_to_batch_nd((Pointer)this).position(position + i);
            }
        
                                                                                    public space_to_batch_nd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation is the inverse of space_to_batch: it rearranges (permutes) data from the
         * batch dimension back into blocks of spatial data, cropping the result according to the
         * given crop values.
         */
//         #if NOT_EXCLUDED(OP_batch_to_space)
        @Namespace("sd::ops") public static class batch_to_space extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public batch_to_space(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public batch_to_space(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public batch_to_space position(long position) {
                return (batch_to_space)super.position(position);
            }
            @Override public batch_to_space getPointer(long i) {
                return new batch_to_space((Pointer)this).position(position + i);
            }
        
                                                                                    public batch_to_space() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_batch_to_space_nd)
        @Namespace("sd::ops") public static class batch_to_space_nd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public batch_to_space_nd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public batch_to_space_nd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public batch_to_space_nd position(long position) {
                return (batch_to_space_nd)super.position(position);
            }
            @Override public batch_to_space_nd getPointer(long i) {
                return new batch_to_space_nd((Pointer)this).position(position + i);
            }
        
                                                                                    public batch_to_space_nd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * top_k operation returns the k largest values (and their indices) of the given NDArray;
         *  when the sort flag is true (the default), the results are sorted by value
         *  in descending order.
         *  The first parameter is the input NDArray.
         *  The second is k (default 1) - optional
         *  The third is a boolean flag (default is true): 0 - as is, 1 - sorted by value - optional
         */
//         #if NOT_EXCLUDED(OP_top_k)
        @Namespace("sd::ops") public static class top_k extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public top_k(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public top_k(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public top_k position(long position) {
                return (top_k)super.position(position);
            }
            @Override public top_k getPointer(long i) {
                return new top_k((Pointer)this).position(position + i);
            }
        
                                                                                    public top_k() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
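
        /*
         * Usage sketch for top_k (editorial, not part of the generated bindings); passing k as an
         * integer argument of ND4J's DynamicCustomOp builder is an assumption, not taken from this file.
         *
         *   INDArray in = Nd4j.createFromArray(new float[]{3f, 1f, 4f, 1f, 5f});
         *   INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("top_k")
         *           .addInputs(in)
         *           .addIntegerArguments(2)        // k = 2
         *           .build());
         *   // out[0] -> values  [5, 4]
         *   // out[1] -> indices [4, 2]
         */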

        /**
         * in_top_k operation returns a vector of boolean values indicating, for each row of a
         *  2D matrix of predictions, whether the target index is among the k top predicted values.
         *  The first parameter is an NDArray of predicted values (2D array).
         *  The second is an NDArray vector of target indices to search for among the k top values.
         *  The third is k
         */
//         #if NOT_EXCLUDED(OP_in_top_k)
        @Namespace("sd::ops") public static class in_top_k extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public in_top_k(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public in_top_k(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public in_top_k position(long position) {
                return (in_top_k)super.position(position);
            }
            @Override public in_top_k getPointer(long i) {
                return new in_top_k((Pointer)this).position(position + i);
            }
        
                                                                                    public in_top_k() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
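
        /*
         * Usage sketch for in_top_k (editorial, not part of the generated bindings); the argument
         * order follows the description above, and passing k as an integer argument is an assumption.
         *
         *   INDArray predictions = Nd4j.createFromArray(new float[][]{{0.1f, 0.8f, 0.1f},
         *                                                             {0.3f, 0.3f, 0.4f}});
         *   INDArray targets = Nd4j.createFromArray(1, 0);
         *   INDArray hits = Nd4j.exec(DynamicCustomOp.builder("in_top_k")
         *           .addInputs(predictions, targets)
         *           .addIntegerArguments(1)        // k = 1
         *           .build())[0];                  // [true, false]
         */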

        /**
         * moments operation calculates the mean and variance of the given NDArray,
         * reducing the result along the given axes.
         * When reducing over all axes, the result is the mean and variance of all elements in the array.
         * Otherwise, two NDArrays with the means and variances along the remaining dimensions are returned.
         * Axes can be passed either as a second NDArray or as an int vector.
         *
         * the optional flag "keep_dims" can be set as a T param
         */
//         #if NOT_EXCLUDED(OP_moments)
        @Namespace("sd::ops") public static class moments extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public moments(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public moments(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public moments position(long position) {
                return (moments)super.position(position);
            }
            @Override public moments getPointer(long i) {
                return new moments((Pointer)this).position(position + i);
            }
        
                                                                                    public moments() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
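
        /*
         * Usage sketch for moments (editorial, not part of the generated bindings); reducing over
         * the single axis of [1, 2, 3, 4] yields mean 2.5 and (population) variance 1.25.
         *
         *   INDArray in   = Nd4j.createFromArray(new float[]{1f, 2f, 3f, 4f});
         *   INDArray axes = Nd4j.createFromArray(0);
         *   INDArray[] out = Nd4j.exec(DynamicCustomOp.builder("moments")
         *           .addInputs(in, axes)
         *           .build());
         *   // out[0] -> mean     2.5
         *   // out[1] -> variance 1.25
         */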

        /**
         * embedding_lookup - searches for submatrices in the given matrix and returns them
         * according to the index array given.
         */
//         #if NOT_EXCLUDED(OP_embedding_lookup)
        @Namespace("sd::ops") public static class embedding_lookup extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public embedding_lookup(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public embedding_lookup(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public embedding_lookup position(long position) {
                return (embedding_lookup)super.position(position);
            }
            @Override public embedding_lookup getPointer(long i) {
                return new embedding_lookup((Pointer)this).position(position + i);
            }
        
                                                                                    public embedding_lookup() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * dynamic_partition - partitions an input tensor into num_partitions pieces
         * according to the index array given.
         *
         * the first param - NDArray to be partitioned.
         * the second param - index array
         * the third param (integer param) - number of partitions.
         *
         * returns num_partitions NDArrays as output
         */
//         #if NOT_EXCLUDED(OP_dynamic_partition)
        @Namespace("sd::ops") public static class dynamic_partition extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dynamic_partition(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dynamic_partition(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dynamic_partition position(long position) {
                return (dynamic_partition)super.position(position);
            }
            @Override public dynamic_partition getPointer(long i) {
                return new dynamic_partition((Pointer)this).position(position + i);
            }
        
                                                                                    public dynamic_partition() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
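
        /*
         * Usage sketch for dynamic_partition (editorial, not part of the generated bindings);
         * the number of partitions is passed as an integer argument, as documented above.
         *
         *   INDArray data    = Nd4j.createFromArray(new float[]{1f, 2f, 3f, 4f, 5f});
         *   INDArray indices = Nd4j.createFromArray(0, 1, 0, 1, 0);
         *   INDArray[] parts = Nd4j.exec(DynamicCustomOp.builder("dynamic_partition")
         *           .addInputs(data, indices)
         *           .addIntegerArguments(2)        // num_partitions = 2
         *           .build());
         *   // parts[0] -> [1, 3, 5]
         *   // parts[1] -> [2, 4]
         */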

//         #if NOT_EXCLUDED(OP_dynamic_partition_bp)
        @Namespace("sd::ops") public static class dynamic_partition_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dynamic_partition_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dynamic_partition_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dynamic_partition_bp position(long position) {
                return (dynamic_partition_bp)super.position(position);
            }
            @Override public dynamic_partition_bp getPointer(long i) {
                return new dynamic_partition_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public dynamic_partition_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * dynamic_stitch - merges the partitions given as the second group of params (input tensors)
         * into a single tensor according to the index arrays given.
         *
         * the first params - index arrays
         * the second params - tensors to be merged
         *
         * returns a single NDArray as output
         *
         * the operation is the inverse of dynamic_partition
         */
//         #if NOT_EXCLUDED(OP_dynamic_stitch)
        @Namespace("sd::ops") public static class dynamic_stitch extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dynamic_stitch(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dynamic_stitch(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dynamic_stitch position(long position) {
                return (dynamic_stitch)super.position(position);
            }
            @Override public dynamic_stitch getPointer(long i) {
                return new dynamic_stitch((Pointer)this).position(position + i);
            }
        
                                                                                    public dynamic_stitch() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
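
        /*
         * Usage sketch for dynamic_stitch (editorial, not part of the generated bindings); it
         * merges values back in index order, undoing the dynamic_partition example above. The
         * input ordering (index arrays first, then data tensors) follows the description above
         * and is otherwise an assumption.
         *
         *   INDArray idx0 = Nd4j.createFromArray(0, 2, 4);
         *   INDArray idx1 = Nd4j.createFromArray(1, 3);
         *   INDArray d0   = Nd4j.createFromArray(new float[]{1f, 3f, 5f});
         *   INDArray d1   = Nd4j.createFromArray(new float[]{2f, 4f});
         *   INDArray merged = Nd4j.exec(DynamicCustomOp.builder("dynamic_stitch")
         *           .addInputs(idx0, idx1, d0, d1)
         *           .build())[0];                  // [1, 2, 3, 4, 5]
         */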

        /**
         * zero_fraction op.
         * computes the fraction of zeros in the given array
         *
         * input param - an array (tensor)
         * output value - a real number of the given type (e.g. float or double)
         */
//         #if NOT_EXCLUDED(OP_zero_fraction)
        @Namespace("sd::ops") public static class zero_fraction extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public zero_fraction(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public zero_fraction(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public zero_fraction position(long position) {
                return (zero_fraction)super.position(position);
            }
            @Override public zero_fraction getPointer(long i) {
                return new zero_fraction((Pointer)this).position(position + i);
            }
        
                                                                                    public zero_fraction() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
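
        /*
         * Usage sketch for zero_fraction (editorial, not part of the generated bindings); half of
         * the entries of [0, 1, 0, 2] are zero, so the result is 0.5.
         *
         *   INDArray in   = Nd4j.createFromArray(new float[]{0f, 1f, 0f, 2f});
         *   INDArray frac = Nd4j.exec(DynamicCustomOp.builder("zero_fraction")
         *           .addInputs(in)
         *           .build())[0];                  // scalar 0.5
         */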

        /**
         * xw_plus_b op.
         * multiplies the first two matrices and adds the third vector to each row of the result
         *
         * input params:
         *   - 2D matrix NxM (x)
         *   - 2D matrix MxN (w)
         *   - 1D vector with N elements (b)
         * output value - 2D matrix NxN: the matrix product x*w with the vector b added to each row
         * Int args:
         *      0 - optional switch for the weights format: if the int arg == 1 - mkldnn, else mmul
         */
//         #if NOT_EXCLUDED(OP_xw_plus_b)
                @Namespace("sd::ops") public static class xw_plus_b extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public xw_plus_b(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public xw_plus_b(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public xw_plus_b position(long position) {
                        return (xw_plus_b)super.position(position);
                    }
                    @Override public xw_plus_b getPointer(long i) {
                        return new xw_plus_b((Pointer)this).position(position + i);
                    }
                
                                                                                    public xw_plus_b() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                @Namespace("sd::ops") public static class xw_plus_b_bp extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public xw_plus_b_bp(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public xw_plus_b_bp(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public xw_plus_b_bp position(long position) {
                        return (xw_plus_b_bp)super.position(position);
                    }
                    @Override public xw_plus_b_bp getPointer(long i) {
                        return new xw_plus_b_bp((Pointer)this).position(position + i);
                    }
                
                                                                                    public xw_plus_b_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
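
        /*
         * Usage sketch for xw_plus_b (editorial, not part of the generated bindings): the result is
         * the matrix product of x and w with b added to each row. With x of shape [2, 3], w of shape
         * [3, 2] and b of length 2, the output has shape [2, 2].
         *
         *   INDArray x = Nd4j.createFromArray(new float[][]{{1f, 2f, 3f}, {4f, 5f, 6f}});
         *   INDArray w = Nd4j.createFromArray(new float[][]{{1f, 0f}, {0f, 1f}, {1f, 1f}});
         *   INDArray b = Nd4j.createFromArray(new float[]{10f, 20f});
         *   INDArray y = Nd4j.exec(DynamicCustomOp.builder("xw_plus_b")
         *           .addInputs(x, w, b)
         *           .build())[0];                  // [[14, 25], [20, 31]]
         */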

        /**
         * This operation is trivial by design: it passes its input through unchanged and only
         * marks the point where gradients should stop flowing during backpropagation.
         * Input - NDArray, output - NDArray with the same shape and values.
         */
//         #if NOT_EXCLUDED(OP_stop_gradient)
        @Namespace("sd::ops") public static class stop_gradient extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public stop_gradient(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public stop_gradient(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public stop_gradient position(long position) {
                return (stop_gradient)super.position(position);
            }
            @Override public stop_gradient getPointer(long i) {
                return new stop_gradient((Pointer)this).position(position + i);
            }
        
                                                    public stop_gradient() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_parallel_stack)
        @Namespace("sd::ops") public static class parallel_stack extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public parallel_stack(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public parallel_stack(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public parallel_stack position(long position) {
                return (parallel_stack)super.position(position);
            }
            @Override public parallel_stack getPointer(long i) {
                return new parallel_stack((Pointer)this).position(position + i);
            }
        
                                                                                    public parallel_stack() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * normalize_moments operation normalizes an already calculated mean and variance
         * according to the shift and count.
         * input params:
         *  - count of data
         *  - tensor with mean
         *  - tensor with variance (the same shape as the mean)
         *
         *  - optional floating point param shift.
         *
         *  returns a normalized pair of mean and variance with the same shapes as the input
         */
//         #if NOT_EXCLUDED(OP_normalize_moments)
        @Namespace("sd::ops") public static class normalize_moments extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public normalize_moments(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public normalize_moments(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public normalize_moments position(long position) {
                return (normalize_moments)super.position(position);
            }
            @Override public normalize_moments getPointer(long i) {
                return new normalize_moments((Pointer)this).position(position + i);
            }
        
                                                                                    public normalize_moments() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * sufficient_statistics operation returns the statistics needed to compute mean and variance, together with the data count.
         * this operation is the inverse of moments,
         * according to shift and count.
         * input params:
         *  - input tensor
         *  - axes vector
         *
         *
         *  - optional floating point param shift.
         *  - optional int (as bool) keep_dimension
         *
         *  returns four tensors:
         *     - scalar tensor (data count)
         *     - sum of the input elements (across the axes)
         *     - sum of squares of the input elements (across the axes)
         *     - shift (if it was given as the input floating point param)
         */
//         #if NOT_EXCLUDED(OP_sufficient_statistics)
        @Namespace("sd::ops") public static class sufficient_statistics extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sufficient_statistics(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sufficient_statistics(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sufficient_statistics position(long position) {
                return (sufficient_statistics)super.position(position);
            }
            @Override public sufficient_statistics getPointer(long i) {
                return new sufficient_statistics((Pointer)this).position(position + i);
            }
        
                                                                                    public sufficient_statistics() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
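
        // Illustrative sketch only: computes the four outputs documented above (count, sum, sum of
        // squares, shift) for a flat array, i.e. a reduction over all axes. Axis handling and shapes
        // of the native op are more general; this just shows the accumulated quantities.
        private static double[] sufficientStatisticsSketch(double[] input, double shift) {
            double count = input.length, sum = 0.0, sumOfSquares = 0.0;
            for (double v : input) {
                double d = v - shift;          // statistics are accumulated relative to the optional shift
                sum += d;
                sumOfSquares += d * d;
            }
            return new double[] { count, sum, sumOfSquares, shift };
        }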

        /**
         * This op calculates the weighted logarithmic loss of the input
         * Input arguments
         *  0 - target
         *  1 - input
         *  2 - weights (scalar or vector with the same length as the last dimension)
         *
         *  return value - a tensor with the same shape as target or input
         *  (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_weighted_cross_entropy_with_logits)
        @Namespace("sd::ops") public static class weighted_cross_entropy_with_logits extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public weighted_cross_entropy_with_logits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public weighted_cross_entropy_with_logits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public weighted_cross_entropy_with_logits position(long position) {
                return (weighted_cross_entropy_with_logits)super.position(position);
            }
            @Override public weighted_cross_entropy_with_logits getPointer(long i) {
                return new weighted_cross_entropy_with_logits((Pointer)this).position(position + i);
            }
        
                                                    public weighted_cross_entropy_with_logits() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
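
        // Illustrative sketch only: element-wise weighted cross-entropy over logits, using the textbook
        // formulation loss = -w * t * log(sigmoid(x)) - (1 - t) * log(1 - sigmoid(x)). The native kernel
        // may use a numerically stable variant, so treat this as a semantic reference, not the exact math.
        private static double[] weightedCrossEntropySketch(double[] targets, double[] logits, double weight) {
            double[] loss = new double[targets.length];
            for (int i = 0; i < targets.length; i++) {
                double p = 1.0 / (1.0 + Math.exp(-logits[i]));   // sigmoid of the logit
                loss[i] = -weight * targets[i] * Math.log(p) - (1.0 - targets[i]) * Math.log(1.0 - p);
            }
            return loss;
        }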

        /**
         * This op calculates dropout of the input
         * Input arguments
         *  0 - input tensor
         *  1 - noise_shape - (vector with the shape to reduce over) - optional
         *
         *  int parameter - seed for random numbers
         *  T parameter - probability (should be between 0 and 1)
         *  return value - a tensor with the same shape as the input
         *  (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_dropout)
        @Namespace("sd::ops") public static class dropout extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dropout(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dropout(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dropout position(long position) {
                return (dropout)super.position(position);
            }
            @Override public dropout getPointer(long i) {
                return new dropout((Pointer)this).position(position + i);
            }
        
                                                                                    public dropout() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
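
        // Illustrative sketch only: inverted dropout over a flat array. The documented T parameter is
        // interpreted here as the keep probability (an assumption); the native op's noise_shape handling
        // and random number generator are not reproduced.
        private static double[] dropoutSketch(double[] input, double keepProbability, long seed) {
            java.util.Random rng = new java.util.Random(seed);
            double[] out = new double[input.length];
            for (int i = 0; i < input.length; i++) {
                // kept values are scaled by 1 / keepProbability so the expected activation is unchanged
                out[i] = rng.nextDouble() < keepProbability ? input[i] / keepProbability : 0.0;
            }
            return out;
        }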
//         #if NOT_EXCLUDED(OP_dropout_bp)
        @Namespace("sd::ops") public static class dropout_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public dropout_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public dropout_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public dropout_bp position(long position) {
                return (dropout_bp)super.position(position);
            }
            @Override public dropout_bp getPointer(long i) {
                return new dropout_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public dropout_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /*  Calculates alpha weighted dropout
            T params:
                0 - drop probability
                1 - alpha value
                2 - alpha' value
                3 - beta value
         */
//         #if NOT_EXCLUDED(OP_alpha_dropout_bp)
        @Namespace("sd::ops") public static class alpha_dropout_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public alpha_dropout_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public alpha_dropout_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public alpha_dropout_bp position(long position) {
                return (alpha_dropout_bp)super.position(position);
            }
            @Override public alpha_dropout_bp getPointer(long i) {
                return new alpha_dropout_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public alpha_dropout_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * bincount operation returns a vector with the count of each element.
         *
         * input params:
         *  - input tensor - only integer values are accepted
         *  - weights - a tensor of the same shape with integer weights for each element (optional)
         *  default weight - 1 for all values in the tensor
         *
         *  optional ints:
         *  - min_length - zero or greater
         *  - max_length - between min_length and max(input) + 1
         *
         *  returns one tensor:
         *     - vector tensor of length min(max_length, max(input) + 1) with the count
         *  of values at each indexed place
         *  (an illustrative plain-Java sketch follows this class)
         *
         */
//         #if NOT_EXCLUDED(OP_bincount)
        @Namespace("sd::ops") public static class bincount extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public bincount(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public bincount(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public bincount position(long position) {
                return (bincount)super.position(position);
            }
            @Override public bincount getPointer(long i) {
                return new bincount((Pointer)this).position(position + i);
            }
        
                                                                                    public bincount() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
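
        // Illustrative sketch only: plain-Java bincount with optional weights and the
        // min_length / max_length clamping documented above. When weights is null every
        // occurrence counts as 1; values that fall outside the output length are ignored.
        private static int[] bincountSketch(int[] values, int[] weights, int minLength, int maxLength) {
            int maxValue = 0;
            for (int v : values) maxValue = Math.max(maxValue, v);
            int length = Math.max(minLength, Math.min(maxLength, maxValue + 1));
            int[] counts = new int[length];
            for (int i = 0; i < values.length; i++) {
                if (values[i] < length) {
                    counts[values[i]] += weights == null ? 1 : weights[i];
                }
            }
            return counts;
        }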

        /**
         * broadcast_dynamic_shape op.
         *
         * input params:
         *    0 - the first shape (vector with shape)
         *    1 - the second shape (vector with shape)
         *
         * return value:
         *    vector with the broadcast shape
         *    (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_broadcast_dynamic_shape)
        @Namespace("sd::ops") public static class broadcast_dynamic_shape extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public broadcast_dynamic_shape(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public broadcast_dynamic_shape(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public broadcast_dynamic_shape position(long position) {
                return (broadcast_dynamic_shape)super.position(position);
            }
            @Override public broadcast_dynamic_shape getPointer(long i) {
                return new broadcast_dynamic_shape((Pointer)this).position(position + i);
            }
        
                                                                                    public broadcast_dynamic_shape() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
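
        // Illustrative sketch only: NumPy-style broadcasting of two shape vectors, as documented above.
        // Dimensions are aligned from the right; a dimension of size 1 broadcasts against the other
        // shape's dimension, and mismatched non-1 dimensions are an error.
        private static long[] broadcastShapesSketch(long[] a, long[] b) {
            int rank = Math.max(a.length, b.length);
            long[] out = new long[rank];
            for (int i = 0; i < rank; i++) {
                long da = i < rank - a.length ? 1 : a[i - (rank - a.length)];
                long db = i < rank - b.length ? 1 : b[i - (rank - b.length)];
                if (da != db && da != 1 && db != 1)
                    throw new IllegalArgumentException("Shapes are not broadcastable at dimension " + i);
                out[i] = Math.max(da, db);
            }
            return out;
        }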

        /**
         * matrix_determinant op.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M)
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: *) with the determinant of all M x M matrices
         *    (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_matrix_determinant)
        @Namespace("sd::ops") public static class matrix_determinant extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_determinant(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_determinant(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_determinant position(long position) {
                return (matrix_determinant)super.position(position);
            }
            @Override public matrix_determinant getPointer(long i) {
                return new matrix_determinant((Pointer)this).position(position + i);
            }
        
                                                                                    public matrix_determinant() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
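
        // Illustrative sketch only: determinant of a single M x M matrix via Gaussian elimination with
        // partial pivoting. The native op applies the same computation to every trailing M x M slice of
        // the batched input.
        private static double determinantSketch(double[][] matrix) {
            int n = matrix.length;
            double[][] a = new double[n][];
            for (int i = 0; i < n; i++) a[i] = matrix[i].clone();   // work on a copy
            double det = 1.0;
            for (int col = 0; col < n; col++) {
                int pivot = col;
                for (int row = col + 1; row < n; row++)
                    if (Math.abs(a[row][col]) > Math.abs(a[pivot][col])) pivot = row;
                if (a[pivot][col] == 0.0) return 0.0;               // singular matrix
                if (pivot != col) { double[] t = a[pivot]; a[pivot] = a[col]; a[col] = t; det = -det; }
                det *= a[col][col];
                for (int row = col + 1; row < n; row++) {
                    double factor = a[row][col] / a[col][col];
                    for (int k = col; k < n; k++) a[row][k] -= factor * a[col][k];
                }
            }
            return det;
        }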

        /**
         * log_matrix_determinant op.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M)
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: *) with the log determinant of all M x M matrices
         */

//         #if NOT_EXCLUDED(OP_log_matrix_determinant)
        @Namespace("sd::ops") public static class log_matrix_determinant extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_matrix_determinant(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_matrix_determinant(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_matrix_determinant position(long position) {
                return (log_matrix_determinant)super.position(position);
            }
            @Override public log_matrix_determinant getPointer(long i) {
                return new log_matrix_determinant((Pointer)this).position(position + i);
            }
        
                                                                                    public log_matrix_determinant() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * logdet op. Logarithm of the determinant of Hermitian positive-definite matrices.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M)
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: *) with the log determinant of all M x M matrices
         */

//         #if NOT_EXCLUDED(OP_logdet)
        @Namespace("sd::ops") public static class logdet extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public logdet(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public logdet(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public logdet position(long position) {
                return (logdet)super.position(position);
            }
            @Override public logdet getPointer(long i) {
                return new logdet((Pointer)this).position(position + i);
            }
        
                                                                                    public logdet() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * matrix_solve_ls op (lstsq) - solves one or more linear least-squares problems.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * N) - left-hand sides of the equations
         *    1 - the tensor with dimension (x * y * z * ::: * M * K) - right-hand sides of the equations
         *
         * float args:
         *    0 - l2_regularizer (default 0.0; only 0 is currently implemented)
         *
         * boolean args:
         *    0 - fast - default is true (optional) - use Cholesky decomposition instead of QR decomposition of the matrices.
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: * N * K) with the solutions
         *
         */
//         #if NOT_EXCLUDED(OP_lstsq)
        @Namespace("sd::ops") public static class lstsq extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lstsq(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lstsq(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lstsq position(long position) {
                return (lstsq)super.position(position);
            }
            @Override public lstsq getPointer(long i) {
                return new lstsq((Pointer)this).position(position + i);
            }
        
                                                                                    public lstsq() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /* solve_ls - analog of the lstsq op with another solution approach
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * N) - left-hand sides of the equations
         *    1 - the tensor with dimension (x * y * z * ::: * M * K) - right-hand sides of the equations
         *
         * float args:
         *    0 - l2_regularizer (default 0.0; only 0 is currently implemented)
         *
         * boolean args:
         *    0 - fast - default is true (optional) - use Cholesky decomposition instead of QR decomposition of the matrices.
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: * N * K) with the solutions
         *
         * Note: if fast is false, the l2_regularizer arg is ignored and the lstsq method (via QR decomposition) is used
         * */
//         #if NOT_EXCLUDED(OP_solve_ls)
                @Namespace("sd::ops") public static class solve_ls extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public solve_ls(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public solve_ls(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public solve_ls position(long position) {
                        return (solve_ls)super.position(position);
                    }
                    @Override public solve_ls getPointer(long i) {
                        return new solve_ls((Pointer)this).position(position + i);
                    }
                
                                                                                    public solve_ls() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * matrix_inverse op. - computes the inverse of all 2D square matrices found in the input tensor
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M)
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: * M * M) with the inverse M x M matrices in it
         */
//         #if NOT_EXCLUDED(OP_matrix_inverse)
        @Namespace("sd::ops") public static class matrix_inverse extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_inverse(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_inverse(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_inverse position(long position) {
                return (matrix_inverse)super.position(position);
            }
            @Override public matrix_inverse getPointer(long i) {
                return new matrix_inverse((Pointer)this).position(position + i);
            }
        
                                                    public matrix_inverse() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * triangular_solve op. - back substitution (reverse Gaussian elimination) for solving systems of linear equations.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M) - left-hand sides of the equations
         *    1 - the tensor with dimension (x * y * z * ::: * M * K) - right-hand sides of the equations
         *
         * boolean args:
         *    0 - lower - default is true (optional) - the left-hand side is a lower triangular matrix
         *    1 - adjoint - default is false (optional) - indicates whether the input matrix or its adjoint (Hermitian transpose) should be used
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: * M * K) with the solutions
         *    (an illustrative plain-Java sketch follows this class)
         *
         */
//         #if NOT_EXCLUDED(OP_triangular_solve)
        @Namespace("sd::ops") public static class triangular_solve extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public triangular_solve(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public triangular_solve(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public triangular_solve position(long position) {
                return (triangular_solve)super.position(position);
            }
            @Override public triangular_solve getPointer(long i) {
                return new triangular_solve((Pointer)this).position(position + i);
            }
        
                                                                                    public triangular_solve() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
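
        // Illustrative sketch only: forward substitution for a single lower-triangular system L * x = b,
        // matching the lower=true, adjoint=false case documented above. The native op batches this over
        // the leading dimensions and also handles upper triangles and the adjoint option.
        private static double[] forwardSubstitutionSketch(double[][] lower, double[] b) {
            int n = b.length;
            double[] x = new double[n];
            for (int i = 0; i < n; i++) {
                double acc = b[i];
                for (int j = 0; j < i; j++) acc -= lower[i][j] * x[j];   // subtract terms already solved
                x[i] = acc / lower[i][i];                                // assumes a non-zero diagonal
            }
            return x;
        }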

        /**
         * solve op. - solves systems of linear equations - general method.
         *
         * input params:
         *    0 - the tensor with dimension (x * y * z * ::: * M * M) - left-hand sides of the equations
         *    1 - the tensor with dimension (x * y * z * ::: * M * K) - right-hand sides of the equations
         *
         * boolean args:
         *    0 - adjoint - default is false (optional) - indicates whether the input matrix or its adjoint (Hermitian transpose) should be used
         *
         * return value:
         *    tensor with dimension (x * y * z * ::: * M * K) with the solutions
         *
         */
//         #if NOT_EXCLUDED(OP_solve)
        @Namespace("sd::ops") public static class solve extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public solve(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public solve(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public solve position(long position) {
                return (solve)super.position(position);
            }
            @Override public solve getPointer(long i) {
                return new solve((Pointer)this).position(position + i);
            }
        
                                                                                    public solve() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * lu op. - computes the LUP decomposition of a given batch of 2D square matrices
         *
         * input params:
         *    0 - float tensor with dimension (x * y * z * ::: * M * M)
         *
         * return value:
         *    0 - float tensor with dimension (x * y * z * ::: * M * M) with the LU M x M matrices in it
         *    1 - int (32 or 64) batched vector of permutations of length M - shape (x * y * z * ::: * M)
         *
         * int argument:
         *    0 - data type of the output permutation vector (int32 or int64), optional, default INT32
         *
         * (an illustrative plain-Java sketch follows this class)
         */

//         #if NOT_EXCLUDED(OP_lu)
        @Namespace("sd::ops") public static class lu extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lu(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lu(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lu position(long position) {
                return (lu)super.position(position);
            }
            @Override public lu getPointer(long i) {
                return new lu((Pointer)this).position(position + i);
            }
        
                                                                                    public lu() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
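
        // Illustrative sketch only: LU decomposition with partial pivoting for one M x M matrix,
        // assuming the input is non-singular. The matrix is overwritten in place with the packed L
        // (below the diagonal, unit diagonal implied) and U (on and above the diagonal) factors, and
        // the returned array is the permutation vector - mirroring the two outputs documented above.
        private static int[] luSketch(double[][] a) {
            int n = a.length;
            int[] permutation = new int[n];
            for (int i = 0; i < n; i++) permutation[i] = i;
            for (int col = 0; col < n; col++) {
                int pivot = col;
                for (int row = col + 1; row < n; row++)
                    if (Math.abs(a[row][col]) > Math.abs(a[pivot][col])) pivot = row;
                if (pivot != col) {
                    double[] t = a[pivot]; a[pivot] = a[col]; a[col] = t;
                    int tp = permutation[pivot]; permutation[pivot] = permutation[col]; permutation[col] = tp;
                }
                for (int row = col + 1; row < n; row++) {
                    a[row][col] /= a[col][col];                          // multiplier stored as the L factor
                    for (int k = col + 1; k < n; k++)
                        a[row][k] -= a[row][col] * a[col][k];            // eliminate the remaining submatrix
                }
            }
            return permutation;
        }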

        /**
         * sequence_mask op. - makes a mask for the given tensor, filled according to (j > x[i_1, i_2,...,i_n]) -> z[i_1, i_2,...,i_n,j]
         *
         * input params:
         *    0 - the ND tensor filled with integer-like values
         *
         * optional int param - maxlength (maxlength >= max(x)). By default maxlength = max(x).
         * return value:
         *    (N+1)D tensor filled with 0 and 1 according to the mask
         *    (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_sequence_mask)
        @Namespace("sd::ops") public static class sequence_mask extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sequence_mask(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sequence_mask(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sequence_mask position(long position) {
                return (sequence_mask)super.position(position);
            }
            @Override public sequence_mask getPointer(long i) {
                return new sequence_mask((Pointer)this).position(position + i);
            }
        
                                                                                    public sequence_mask() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
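
        // Illustrative sketch only: the 1D case of sequence_mask, assuming the usual convention
        // z[i][j] = 1 when j < x[i] and 0 otherwise; the exact comparison used by the native kernel
        // should be taken from its implementation rather than from this sketch.
        private static int[][] sequenceMaskSketch(int[] lengths, int maxLength) {
            int[][] mask = new int[lengths.length][maxLength];
            for (int i = 0; i < lengths.length; i++)
                for (int j = 0; j < maxLength; j++)
                    mask[i][j] = j < lengths[i] ? 1 : 0;
            return mask;
        }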
        /**
         * segment_max op. - makes a tensor filled with max values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the max values according to the index sets.
         *    (an illustrative plain-Java sketch follows this class)
         */

//         #if NOT_EXCLUDED(OP_segment_max)
        @Namespace("sd::ops") public static class segment_max extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_max(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_max(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_max position(long position) {
                return (segment_max)super.position(position);
            }
            @Override public segment_max getPointer(long i) {
                return new segment_max((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_max() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
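
        // Illustrative sketch only: 1D sorted-segment max. Segment ids are assumed to be non-decreasing,
        // as is typical for the sorted segment_* ops, so the last id gives the number of segments.
        private static double[] segmentMaxSketch(double[] data, int[] segmentIds) {
            int numSegments = segmentIds[segmentIds.length - 1] + 1;
            double[] out = new double[numSegments];
            java.util.Arrays.fill(out, Double.NEGATIVE_INFINITY);
            for (int i = 0; i < data.length; i++)
                out[segmentIds[i]] = Math.max(out[segmentIds[i]], data[i]);
            return out;
        }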
//         #if NOT_EXCLUDED(OP_segment_max_bp)
        @Namespace("sd::ops") public static class segment_max_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_max_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_max_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_max_bp position(long position) {
                return (segment_max_bp)super.position(position);
            }
            @Override public segment_max_bp getPointer(long i) {
                return new segment_max_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_max_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * segment_min op. - makes a tensor filled with min values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the min values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_segment_min)
        @Namespace("sd::ops") public static class segment_min extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_min(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_min(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_min position(long position) {
                return (segment_min)super.position(position);
            }
            @Override public segment_min getPointer(long i) {
                return new segment_min((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_min() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_segment_min_bp)
        @Namespace("sd::ops") public static class segment_min_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_min_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_min_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_min_bp position(long position) {
                return (segment_min_bp)super.position(position);
            }
            @Override public segment_min_bp getPointer(long i) {
                return new segment_min_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_min_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * segment_sum op. - makes a tensor filled with the sum of values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the sum of values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_segment_sum)
        @Namespace("sd::ops") public static class segment_sum extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_sum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_sum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_sum position(long position) {
                return (segment_sum)super.position(position);
            }
            @Override public segment_sum getPointer(long i) {
                return new segment_sum((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_sum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_segment_sum_bp)
        @Namespace("sd::ops") public static class segment_sum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_sum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_sum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_sum_bp position(long position) {
                return (segment_sum_bp)super.position(position);
            }
            @Override public segment_sum_bp getPointer(long i) {
                return new segment_sum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_sum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * segment_prod op. - makes a tensor filled with the product of values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the product of values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_segment_prod)
        @Namespace("sd::ops") public static class segment_prod extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_prod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_prod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_prod position(long position) {
                return (segment_prod)super.position(position);
            }
            @Override public segment_prod getPointer(long i) {
                return new segment_prod((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_prod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_segment_prod_bp)
        @Namespace("sd::ops") public static class segment_prod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_prod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_prod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_prod_bp position(long position) {
                return (segment_prod_bp)super.position(position);
            }
            @Override public segment_prod_bp getPointer(long i) {
                return new segment_prod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_prod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
        /**
         * segment_mean op. - makes a tensor filled with the average of values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the average of values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_segment_mean)
        @Namespace("sd::ops") public static class segment_mean extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_mean(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_mean(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_mean position(long position) {
                return (segment_mean)super.position(position);
            }
            @Override public segment_mean getPointer(long i) {
                return new segment_mean((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_mean() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_segment_mean_bp)
        @Namespace("sd::ops") public static class segment_mean_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public segment_mean_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public segment_mean_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public segment_mean_bp position(long position) {
                return (segment_mean_bp)super.position(position);
            }
            @Override public segment_mean_bp getPointer(long i) {
                return new segment_mean_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public segment_mean_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * unsorted_segment_max op. - makes a tensor filled with max values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * return value:
         *    tensor with the max values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_max)
        @Namespace("sd::ops") public static class unsorted_segment_max extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_max(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_max(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_max position(long position) {
                return (unsorted_segment_max)super.position(position);
            }
            @Override public unsorted_segment_max getPointer(long i) {
                return new unsorted_segment_max((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_max() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_unsorted_segment_max_bp)
        @Namespace("sd::ops") public static class unsorted_segment_max_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_max_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_max_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_max_bp position(long position) {
                return (unsorted_segment_max_bp)super.position(position);
            }
            @Override public unsorted_segment_max_bp getPointer(long i) {
                return new unsorted_segment_max_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_max_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * unsorted_segment_min op. - makes a tensor filled with min values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * integer param:
         *    0 - number of segments
         *
         * return value:
         *    tensor with the min values according to the index sets.
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_min)
        @Namespace("sd::ops") public static class unsorted_segment_min extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_min(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_min(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_min position(long position) {
                return (unsorted_segment_min)super.position(position);
            }
            @Override public unsorted_segment_min getPointer(long i) {
                return new unsorted_segment_min((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_min() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_unsorted_segment_min_bp)
        @Namespace("sd::ops") public static class unsorted_segment_min_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_min_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_min_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_min_bp position(long position) {
                return (unsorted_segment_min_bp)super.position(position);
            }
            @Override public unsorted_segment_min_bp getPointer(long i) {
                return new unsorted_segment_min_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_min_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * unsorted_segment_sum op. - makes a tensor filled with the sum of values according to the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * integer param:
         *    0 - number of segments
         *
         * return value:
         *    tensor with the sum of values according to the index sets.
         *    (an illustrative plain-Java sketch follows this class)
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_sum)
        @Namespace("sd::ops") public static class unsorted_segment_sum extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_sum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_sum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_sum position(long position) {
                return (unsorted_segment_sum)super.position(position);
            }
            @Override public unsorted_segment_sum getPointer(long i) {
                return new unsorted_segment_sum((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_sum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
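
        // Illustrative sketch only: 1D unsorted-segment sum. Unlike the sorted segment ops, the ids may
        // appear in any order and the number of segments is passed explicitly, matching the integer
        // param documented above.
        private static double[] unsortedSegmentSumSketch(double[] data, int[] segmentIds, int numSegments) {
            double[] out = new double[numSegments];
            for (int i = 0; i < data.length; i++)
                out[segmentIds[i]] += data[i];
            return out;
        }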
//         #if NOT_EXCLUDED(OP_unsorted_segment_sum_bp)
        @Namespace("sd::ops") public static class unsorted_segment_sum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_sum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_sum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_sum_bp position(long position) {
                return (unsorted_segment_sum_bp)super.position(position);
            }
            @Override public unsorted_segment_sum_bp getPointer(long i) {
                return new unsorted_segment_sum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_sum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
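
        // Usage sketch for the unsorted_segment_* wrappers above (a hedged illustration, not part of the
        // generated bindings). It assumes the nd4j-native backend is on the classpath so Loader.load()
        // succeeds; the helper name is hypothetical, and in practice these ops are executed through
        // ND4J's higher-level CustomOp machinery rather than by calling the raw wrappers directly.
        private static unsorted_segment_sum sketchUnsortedSegmentSum() {
            // Input 0: data tensor, input 1: index tensor; integer arg 0: number of segments.
            // The same argument layout applies to unsorted_segment_min/prod/mean/sqrt_n nearby.
            return new unsorted_segment_sum();
        }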

        /**
         * unsorted_segment_prod op - makes a tensor filled with the products of values grouped by the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * integer param:
         *    0 - number of segments
         *
         * return value:
         *    tensor with the product of values for each index set.
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_prod)
        @Namespace("sd::ops") public static class unsorted_segment_prod extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_prod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_prod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_prod position(long position) {
                return (unsorted_segment_prod)super.position(position);
            }
            @Override public unsorted_segment_prod getPointer(long i) {
                return new unsorted_segment_prod((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_prod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_unsorted_segment_prod_bp)
        @Namespace("sd::ops") public static class unsorted_segment_prod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_prod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_prod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_prod_bp position(long position) {
                return (unsorted_segment_prod_bp)super.position(position);
            }
            @Override public unsorted_segment_prod_bp getPointer(long i) {
                return new unsorted_segment_prod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_prod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * unsorted_segment_mean op - makes a tensor filled with the averages of values grouped by the given index tensor.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * integer param:
         *    0 - number of segments
         *
         * return value:
         *    tensor with the average of values for each index set.
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_mean)
        @Namespace("sd::ops") public static class unsorted_segment_mean extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_mean(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_mean(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_mean position(long position) {
                return (unsorted_segment_mean)super.position(position);
            }
            @Override public unsorted_segment_mean getPointer(long i) {
                return new unsorted_segment_mean((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_mean() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_unsorted_segment_mean_bp)
        @Namespace("sd::ops") public static class unsorted_segment_mean_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_mean_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_mean_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_mean_bp position(long position) {
                return (unsorted_segment_mean_bp)super.position(position);
            }
            @Override public unsorted_segment_mean_bp getPointer(long i) {
                return new unsorted_segment_mean_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_mean_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * unsorted_segment_sqrt_n op - computes the sum along segments of a tensor, divided by sqrt(N), where N is the segment size.
         *
         * input params:
         *    0 - the tensor with data;
         *    1 - the tensor with indices.
         *
         * integer param:
         *    0 - number of segments
         *
         * return value:
         *    tensor with the segment sums divided by sqrt(N) for each index set.
         */
//         #if NOT_EXCLUDED(OP_unsorted_segment_sqrt)
        @Namespace("sd::ops") public static class unsorted_segment_sqrt_n extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_sqrt_n(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_sqrt_n(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_sqrt_n position(long position) {
                return (unsorted_segment_sqrt_n)super.position(position);
            }
            @Override public unsorted_segment_sqrt_n getPointer(long i) {
                return new unsorted_segment_sqrt_n((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_sqrt_n() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_unsorted_segment_sqrt_n_bp)
        @Namespace("sd::ops") public static class unsorted_segment_sqrt_n_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public unsorted_segment_sqrt_n_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public unsorted_segment_sqrt_n_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public unsorted_segment_sqrt_n_bp position(long position) {
                return (unsorted_segment_sqrt_n_bp)super.position(position);
            }
            @Override public unsorted_segment_sqrt_n_bp getPointer(long i) {
                return new unsorted_segment_sqrt_n_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public unsorted_segment_sqrt_n_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * extract_image_patches op - Extract patches from images and put them in the "depth" output dimension.
         *
         * input params:
         *    0 - images tensor (4D)
         *
         * int params:
         *    0 - ksize_rows
         *    1 - ksize_cols
         *    2 - strides_rows
         *    3 - strides_cols
         *    4 - rates_rows
         *    5 - rates_cols
         *    6 - padding_type: 0 is equivalent to 'VALID', 1 to 'SAME'
         *
         * A minimal usage sketch follows the declarations below.
         */
//         #if NOT_EXCLUDED(OP_extract_image_patches)
        @Namespace("sd::ops") public static class extract_image_patches extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public extract_image_patches(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public extract_image_patches(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public extract_image_patches position(long position) {
                return (extract_image_patches)super.position(position);
            }
            @Override public extract_image_patches getPointer(long i) {
                return new extract_image_patches((Pointer)this).position(position + i);
            }
        
                                                                                    public extract_image_patches() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
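
        // Usage sketch for extract_image_patches (a hedged illustration, not generated code; the
        // helper name is hypothetical). It only shows how the documented int args line up when the
        // raw wrapper is constructed; real execution goes through ND4J's CustomOp machinery.
        private static extract_image_patches sketchExtractImagePatches() {
            // Input 0: 4D image tensor.
            // Int args: ksize_rows, ksize_cols, strides_rows, strides_cols, rates_rows, rates_cols,
            // padding_type (0 = VALID, 1 = SAME).
            return new extract_image_patches();
        }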

        /**
         * draw_bounding_boxes op - copies the input images and draws the given boxes on them with the given colours.
         *
         * input params:
         *    0 - images tensor (4D) with shape {batch, width, height, channels}, where channels is 1 (greyscale),
         * 3 (RGB) or 4 (RGBA)
         *    1 - boxes tensor (3D) with shape {batch, number_of_boxes, 4} where the last dimension is encoded as
         * (y_min, x_min, y_max, x_max), all values between 0. and 1.
         *    2 - colours tensor (2D) with shape {number_of_boxes, channels} -- the border colour set (palette)
         *
         * output:
         *    0 - 4D tensor with the same shape as the images (input 0). A minimal usage sketch follows the declarations below.
         */
//         #if NOT_EXCLUDED(OP_draw_bounding_boxes)
        @Namespace("sd::ops") public static class draw_bounding_boxes extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public draw_bounding_boxes(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public draw_bounding_boxes(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public draw_bounding_boxes position(long position) {
                return (draw_bounding_boxes)super.position(position);
            }
            @Override public draw_bounding_boxes getPointer(long i) {
                return new draw_bounding_boxes((Pointer)this).position(position + i);
            }
        
                                                    public draw_bounding_boxes() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
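
        // Usage sketch for draw_bounding_boxes (a hedged illustration, not generated code; the
        // helper name is hypothetical). It takes the three positional inputs documented above.
        private static draw_bounding_boxes sketchDrawBoundingBoxes() {
            // Input 0: images  {batch, width, height, channels}
            // Input 1: boxes   {batch, number_of_boxes, 4} as (y_min, x_min, y_max, x_max) in [0, 1]
            // Input 2: colours {number_of_boxes, channels}
            return new draw_bounding_boxes();
        }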

        /**
         * roll - op ported from numpy (https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.roll.html)
         *
         * input params:
         *    0 - NDArray
         *
         * int params:
         *    0 - shift
         *    1 - axis 1
         *    2 - axis 2
         *    ...
         *    N - axis N
         *
         *    All axes are optional and should be between 0 and input->rankOf() - 1. Axes may be repeated.
         *
         * output:
         *    0 - NDArray with the same shape as the input. A minimal usage sketch follows the declarations below.
         */
//         #if NOT_EXCLUDED(OP_roll)
        @Namespace("sd::ops") public static class roll extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public roll(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public roll(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public roll position(long position) {
                return (roll)super.position(position);
            }
            @Override public roll getPointer(long i) {
                return new roll((Pointer)this).position(position + i);
            }
        
                                                                                    public roll() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
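
        // Usage sketch for roll (a hedged illustration, not generated code; the helper name is hypothetical).
        private static roll sketchRoll() {
            // Input 0: NDArray to roll.
            // Int args: shift, followed by an optional list of axes to roll along.
            return new roll();
        }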

        /**
         * lin_space - op ported from TF (https://www.tensorflow.org/api_docs/python/tf/lin_space)
         *
         * optional input params:
         *    0 - startVal - NDArray scalar (floating point)
         *    1 - finishVal - NDArray scalar (floating point)
         *    2 - numOfElements - NDArray scalar (integer)
         * Optional:
         * T args
         *    0 - startVal
         *    1 - finishVal
         *    2 - numOfElements
         * output:
         *    0 - 1D NDArray with the same type as the input and length given by the numOfElements param. A minimal usage sketch follows the declarations below.
         */
//         #if NOT_EXCLUDED(OP_lin_space)
        @Namespace("sd::ops") public static class lin_space extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lin_space(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lin_space(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lin_space position(long position) {
                return (lin_space)super.position(position);
            }
            @Override public lin_space getPointer(long i) {
                return new lin_space((Pointer)this).position(position + i);
            }
        
                                                                                    public lin_space() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
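
        // Usage sketch for lin_space (a hedged illustration, not generated code; the helper name is hypothetical).
        private static lin_space sketchLinSpace() {
            // Either pass startVal, finishVal and numOfElements as scalar NDArray inputs,
            // or pass them as T args (startVal, finishVal, numOfElements) as documented above.
            return new lin_space();
        }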

        /**
         * reduce_sum - tf.reduce_sum operation
         *
         * input params:
         *    0 - NDArray
         *
         * T_ARG param (optional):
         * 0 - keep_dims (reduced dimensions are kept when != 0).
         *
         * int params (optional):
         *    0 - axis 1
         *    1 - axis 2
         *    ...
         *    N-1 - axis N
         *
         *    All axes are optional and should be between 0 and input->rankOf() - 1
         *
         * output:
         *    0 - NDArray with shape reduced according to the axes (a scalar in the default case). A minimal usage sketch follows the backprop declarations below.
         */
//         #if NOT_EXCLUDED(OP_reduce_sum)
        @Namespace("sd::ops") public static class reduce_sum extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_sum(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_sum(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_sum position(long position) {
                return (reduce_sum)super.position(position);
            }
            @Override public reduce_sum getPointer(long i) {
                return new reduce_sum((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_sum() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reduce_sum_bp)
        @Namespace("sd::ops") public static class reduce_sum_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_sum_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_sum_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_sum_bp position(long position) {
                return (reduce_sum_bp)super.position(position);
            }
            @Override public reduce_sum_bp getPointer(long i) {
                return new reduce_sum_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_sum_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
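
        // Usage sketch for reduce_sum / reduce_sum_bp (a hedged illustration, not generated code;
        // the helper name is hypothetical). The raw wrapper is parameterized as documented above.
        private static reduce_sum sketchReduceSum() {
            // Input 0: NDArray; optional T arg 0: keep_dims flag; optional int args: axes to reduce along.
            return new reduce_sum();
        }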

        /**
         * reduce_prod - tf.reduce_prod operation
         *
         * input params:
         *    0 - NDArray
         *
         * T_ARG param (optional):
         * 0 - keep_dims (reduced dimensions are kept when != 0).
         *
         * int params (optional):
         *    0 - axis 1
         *    1 - axis 2
         *    ...
         *    N-1 - axis N
         *
         *    All axes are optional and should be between 0 and input->rankOf() - 1
         *
         * output:
         *    0 - NDArray with shape reduced according to the axes (a scalar in the default case).
         */
//         #if NOT_EXCLUDED(OP_reduce_prod)
        @Namespace("sd::ops") public static class reduce_prod extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_prod(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_prod(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_prod position(long position) {
                return (reduce_prod)super.position(position);
            }
            @Override public reduce_prod getPointer(long i) {
                return new reduce_prod((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_prod() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reduce_prod_bp)
        @Namespace("sd::ops") public static class reduce_prod_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_prod_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_prod_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_prod_bp position(long position) {
                return (reduce_prod_bp)super.position(position);
            }
            @Override public reduce_prod_bp getPointer(long i) {
                return new reduce_prod_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_prod_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

       /**
        * This op calculates min of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate mins for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the min along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with the calculated mins. A minimal usage sketch follows the backprop declarations below.
        */
//         #if NOT_EXCLUDED(OP_reduce_min)
        @Namespace("sd::ops") public static class reduce_min extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_min(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_min(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_min position(long position) {
                return (reduce_min)super.position(position);
            }
            @Override public reduce_min getPointer(long i) {
                return new reduce_min((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_min() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_min_bp)
        @Namespace("sd::ops") public static class reduce_min_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_min_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_min_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_min_bp position(long position) {
                return (reduce_min_bp)super.position(position);
            }
            @Override public reduce_min_bp getPointer(long i) {
                return new reduce_min_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_min_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
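
        // Usage sketch for the reduce_min family (a hedged illustration, not generated code; the
        // helper name is hypothetical). reduce_max, reduce_norm1, reduce_norm2, reduce_sqnorm and
        // reduce_norm_max below take the same keepDims float arg and axis int args.
        private static reduce_min sketchReduceMin() {
            // Input: tensor x; float arg 0: keepDims flag; int args: axes to reduce along.
            return new reduce_min();
        }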

       /**
        * This op calculates max of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate maxes for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the max along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with calculated maxes
        */
//         #if NOT_EXCLUDED(OP_reduce_max)
        @Namespace("sd::ops") public static class reduce_max extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_max(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_max(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_max position(long position) {
                return (reduce_max)super.position(position);
            }
            @Override public reduce_max getPointer(long i) {
                return new reduce_max((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_max() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_max_bp)
        @Namespace("sd::ops") public static class reduce_max_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_max_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_max_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_max_bp position(long position) {
                return (reduce_max_bp)super.position(position);
            }
            @Override public reduce_max_bp getPointer(long i) {
                return new reduce_max_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_max_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

       /**
        * This op calculates norm1 of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate norm1 for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate norm1 along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with calculated norm1
        */
//         #if NOT_EXCLUDED(OP_reduce_norm1)
        @Namespace("sd::ops") public static class reduce_norm1 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm1(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm1(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm1 position(long position) {
                return (reduce_norm1)super.position(position);
            }
            @Override public reduce_norm1 getPointer(long i) {
                return new reduce_norm1((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm1() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_norm1_bp)
        @Namespace("sd::ops") public static class reduce_norm1_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm1_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm1_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm1_bp position(long position) {
                return (reduce_norm1_bp)super.position(position);
            }
            @Override public reduce_norm1_bp getPointer(long i) {
                return new reduce_norm1_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm1_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

       /**
        * This op calculates norm2 of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate norm2 for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate norm2 along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with calculated norm2
        */
//         #if NOT_EXCLUDED(OP_reduce_norm2)
        @Namespace("sd::ops") public static class reduce_norm2 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm2(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm2(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm2 position(long position) {
                return (reduce_norm2)super.position(position);
            }
            @Override public reduce_norm2 getPointer(long i) {
                return new reduce_norm2((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm2() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_norm2_bp)
        @Namespace("sd::ops") public static class reduce_norm2_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm2_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm2_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm2_bp position(long position) {
                return (reduce_norm2_bp)super.position(position);
            }
            @Override public reduce_norm2_bp getPointer(long i) {
                return new reduce_norm2_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm2_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


       /**
        * This op calculates squared norm of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate squared norm for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the squared norm along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with calculated norm
        */
//         #if NOT_EXCLUDED(OP_reduce_sqnorm)
        @Namespace("sd::ops") public static class reduce_sqnorm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_sqnorm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_sqnorm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_sqnorm position(long position) {
                return (reduce_sqnorm)super.position(position);
            }
            @Override public reduce_sqnorm getPointer(long i) {
                return new reduce_sqnorm((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_sqnorm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_sqnorm_bp)
        @Namespace("sd::ops") public static class reduce_sqnorm_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_sqnorm_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_sqnorm_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_sqnorm_bp position(long position) {
                return (reduce_sqnorm_bp)super.position(position);
            }
            @Override public reduce_sqnorm_bp getPointer(long i) {
                return new reduce_sqnorm_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_sqnorm_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

       /**
        * This op calculates norm max of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate norm max for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the norm max along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with calculated norm
        */
//         #if NOT_EXCLUDED(OP_reduce_norm_max)
        @Namespace("sd::ops") public static class reduce_norm_max extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm_max(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm_max(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm_max position(long position) {
                return (reduce_norm_max)super.position(position);
            }
            @Override public reduce_norm_max getPointer(long i) {
                return new reduce_norm_max((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm_max() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
//         #if NOT_EXCLUDED(OP_reduce_norm_max_bp)
        @Namespace("sd::ops") public static class reduce_norm_max_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_norm_max_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_norm_max_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_norm_max_bp position(long position) {
                return (reduce_norm_max_bp)super.position(position);
            }
            @Override public reduce_norm_max_bp getPointer(long i) {
                return new reduce_norm_max_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_norm_max_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
        * This op calculates mean of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate mean for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the mean along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with the calculated means. A minimal usage sketch follows the backprop declarations below.
        */
//         #if NOT_EXCLUDED(OP_reduce_mean)
        @Namespace("sd::ops") public static class reduce_mean extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_mean(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_mean(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_mean position(long position) {
                return (reduce_mean)super.position(position);
            }
            @Override public reduce_mean getPointer(long i) {
                return new reduce_mean((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_mean() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reduce_mean_bp)
        @Namespace("sd::ops") public static class reduce_mean_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_mean_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_mean_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_mean_bp position(long position) {
                return (reduce_mean_bp)super.position(position);
            }
            @Override public reduce_mean_bp getPointer(long i) {
                return new reduce_mean_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_mean_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

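
        // Usage sketch for reduce_mean / reduce_mean_bp (a hedged illustration, not generated code;
        // the helper name is hypothetical).
        private static reduce_mean sketchReduceMean() {
            // Input: tensor x; float arg 0: keepDims flag; int args: axes to average along.
            return new reduce_mean();
        }
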
        /**
        * This op calculates sample variance of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate the variance for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *   biasCorrected: if non-zero, bias correction is applied; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the variance along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with the calculated variances. A minimal usage sketch follows the reduce_stdev declarations below.
        */
        @Namespace("sd::ops") public static class reduce_variance extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_variance(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_variance(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_variance position(long position) {
                return (reduce_variance)super.position(position);
            }
            @Override public reduce_variance getPointer(long i) {
                return new reduce_variance((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_variance() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class reduce_variance_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_variance_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_variance_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_variance_bp position(long position) {
                return (reduce_variance_bp)super.position(position);
            }
            @Override public reduce_variance_bp getPointer(long i) {
                return new reduce_variance_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_variance_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        /**
        * This op calculates sample standard deviation of elements along given dimensions
        *
        * input array:
        *    x: tensor to calculate the standard deviation for
        *
        * float arguments:
        *   keepDims: if non-zero, reduced dimensions are kept with length 1; default value is zero
        *   biasCorrected: if non-zero, bias correction is applied; default value is zero
        *
        * int arguments:
        *    list of integers - dimensions to calculate the standard deviation along; the default is an empty list, in which case the reduction is performed over all dimensions and a scalar is returned
        *
        * output array:
        *    reduced tensor with the calculated standard deviations
        */
        @Namespace("sd::ops") public static class reduce_stdev extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_stdev(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_stdev(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_stdev position(long position) {
                return (reduce_stdev)super.position(position);
            }
            @Override public reduce_stdev getPointer(long i) {
                return new reduce_stdev((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_stdev() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class reduce_stdev_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_stdev_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_stdev_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_stdev_bp position(long position) {
                return (reduce_stdev_bp)super.position(position);
            }
            @Override public reduce_stdev_bp getPointer(long i) {
                return new reduce_stdev_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_stdev_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
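
        // Usage sketch for reduce_variance / reduce_stdev (a hedged illustration, not generated
        // code; the helper name is hypothetical). Both take keepDims and biasCorrected float args
        // plus an optional list of axes as int args.
        private static reduce_variance sketchReduceVariance() {
            // Input: tensor x; float args: keepDims, biasCorrected; int args: axes to reduce along.
            return new reduce_variance();
        }
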
        /**
        * This op calculates backprop dot for two tensors along given dimensions
        *
        * input array:
        *    x: tensor to calculate dot for
        *    y: tensor to calculate dot for
        *    z: tensor with gradient output of the FF dot for x and y
        *
        * int arguments:
        *   list of integers - dimensions to calculate dot along,
        *   default corresponds to empty list in which case calculation
        *   is performed for all dimensions and scalar is returned.
        *
        * output array:
        *   the tensor with the backpropagated gradients of the dot reduction. A minimal usage sketch follows the declarations below.
        *
        */

//         #if NOT_EXCLUDED(OP_reduce_dot_bp)
        @Namespace("sd::ops") public static class reduce_dot_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_dot_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_dot_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_dot_bp position(long position) {
                return (reduce_dot_bp)super.position(position);
            }
            @Override public reduce_dot_bp getPointer(long i) {
                return new reduce_dot_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_dot_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
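        /*
         * Usage sketch: a minimal, unverified example of calling this op through the higher-level ND4J
         * DynamicCustomOp API rather than this raw JavaCPP binding. It assumes org.nd4j.linalg.factory.Nd4j,
         * org.nd4j.linalg.api.ndarray.INDArray and org.nd4j.linalg.api.ops.DynamicCustomOp are on the
         * classpath; the op name string and argument order are taken from the comment above and should be
         * checked against your ND4J version.
         *
         *   INDArray x    = Nd4j.rand(2, 3);
         *   INDArray y    = Nd4j.rand(2, 3);
         *   INDArray grad = Nd4j.scalar(1.0);                  // gradient of the scalar FF dot output
         *   DynamicCustomOp op = DynamicCustomOp.builder("reduce_dot_bp")
         *           .addInputs(x, y, grad)                     // no int args: reduce over all dimensions
         *           .build();
         *   INDArray[] grads = Nd4j.exec(op);                  // gradients w.r.t. x and y
         */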
        /**
         * reduce_logsumexp - tf.reduce_logsumexp operation
         *
         * input params:
         *    0 - NDArray (input)
         *    1 - 1D NDArray (axis) (optional) - integer array
         *
         * T_ARG param (optional):
         * 0 - keep_dims != 0.
         *
         * int params (optional):
         *    0 - axis 1
         *    1 - axis 2
         *    ...
         *    N-1 - axis N
         *
         *  CAUTION: all axes are optional, must be between 0 and input->rankOf() - 1,
         *  and may be passed either via the second input param or as integer params, but not both
         *
         * output:
         *    0 - NDArray reduced along the given axes (a scalar in the default case).
         */
//         #if NOT_EXCLUDED(OP_reduce_logsumexp)
        @Namespace("sd::ops") public static class reduce_logsumexp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reduce_logsumexp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reduce_logsumexp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reduce_logsumexp position(long position) {
                return (reduce_logsumexp)super.position(position);
            }
            @Override public reduce_logsumexp getPointer(long i) {
                return new reduce_logsumexp((Pointer)this).position(position + i);
            }
        
                                                                                    public reduce_logsumexp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
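        /*
         * Usage sketch (same assumptions as the reduce_dot_bp sketch above: higher-level ND4J
         * DynamicCustomOp API, op name and argument order unverified):
         *
         *   INDArray x = Nd4j.rand(3, 4);
         *   DynamicCustomOp op = DynamicCustomOp.builder("reduce_logsumexp")
         *           .addInputs(x)
         *           .addIntegerArguments(1)                    // reduce along axis 1 (optional)
         *           .build();
         *   INDArray out = Nd4j.exec(op)[0];                   // shape [3] here; a scalar if no axes are given
         */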

       /**
        * Copy a tensor, setting everything outside a central band of each innermost matrix to zero
        *
        * input array:
        *    x: given tensor with shape {..., M, N} - treated as a (possibly batched) set of MxN matrices
        *
        * int arguments:
        *   lower band
        *   upper band
        *
        * output array:
        *   tensor of the same shape, keeping only the band between the lower and upper diagonals
        *
        */

//         #if NOT_EXCLUDED(OP_matrix_band_part)
        @Namespace("sd::ops") public static class matrix_band_part extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matrix_band_part(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matrix_band_part(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matrix_band_part position(long position) {
                return (matrix_band_part)super.position(position);
            }
            @Override public matrix_band_part getPointer(long i) {
                return new matrix_band_part((Pointer)this).position(position + i);
            }
        
                                                                                    public matrix_band_part() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
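        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray x = Nd4j.rand(4, 4);
         *   DynamicCustomOp op = DynamicCustomOp.builder("matrix_band_part")
         *           .addInputs(x)
         *           .addIntegerArguments(1, 1)                 // lower band, upper band (keeps the tridiagonal part)
         *           .build();
         *   INDArray banded = Nd4j.exec(op)[0];
         */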


//         #if NOT_EXCLUDED(OP_Assert)
        @Namespace("sd::ops") public static class Assert extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public Assert(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public Assert(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public Assert position(long position) {
                return (Assert)super.position(position);
            }
            @Override public Assert getPointer(long i) {
                return new Assert((Pointer)this).position(position + i);
            }
        
                                                    public Assert() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

        /**
         * image.non_max_suppression op.
         * input:
         *     0 - boxes - 2D-tensor with shape (num_boxes, 4), float type
         *     1 - scores - 1D-tensor with shape (num_boxes), float type
         *     2 - output_size - 0D-tensor, int type (optional)
         * float args:
         *     0 - overlap_threshold - threshold value for overlap checks (optional, 0.5 by default)
         *     1 - score_threshold - threshold for deciding when to remove boxes based on score (optional, -inf by default)
         * int args:
         *     0 - output_size - serves the same purpose as input 2. Either this or input 2 should be provided.
         *
         * output:
         *     - vector of size M, where M <= output_size, int type
         *
         * */
//         #if NOT_EXCLUDED(OP_image_non_max_suppression)
        @Namespace("sd::ops") public static class non_max_suppression extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public non_max_suppression(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public non_max_suppression(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public non_max_suppression position(long position) {
                return (non_max_suppression)super.position(position);
            }
            @Override public non_max_suppression getPointer(long i) {
                return new non_max_suppression((Pointer)this).position(position + i);
            }
        
                                                                                    public non_max_suppression() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
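        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray boxes  = Nd4j.rand(10, 4);                // (num_boxes, 4)
         *   INDArray scores = Nd4j.rand(new int[]{10});        // (num_boxes)
         *   DynamicCustomOp op = DynamicCustomOp.builder("non_max_suppression")
         *           .addInputs(boxes, scores)
         *           .addIntegerArguments(5)                    // output_size
         *           .addFloatingPointArguments(0.5, 0.0)       // overlap_threshold, score_threshold
         *           .build();
         *   INDArray selected = Nd4j.exec(op)[0];              // up to 5 selected box indices
         */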
//         #if NOT_EXCLUDED(OP_image_non_max_suppression_v3)
                @Namespace("sd::ops") public static class non_max_suppression_v3 extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public non_max_suppression_v3(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public non_max_suppression_v3(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public non_max_suppression_v3 position(long position) {
                        return (non_max_suppression_v3)super.position(position);
                    }
                    @Override public non_max_suppression_v3 getPointer(long i) {
                        return new non_max_suppression_v3((Pointer)this).position(position + i);
                    }
                
                                                                                    public non_max_suppression_v3() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /*
         * image.non_max_suppression_overlaps op.
         * input:
         *     0 - boxes - 2D-tensor with shape (num_boxes, 4), float type
         *     1 - scores - 1D-tensor with shape (num_boxes), float type
         *     2 - output_size - 0D-tensor, int type (optional)
         * float args:
         *     0 - overlap_threshold - threshold value for overlap checks (optional, 0.5 by default)
         *     1 - score_threshold - threshold for deciding when to remove boxes based on score (optional, -inf by default)
         * int args:
         *     0 - output_size - serves the same purpose as input 2. Either this or input 2 should be provided.
         *
         * output:
         *     0 - 1D integer tensor with shape [M], representing the selected indices from the overlaps tensor, where M <= max_output_size
         * */
//         #if NOT_EXCLUDED(OP_image_non_max_suppression_overlaps)
        @Namespace("sd::ops") public static class non_max_suppression_overlaps extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public non_max_suppression_overlaps(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public non_max_suppression_overlaps(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public non_max_suppression_overlaps position(long position) {
                return (non_max_suppression_overlaps)super.position(position);
            }
            @Override public non_max_suppression_overlaps getPointer(long i) {
                return new non_max_suppression_overlaps((Pointer)this).position(position + i);
            }
        
                                                                                    public non_max_suppression_overlaps() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /*
         * cholesky op - decomposes a symmetric positive-definite square matrix (or matrices when rank > 2).
         * input:
         *     0 - matrices - tensor with shape (..., N, N), float type
         *
         * output - lower triangular matrix (matrices when rank > 2) with the same shape as input.
         * */
//         #if NOT_EXCLUDED(OP_cholesky)
        @Namespace("sd::ops") public static class cholesky extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cholesky(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cholesky(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cholesky position(long position) {
                return (cholesky)super.position(position);
            }
            @Override public cholesky getPointer(long i) {
                return new cholesky((Pointer)this).position(position + i);
            }
        
                                                    public cholesky() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
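        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray a   = Nd4j.rand(3, 3);
         *   INDArray spd = a.mmul(a.transpose()).add(Nd4j.eye(3));   // symmetric positive-definite input
         *   DynamicCustomOp op = DynamicCustomOp.builder("cholesky")
         *           .addInputs(spd)
         *           .build();
         *   INDArray lower = Nd4j.exec(op)[0];                 // lower-triangular factor, same shape as input
         */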
        /*
         * nth_element - applies nth_element along the last dimension of the input tensor
         * input array:
         *     0 - input array
         *     1 - scalar tensor with n for the operation. n should be less than the size of the last dimension
         *
         * output:
         *    0 - NDArray with the same shape as input
         */
//         #if NOT_EXCLUDED(OP_nth_element)
        @Namespace("sd::ops") public static class nth_element extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public nth_element(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public nth_element(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public nth_element position(long position) {
                return (nth_element)super.position(position);
            }
            @Override public nth_element getPointer(long i) {
                return new nth_element((Pointer)this).position(position + i);
            }
        
                                                                                    public nth_element() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
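        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray x = Nd4j.rand(3, 5);
         *   INDArray n = Nd4j.scalar(2);                       // n must be less than the last dimension (5)
         *   DynamicCustomOp op = DynamicCustomOp.builder("nth_element")
         *           .addInputs(x, n)
         *           .build();
         *   INDArray out = Nd4j.exec(op)[0];
         */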

        /**
         * This op checks for Inf/NaN values within the input array and throws an exception if at least one is found
         */
//         #if NOT_EXCLUDED(OP_check_numerics)
        @Namespace("sd::ops") public static class check_numerics extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public check_numerics(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public check_numerics(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public check_numerics position(long position) {
                return (check_numerics)super.position(position);
            }
            @Override public check_numerics getPointer(long i) {
                return new check_numerics((Pointer)this).position(position + i);
            }
        
                                                                                    public check_numerics() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
/**
         * fake_quant_with_min_max_vars - tf.quantization.fake_quant_with_min_max_vars
         *
         * input params:
         *    0 - NDArray (input)
         *    1 - 0D Tensor - min value
         *    2 - 0D Tensor - max value
         *
         * int params (optional):
         *    0 - num_bits (allowed interval [2, 16], default 8)
         *    1 - narrow_range (default False)
         *
         * output:
         *    0 - NDArray with the same shape as input
         */
//         #if NOT_EXCLUDED(OP_fake_quant_with_min_max_vars)
        @Namespace("sd::ops") public static class fake_quant_with_min_max_vars extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public fake_quant_with_min_max_vars(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public fake_quant_with_min_max_vars(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public fake_quant_with_min_max_vars position(long position) {
                return (fake_quant_with_min_max_vars)super.position(position);
            }
            @Override public fake_quant_with_min_max_vars getPointer(long i) {
                return new fake_quant_with_min_max_vars((Pointer)this).position(position + i);
            }
        
                                                                                    public fake_quant_with_min_max_vars() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
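        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray input = Nd4j.rand(2, 4);
         *   INDArray min   = Nd4j.scalar(0.0);
         *   INDArray max   = Nd4j.scalar(1.0);
         *   DynamicCustomOp op = DynamicCustomOp.builder("fake_quant_with_min_max_vars")
         *           .addInputs(input, min, max)
         *           .addIntegerArguments(8, 0)                 // num_bits = 8, narrow_range = false
         *           .build();
         *   INDArray quantized = Nd4j.exec(op)[0];             // same shape as input
         */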

/**
         * fake_quant_with_min_max_vars_per_channel - tf.quantization.fake_quant_with_min_max_vars_per_channel
         *
         * input params:
         *    0 - NDArray (input) - at least 2D.
         *    1 - 1D Tensor - min values (length equals the last dimension of input)
         *    2 - 1D Tensor - max values (same length as min)
         *
         * int params (optional):
         *    0 - num_bits (allowed interval [2, 16], default 8)
         *    1 - narrow_range (default False)
         *
         * output:
         *    0 - NDArray with the same shape as input
         */
//         #if NOT_EXCLUDED(OP_fake_quant_with_min_max_vars_per_channel)
                @Namespace("sd::ops") public static class fake_quant_with_min_max_vars_per_channel extends DeclarableOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public fake_quant_with_min_max_vars_per_channel(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public fake_quant_with_min_max_vars_per_channel(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public fake_quant_with_min_max_vars_per_channel position(long position) {
                        return (fake_quant_with_min_max_vars_per_channel)super.position(position);
                    }
                    @Override public fake_quant_with_min_max_vars_per_channel getPointer(long i) {
                        return new fake_quant_with_min_max_vars_per_channel((Pointer)this).position(position + i);
                    }
                
                                                                                    public fake_quant_with_min_max_vars_per_channel() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * compare_and_bitpack - Compare values of input to threshold and pack resulting bits into a uint8
         *
         * input params:
         *    0 - NDArray (input). Note: the last dimension should be divisible by 8
         *    1 - 0D Tensor - threshold to compare against. Note: when input is bool type, the threshold is ignored
         *
         *
         * output:
         *    0 - NDArray with shape {input.dim0, ..., input.dimLast/8} and type uint8
         */
//         #if NOT_EXCLUDED(OP_compare_and_bitpack)
        @Namespace("sd::ops") public static class compare_and_bitpack extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public compare_and_bitpack(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public compare_and_bitpack(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public compare_and_bitpack position(long position) {
                return (compare_and_bitpack)super.position(position);
            }
            @Override public compare_and_bitpack getPointer(long i) {
                return new compare_and_bitpack((Pointer)this).position(position + i);
            }
        
                                                                                    public compare_and_bitpack() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
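        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray input     = Nd4j.rand(2, 16);             // last dimension divisible by 8
         *   INDArray threshold = Nd4j.scalar(0.5);
         *   DynamicCustomOp op = DynamicCustomOp.builder("compare_and_bitpack")
         *           .addInputs(input, threshold)
         *           .build();
         *   INDArray packed = Nd4j.exec(op)[0];                // shape {2, 2}, uint8
         */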
    


// #endif


// Parsed from ops/declarable/headers/shape.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_SHAPE_H
// #define LIBND4J_HEADERS_SHAPE_H

// #include 
//         #if NOT_EXCLUDED(OP_permute)
        @Namespace("sd::ops") public static class permute extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public permute(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public permute(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public permute position(long position) {
                return (permute)super.position(position);
            }
            @Override public permute getPointer(long i) {
                return new permute((Pointer)this).position(position + i);
            }
        
                                                                                    public permute() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reshapeas)
        @Namespace("sd::ops") public static class reshapeas extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reshapeas(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reshapeas(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reshapeas position(long position) {
                return (reshapeas)super.position(position);
            }
            @Override public reshapeas getPointer(long i) {
                return new reshapeas((Pointer)this).position(position + i);
            }
        
                                                                                    public reshapeas() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_transpose)
        @Namespace("sd::ops") public static class transpose extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public transpose(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public transpose(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public transpose position(long position) {
                return (transpose)super.position(position);
            }
            @Override public transpose getPointer(long i) {
                return new transpose((Pointer)this).position(position + i);
            }
        
                                                                                    public transpose() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_shape_of)
        @Namespace("sd::ops") public static class shape_of extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public shape_of(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public shape_of(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public shape_of position(long position) {
                return (shape_of)super.position(position);
            }
            @Override public shape_of getPointer(long i) {
                return new shape_of((Pointer)this).position(position + i);
            }
        
                                                                                    public shape_of() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_shapes_of)
        @Namespace("sd::ops") public static class shapes_of extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public shapes_of(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public shapes_of(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public shapes_of position(long position) {
                return (shapes_of)super.position(position);
            }
            @Override public shapes_of getPointer(long i) {
                return new shapes_of((Pointer)this).position(position + i);
            }
        
                                                                                    public shapes_of() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_squeeze)
        @Namespace("sd::ops") public static class squeeze extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public squeeze(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public squeeze(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public squeeze position(long position) {
                return (squeeze)super.position(position);
            }
            @Override public squeeze getPointer(long i) {
                return new squeeze((Pointer)this).position(position + i);
            }
        
                                                                                    public squeeze() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_expand_dims)
        @Namespace("sd::ops") public static class expand_dims extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public expand_dims(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public expand_dims(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public expand_dims position(long position) {
                return (expand_dims)super.position(position);
            }
            @Override public expand_dims getPointer(long i) {
                return new expand_dims((Pointer)this).position(position + i);
            }
        
                                                                                    public expand_dims() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_flatten_2d)
            @Namespace("sd::ops") public static class flatten_2d extends DeclarableCustomOp {
                static { Loader.load(); }
                /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                public flatten_2d(Pointer p) { super(p); }
                /** Native array allocator. Access with {@link Pointer#position(long)}. */
                public flatten_2d(long size) { super((Pointer)null); allocateArray(size); }
                private native void allocateArray(long size);
                @Override public flatten_2d position(long position) {
                    return (flatten_2d)super.position(position);
                }
                @Override public flatten_2d getPointer(long i) {
                    return new flatten_2d((Pointer)this).position(position + i);
                }
            
                                                                                    public flatten_2d() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_reshape)
        @Namespace("sd::ops") public static class reshape extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public reshape(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public reshape(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public reshape position(long position) {
                return (reshape)super.position(position);
            }
            @Override public reshape getPointer(long i) {
                return new reshape((Pointer)this).position(position + i);
            }
        
                                                                                    public reshape() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_size_at)
        @Namespace("sd::ops") public static class size_at extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public size_at(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public size_at(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public size_at position(long position) {
                return (size_at)super.position(position);
            }
            @Override public size_at getPointer(long i) {
                return new size_at((Pointer)this).position(position + i);
            }
        
                                                                                    public size_at() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op changes the ordering of the given array to the specified order.
         * In other words: a C/F order switch
         *
         * Int args:
         * 0 - isFOrder: set to 1 for F-order output, or 0 for C-order output
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_order)
        @Namespace("sd::ops") public static class order extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public order(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public order(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public order position(long position) {
                return (order)super.position(position);
            }
            @Override public order getPointer(long i) {
                return new order((Pointer)this).position(position + i);
            }
        
                                                                                    public order() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
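        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray c = Nd4j.rand(3, 4);                      // C-order by default
         *   DynamicCustomOp op = DynamicCustomOp.builder("order")
         *           .addInputs(c)
         *           .addIntegerArguments(1)                    // isFOrder = 1 -> F-order output
         *           .build();
         *   INDArray f = Nd4j.exec(op)[0];
         */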

        /**
         * This op tiles the specified input up to the specified shape
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_tile_to_shape)
        @Namespace("sd::ops") public static class tile_to_shape extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tile_to_shape(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tile_to_shape(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tile_to_shape position(long position) {
                return (tile_to_shape)super.position(position);
            }
            @Override public tile_to_shape getPointer(long i) {
                return new tile_to_shape((Pointer)this).position(position + i);
            }
        
                                                                                    public tile_to_shape() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class tile_to_shape_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tile_to_shape_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tile_to_shape_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tile_to_shape_bp position(long position) {
                return (tile_to_shape_bp)super.position(position);
            }
            @Override public tile_to_shape_bp getPointer(long i) {
                return new tile_to_shape_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public tile_to_shape_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This op broadcasts the given input up to the given shape
         *
         * inputs:
         *  input array - array to be broadcast to the given shape
         *  shape array - array containing the shape to broadcast to
         */
//         #if NOT_EXCLUDED(OP_broadcast_to)
        @Namespace("sd::ops") public static class broadcast_to extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public broadcast_to(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public broadcast_to(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public broadcast_to position(long position) {
                return (broadcast_to)super.position(position);
            }
            @Override public broadcast_to getPointer(long i) {
                return new broadcast_to((Pointer)this).position(position + i);
            }
        
                                                                                    public broadcast_to() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
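        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API; availability of
         * Nd4j.createFromArray depends on the ND4J version):
         *
         *   INDArray input = Nd4j.rand(1, 3);
         *   INDArray shape = Nd4j.createFromArray(4L, 3L);     // target shape
         *   DynamicCustomOp op = DynamicCustomOp.builder("broadcast_to")
         *           .addInputs(input, shape)
         *           .build();
         *   INDArray out = Nd4j.exec(op)[0];                   // shape [4, 3]
         */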


//         #if NOT_EXCLUDED(OP_evaluate_reduction_shape)
        @Namespace("sd::ops") public static class evaluate_reduction_shape extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public evaluate_reduction_shape(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public evaluate_reduction_shape(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public evaluate_reduction_shape position(long position) {
                return (evaluate_reduction_shape)super.position(position);
            }
            @Override public evaluate_reduction_shape getPointer(long i) {
                return new evaluate_reduction_shape((Pointer)this).position(position + i);
            }
        
                                                                                    public evaluate_reduction_shape() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * This operation creates a new array
         * Input:
         *    array with shape values
         *
         * IArgs:
         *    order value
         *    data type value
         *
         * BArgs:
         *    initialization option
         */
//         #if NOT_EXCLUDED(OP_create)
        @Namespace("sd::ops") public static class create extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public create(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public create(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public create position(long position) {
                return (create)super.position(position);
            }
            @Override public create getPointer(long i) {
                return new create((Pointer)this).position(position + i);
            }
        
                                                                                    public create() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
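        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified).
         * The integer code used for the data type below is an assumption and depends on the libnd4j
         * DataType enum of your build.
         *
         *   INDArray shape = Nd4j.createFromArray(2L, 3L);     // shape values
         *   DynamicCustomOp op = DynamicCustomOp.builder("create")
         *           .addInputs(shape)
         *           .addIntegerArguments('c', 5)               // order, data type code (5 assumed to mean float32)
         *           .addBooleanArguments(true)                 // initialization option (zero-fill)
         *           .build();
         *   INDArray out = Nd4j.exec(op)[0];                   // new 2x3 array
         */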
    


// #endif

// Parsed from ops/declarable/headers/random.h

/*
 *  ******************************************************************************
 *  *
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Apache License, Version 2.0 which is available at
 *  * https://www.apache.org/licenses/LICENSE-2.0.
 *  *
 *  * See the NOTICE file distributed with this work for additional
 *  * information regarding copyright ownership.
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *  * License for the specific language governing permissions and limitations
 *  * under the License.
 *  *
 *  * SPDX-License-Identifier: Apache-2.0
 *  *****************************************************************************
 */

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_RANDOM_H
// #define LIBND4J_HEADERS_RANDOM_H

// #include 
//         #if NOT_EXCLUDED(OP_set_seed)
        @Namespace("sd::ops") public static class set_seed extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public set_seed(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public set_seed(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public set_seed position(long position) {
                return (set_seed)super.position(position);
            }
            @Override public set_seed getPointer(long i) {
                return new set_seed((Pointer)this).position(position + i);
            }
        
                                                                                    public set_seed() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_get_seed)
        @Namespace("sd::ops") public static class get_seed extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public get_seed(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public get_seed(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public get_seed position(long position) {
                return (get_seed)super.position(position);
            }
            @Override public get_seed getPointer(long i) {
                return new get_seed((Pointer)this).position(position + i);
            }
        
                                                                                    public get_seed() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /*
         * random_uniform distribution for types int32, int64, float16, float32 and double;
         * the default dtype is float32
         *
         * input:
         *    0 - shape of output (1D int tensor)
         *    1 - min val (0D of output type) - optional (0 as default)
         *    2 - max val (0D of output type) - optional (inf as default)
         *
         * output:
         *    0 - uniformly distributed values of given type (between min and max)
         */
//         #if NOT_EXCLUDED(OP_randomuniform)
        @Namespace("sd::ops") public static class randomuniform extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public randomuniform(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public randomuniform(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public randomuniform position(long position) {
                return (randomuniform)super.position(position);
            }
            @Override public randomuniform getPointer(long i) {
                return new randomuniform((Pointer)this).position(position + i);
            }
        
                                                                                    public randomuniform() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
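        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray shape = Nd4j.createFromArray(2L, 3L);     // shape of the output
         *   INDArray min   = Nd4j.scalar(0.0f);
         *   INDArray max   = Nd4j.scalar(1.0f);
         *   DynamicCustomOp op = DynamicCustomOp.builder("randomuniform")
         *           .addInputs(shape, min, max)
         *           .build();
         *   INDArray out = Nd4j.exec(op)[0];                   // 2x3 array of uniform values in [min, max)
         */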
        /*
         * multinomial (categorical) random generator draws samples from a multinomial distribution
         *
         * Input array:
         *    0 - 2D ndarray with unnormalized log-probabilities with shape [batch_size (N), num_classes (K)]
         *    1 - array with a single int value - the number of independent samples to draw for each of the N experiments.
         * Int arguments:
         *    0 - optional argument, corresponds to dimension with batch_size
         *    1 - optional argument, integer type to use for the output. Default int64.
         *
         * Output array:
         *    0 - 2D ndarray with the drawn samples of shape [batch_size, num_samples]
         */
//         #if NOT_EXCLUDED(OP_random_multinomial)
        @Namespace("sd::ops") public static class random_multinomial extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_multinomial(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_multinomial(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_multinomial position(long position) {
                return (random_multinomial)super.position(position);
            }
            @Override public random_multinomial getPointer(long i) {
                return new random_multinomial((Pointer)this).position(position + i);
            }
        
                                                                                    public random_multinomial() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
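        /*
         * Usage sketch (same assumptions as the earlier sketches: ND4J DynamicCustomOp API, unverified):
         *
         *   INDArray logits     = Nd4j.rand(2, 5);             // [batch_size, num_classes] unnormalized log-probabilities
         *   INDArray numSamples = Nd4j.scalar(3);
         *   DynamicCustomOp op = DynamicCustomOp.builder("random_multinomial")
         *           .addInputs(logits, numSamples)
         *           .build();
         *   INDArray samples = Nd4j.exec(op)[0];               // [2, 3] sampled class indices
         */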

//         #if NOT_EXCLUDED(OP_random_normal)
        @Namespace("sd::ops") public static class random_normal extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_normal(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_normal(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_normal position(long position) {
                return (random_normal)super.position(position);
            }
            @Override public random_normal getPointer(long i) {
                return new random_normal((Pointer)this).position(position + i);
            }
        
                                                                                    public random_normal() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_random_bernoulli)
        @Namespace("sd::ops") public static class random_bernoulli extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_bernoulli(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_bernoulli(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_bernoulli position(long position) {
                return (random_bernoulli)super.position(position);
            }
            @Override public random_bernoulli getPointer(long i) {
                return new random_bernoulli((Pointer)this).position(position + i);
            }
        
                                                                                    public random_bernoulli() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_random_exponential)
        @Namespace("sd::ops") public static class random_exponential extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_exponential(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_exponential(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_exponential position(long position) {
                return (random_exponential)super.position(position);
            }
            @Override public random_exponential getPointer(long i) {
                return new random_exponential((Pointer)this).position(position + i);
            }
        
                                                                                    public random_exponential() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_random_crop)
        @Namespace("sd::ops") public static class random_crop extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_crop(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_crop(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_crop position(long position) {
                return (random_crop)super.position(position);
            }
            @Override public random_crop getPointer(long i) {
                return new random_crop((Pointer)this).position(position + i);
            }
        
                                                                                    public random_crop() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * random_gamma op.
         */
//         #if NOT_EXCLUDED(OP_random_gamma)
        @Namespace("sd::ops") public static class random_gamma extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_gamma(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_gamma(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_gamma position(long position) {
                return (random_gamma)super.position(position);
            }
            @Override public random_gamma getPointer(long i) {
                return new random_gamma((Pointer)this).position(position + i);
            }
        
                                                                                    public random_gamma() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * random_poisson op.
         */
//         #if NOT_EXCLUDED(OP_random_poisson)
        @Namespace("sd::ops") public static class random_poisson extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public random_poisson(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public random_poisson(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public random_poisson position(long position) {
                return (random_poisson)super.position(position);
            }
            @Override public random_poisson getPointer(long i) {
                return new random_poisson((Pointer)this).position(position + i);
            }
        
                                                                                    public random_poisson() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

    


// #endif

// Parsed from ops/declarable/headers/nn.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_NN_H
// #define LIBND4J_HEADERS_NN_H

// #include 

//         #if NOT_EXCLUDED(OP_softmax)
        @Namespace("sd::ops") public static class softmax extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax position(long position) {
                return (softmax)super.position(position);
            }
            @Override public softmax getPointer(long i) {
                return new softmax((Pointer)this).position(position + i);
            }
        
                                                                                    public softmax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class softmax_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax_bp position(long position) {
                return (softmax_bp)super.position(position);
            }
            @Override public softmax_bp getPointer(long i) {
                return new softmax_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public softmax_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
         * Local response normalization, implemented as in TF.
         * input: 4D array
         *
         * T args:
         *
         * 0: bias
         * 1: alpha
         * 2: beta
         *
         * Int arg: depth - optional local radius
         *
         * output - 4D array
         */
//         #if NOT_EXCLUDED(OP_lrn)
        @Namespace("sd::ops") public static class lrn extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lrn(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lrn(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lrn position(long position) {
                return (lrn)super.position(position);
            }
            @Override public lrn getPointer(long i) {
                return new lrn((Pointer)this).position(position + i);
            }
        
                                                                                    public lrn() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
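        // Illustrative sketch (not part of the generated bindings): TF-style local
        // response normalization applied along the channel vector at a single spatial
        // position, using the bias/alpha/beta/depth arguments documented above.
        // The helper name is hypothetical.
        private static double[] lrnChannelsSketch(double[] x, double bias, double alpha, double beta, int depth) {
            double[] out = new double[x.length];
            for (int c = 0; c < x.length; c++) {
                double sum = 0.0;
                // sum of squares over the local window [c - depth, c + depth]
                for (int j = Math.max(0, c - depth); j <= Math.min(x.length - 1, c + depth); j++)
                    sum += x[j] * x[j];
                out[c] = x[c] / Math.pow(bias + alpha * sum, beta);
            }
            return out;
        }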

        /**
         * Local response normalization - backprop variant.
         * input:
         *  0 - 4D array of data
         *  1 - epsilon - 4D array of approximation
         *
         * T args:
         *
         * 0: bias
         * 1: alpha
         * 2: beta
         *
         * Int arg: depth - optional local radius
         *
         * output - next approximation as 4D array
         */
//         #if NOT_EXCLUDED(OP_lrn)
        @Namespace("sd::ops") public static class lrn_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public lrn_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public lrn_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public lrn_bp position(long position) {
                return (lrn_bp)super.position(position);
            }
            @Override public lrn_bp getPointer(long i) {
                return new lrn_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public lrn_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

        /**
        * Batch normalization implementation.
        * Reference: https://arxiv.org/abs/1502.03167v3
        *
        * Expected arguments:
        * input: input array (any number of dimensions)
        * mean:
        * variance:
        * gamma:
        * beta:
        *
        * Int args:
        * 0: apply scale
        * 1: apply offset
        *
        *
        * T args:
        * 0: epsilon
        */
//         #if NOT_EXCLUDED(OP_batchnorm)
        @Namespace("sd::ops") public static class batchnorm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public batchnorm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public batchnorm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public batchnorm position(long position) {
                return (batchnorm)super.position(position);
            }
            @Override public batchnorm getPointer(long i) {
                return new batchnorm((Pointer)this).position(position + i);
            }
        
                                                                                    public batchnorm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
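        // Illustrative sketch (not part of the generated bindings): the element-wise
        // formula the batchnorm comment above describes, including the applyScale /
        // applyOffset int args and the epsilon T arg. The helper name is hypothetical.
        private static double batchnormElementSketch(double x, double mean, double variance,
                                                     double gamma, double beta, double epsilon,
                                                     boolean applyScale, boolean applyOffset) {
            double normalized = (x - mean) / Math.sqrt(variance + epsilon);
            if (applyScale)  normalized *= gamma;   // optional scale (gamma)
            if (applyOffset) normalized += beta;    // optional offset (beta)
            return normalized;
        }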

        /**
        * back prop in batch normalization
        *
        * Expected arguments:
        * input: input array (any number of dimensions)
        * mean:
        * variance:
        * gamma: optional
        * beta: optional
        * dLdOut: next epsilon
        *
        * Int args:
        * 0: apply scale
        * 1: apply offset
        *
        * T args:
        * 0: epsilon
        *
        * output arrays:
        * dL/dInput
        * dL/dMean
        * dL/dVariance
        * dL/dGamma, optional
        * dL/dBeta, optional
        */
//         #if NOT_EXCLUDED(OP_batchnorm)
        @Namespace("sd::ops") public static class batchnorm_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public batchnorm_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public batchnorm_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public batchnorm_bp position(long position) {
                return (batchnorm_bp)super.position(position);
            }
            @Override public batchnorm_bp getPointer(long i) {
                return new batchnorm_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public batchnorm_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * This operation updates parameters with provided gradients, wrt learning rate
         * Expected arguments:
         * x: parameters, any shape
         * y: gradients, same shape as x
         * lr: optional, learning rate
         *
         * T args:
         * 0: optional, learning rate
         */
//         #if NOT_EXCLUDED(OP_apply_sgd)
        @Namespace("sd::ops") public static class apply_sgd extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public apply_sgd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public apply_sgd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public apply_sgd position(long position) {
                return (apply_sgd)super.position(position);
            }
            @Override public apply_sgd getPointer(long i) {
                return new apply_sgd((Pointer)this).position(position + i);
            }
        
                                                                                    public apply_sgd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
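        // Illustrative sketch (not part of the generated bindings): the in-place SGD
        // update described above, parameters -= lr * gradients. The helper name is
        // hypothetical.
        private static void applySgdSketch(double[] parameters, double[] gradients, double lr) {
            for (int i = 0; i < parameters.length; i++)
                parameters[i] -= lr * gradients[i];
        }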

        /**
         * This operation performs batch normalization of a layer; it is based on the following article: https://arxiv.org/abs/1502.03167.
         * Expected arguments:
         * x: input 4D array of shape [bS,iH,iW,iD] (data format = NHWC) or [bS,iD,iH,iW] (data format = NCHW), where
         *    bS - batch size
         *    iH - input height
         *    iW - input width
         *    iD - input depth (or number of channels)
         * scale:  1D input array of scale factors, shape [iD]
         * offset: 1D input array of offsets (shifts), shape [iD]
         * mean: 1D input array of population mean used for inference, shape [iD]; this array is required only if isTraining = false
         * variance: 1D input array of population variance used for inference, shape [iD]; this array is required only if isTraining = false
         *
         * T input arguments:
         * 0: epsilon, optional, default value 0.001; a small number added to the variance of x
         *
         * integer input arguments:
         * 0: dataFormat, may have two values: zero -> NHWC, one -> NCHW
         * 1: isTraining, may have two values: zero -> inference, one -> training
         */
//         #if NOT_EXCLUDED(OP_fused_batch_norm)
        @Namespace("sd::ops") public static class fused_batch_norm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public fused_batch_norm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public fused_batch_norm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public fused_batch_norm position(long position) {
                return (fused_batch_norm)super.position(position);
            }
            @Override public fused_batch_norm getPointer(long i) {
                return new fused_batch_norm((Pointer)this).position(position + i);
            }
        
                                                                                    public fused_batch_norm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
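        // Illustrative sketch (not part of the generated bindings): inference-mode
        // normalization for a single NHWC pixel vector of length iD, using the
        // scale/offset/mean/variance inputs and epsilon documented above. The helper
        // name is hypothetical; the native op also handles NCHW and training mode.
        private static double[] fusedBatchNormPixelSketch(double[] x, double[] scale, double[] offset,
                                                          double[] mean, double[] variance, double epsilon) {
            double[] out = new double[x.length];
            for (int c = 0; c < x.length; c++)
                out[c] = scale[c] * (x[c] - mean[c]) / Math.sqrt(variance[c] + epsilon) + offset[c];
            return out;
        }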

//         #if NOT_EXCLUDED(OP_log_softmax)
        @Namespace("sd::ops") public static class log_softmax extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_softmax(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_softmax(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_softmax position(long position) {
                return (log_softmax)super.position(position);
            }
            @Override public log_softmax getPointer(long i) {
                return new log_softmax((Pointer)this).position(position + i);
            }
        
                                                                                    public log_softmax() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class log_softmax_bp extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_softmax_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_softmax_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_softmax_bp position(long position) {
                return (log_softmax_bp)super.position(position);
            }
            @Override public log_softmax_bp getPointer(long i) {
                return new log_softmax_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public log_softmax_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * relu_layer = relu(x*w + b)
         */
        @Namespace("sd::ops") public static class relu_layer extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public relu_layer(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public relu_layer(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public relu_layer position(long position) {
                return (relu_layer)super.position(position);
            }
            @Override public relu_layer getPointer(long i) {
                return new relu_layer((Pointer)this).position(position + i);
            }
        
                                                                                    public relu_layer() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
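        // Illustrative sketch (not part of the generated bindings): relu(x*w + b) for
        // a single input row x of length nIn, weights w of shape [nIn][nOut] and bias
        // b of length nOut, mirroring the formula above. The helper name is hypothetical.
        private static double[] reluLayerRowSketch(double[] x, double[][] w, double[] b) {
            int nOut = b.length;
            double[] out = new double[nOut];
            for (int j = 0; j < nOut; j++) {
                double sum = b[j];
                for (int i = 0; i < x.length; i++) sum += x[i] * w[i][j];
                out[j] = Math.max(0.0, sum);   // relu
            }
            return out;
        }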

        /**
         * applies layer normalization to input
         * y = g * standardize(x) + b
         *
         * see sd::ops::standardize
         *
         */
//         #if NOT_EXCLUDED(OP_layer_norm)
                @Namespace("sd::ops") public static class layer_norm extends DeclarableOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public layer_norm(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public layer_norm(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public layer_norm position(long position) {
                        return (layer_norm)super.position(position);
                    }
                    @Override public layer_norm getPointer(long i) {
                        return new layer_norm((Pointer)this).position(position + i);
                    }
                
                                                                                    public layer_norm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                @Namespace("sd::ops") public static class layer_norm_bp extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public layer_norm_bp(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public layer_norm_bp(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public layer_norm_bp position(long position) {
                        return (layer_norm_bp)super.position(position);
                    }
                    @Override public layer_norm_bp getPointer(long i) {
                        return new layer_norm_bp((Pointer)this).position(position + i);
                    }
                
                                                                                    public layer_norm_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
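        // Illustrative sketch (not part of the generated bindings): y = g * standardize(x) + b
        // over a single vector, where standardize(x) = (x - mean(x)) / stdev(x), mirroring the
        // formula above. The helper name is hypothetical.
        private static double[] layerNormSketch(double[] x, double[] g, double[] b) {
            double mean = 0.0;
            for (double v : x) mean += v;
            mean /= x.length;
            double var = 0.0;
            for (double v : x) var += (v - mean) * (v - mean);
            double std = Math.sqrt(var / x.length);
            double[] y = new double[x.length];
            for (int i = 0; i < x.length; i++)
                y[i] = g[i] * ((x[i] - mean) / std) + b[i];
            return y;
        }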

        /**
         * This operation performs dot product attention on the given timeseries input with the given queries
         * out = sum(similarity(k_i, q) * v_i)
         *
         * similarity(k, q) = softmax(k * q) where k * q is the dot product of k and q
         *
         * Optionally with normalization step:
         * similarity(k, q) = softmax(k * q / sqrt(size(q)))
         *
         * See also "Attention is all you need" (https://arxiv.org/abs/1706.03762, p. 4, eq. 1)
         *
         * Note: This supports multiple queries at once; if only one query is available, the queries array still has to
         * be 3D but can have queryCount = 1
         *
         * Note: keys and values are usually the same array. If you want to use the same array for both, simply pass it
         * twice.
         *
         * Expected arguments:
         * q: input 3D array "queries" of shape [batchSize, featureKeys, queryCount] or 4D array of shape [batchSize, numHeads, featureKeys, queryCount]
         * k: input 3D array "keys" of shape [batchSize, featureKeys, timesteps] or 4D array of shape [batchSize, numHeads, featureKeys, timesteps]
         * v: input 3D array "values" of shape [batchSize, featureValues, timesteps] or 4D array of shape [batchSize, numHeads, featureValues, timesteps]
         * mask: OPTIONAL; array of shape [batchSize, timesteps] that defines which values should be skipped
         *
         * integer input arguments:
         * 0: normalization, may have two values: zero -> do not apply normalization, one -> apply normalization
         * 1: withWeights, may have two values: zero -> do not return weights, one -> return weights
         *
         * Output Arrays:
         * 0: Attention result arrays of shape [batchSize, featureValues, queryCount] or [batchSize, numHeads, featureValues, queryCount]
         * 1: OPTIONAL; Attention weights of shape [batchSize, timesteps, queryCount] or [batchSize, numHeads, timesteps, queryCount]
         */
//         #if NOT_EXCLUDED(OP_dot_product_attention)
                @Namespace("sd::ops") public static class dot_product_attention extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public dot_product_attention(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public dot_product_attention(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public dot_product_attention position(long position) {
                        return (dot_product_attention)super.position(position);
                    }
                    @Override public dot_product_attention getPointer(long i) {
                        return new dot_product_attention((Pointer)this).position(position + i);
                    }
                
                                                                                    public dot_product_attention() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                @Namespace("sd::ops") public static class dot_product_attention_bp extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public dot_product_attention_bp(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public dot_product_attention_bp(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public dot_product_attention_bp position(long position) {
                        return (dot_product_attention_bp)super.position(position);
                    }
                    @Override public dot_product_attention_bp getPointer(long i) {
                        return new dot_product_attention_bp((Pointer)this).position(position + i);
                    }
                
                                                                                    public dot_product_attention_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
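        // Illustrative sketch (not part of the generated bindings): single-query,
        // single-batch dot product attention, out = sum_t softmax(k^T q / sqrt(size(q)))_t * v[:, t],
        // mirroring the formulas above. k is [featureKeys][timesteps], v is
        // [featureValues][timesteps]; the helper name is hypothetical and ignores masking.
        private static double[] dotProductAttentionSketch(double[] q, double[][] k, double[][] v, boolean normalize) {
            int timesteps = k[0].length;
            double[] scores = new double[timesteps];
            for (int t = 0; t < timesteps; t++) {
                double s = 0.0;
                for (int i = 0; i < q.length; i++) s += k[i][t] * q[i];
                scores[t] = normalize ? s / Math.sqrt(q.length) : s;
            }
            // softmax over timesteps
            double max = Double.NEGATIVE_INFINITY, sum = 0.0;
            for (double s : scores) max = Math.max(max, s);
            double[] weights = new double[timesteps];
            for (int t = 0; t < timesteps; t++) { weights[t] = Math.exp(scores[t] - max); sum += weights[t]; }
            for (int t = 0; t < timesteps; t++) weights[t] /= sum;
            // weighted sum of the value columns
            double[] out = new double[v.length];
            for (int f = 0; f < v.length; f++)
                for (int t = 0; t < timesteps; t++)
                    out[f] += weights[t] * v[f][t];
            return out;
        }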


        /**
         * This performs multi-headed dot product attention on the given timeseries input
         * out = concat(head_1, head_2, ..., head_n) * Wo
         * head_i = dot_product_attention(Wq_i*q, Wk_i*k, Wv_i*v)
         *
         * Optionally with normalization when calculating the attention for each head.
         *
         * See also "Attention is all you need" (https://arxiv.org/abs/1706.03762, pp. 4,5, "3.2.2 Multi-Head Attention")
         *
         * This makes use of dot_product_attention OP support for rank 4 inputs.
         *
         * Expected arguments:
         * q: input 3D array "queries" of shape [batchSize, featureKeys, queryCount]
         * k: input 3D array "keys" of shape [batchSize, featureKeys, timesteps]
         * v: input 3D array "values" of shape [batchSize, featureValues, timesteps]
         * Wq: input query projection weights of shape [numHeads, projectedKeys, featureKeys]
         * Wk: input key projection weights of shape [numHeads, projectedKeys, featureKeys]
         * Wv: input value projection weights of shape [numHeads, projectedValues, featureValues]
         * Wo: output projection weights of shape [numHeads * projectedValues, outSize]
         * mask: OPTIONAL; array of shape [batchSize, timesteps] that defines which values should be skipped
         *
         * integer input arguments:
         * 0: normalization, may have two values: zero -> do not apply normalization, one -> apply normalization
         * 1: withWeights, may have two values: zero -> do not return weights, one -> return weights
         *
         * Output Arrays:
         * 0: Attention result arrays of shape [batchSize, outSize, queryCount]
         * 1: OPTIONAL; Attention weights of shape [batchSize, numHeads, timesteps, queryCount]
         */
//         #if NOT_EXCLUDED(OP_multi_head_dot_product_attention)
                @Namespace("sd::ops") public static class multi_head_dot_product_attention extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public multi_head_dot_product_attention(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public multi_head_dot_product_attention(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public multi_head_dot_product_attention position(long position) {
                        return (multi_head_dot_product_attention)super.position(position);
                    }
                    @Override public multi_head_dot_product_attention getPointer(long i) {
                        return new multi_head_dot_product_attention((Pointer)this).position(position + i);
                    }
                
                                                                                    public multi_head_dot_product_attention() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
                @Namespace("sd::ops") public static class multi_head_dot_product_attention_bp extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public multi_head_dot_product_attention_bp(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public multi_head_dot_product_attention_bp(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public multi_head_dot_product_attention_bp position(long position) {
                        return (multi_head_dot_product_attention_bp)super.position(position);
                    }
                    @Override public multi_head_dot_product_attention_bp getPointer(long i) {
                        return new multi_head_dot_product_attention_bp((Pointer)this).position(position + i);
                    }
                
                                                                                    public multi_head_dot_product_attention_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
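        // Illustrative sketch (not part of the generated bindings): multi-head attention
        // for a single query and batch element, out = concat(head_1..head_n) * Wo with
        // head_h = attention(Wq[h]*q, Wk[h]*k, Wv[h]*v), mirroring the formulas above.
        // It reuses the hypothetical dotProductAttentionSketch helper shown after the
        // dot_product_attention class; all names here are illustrative only.
        private static double[] multiHeadAttentionSketch(double[] q, double[][] k, double[][] v,
                                                         double[][][] Wq, double[][][] Wk, double[][][] Wv,
                                                         double[][] Wo) {
            int numHeads = Wq.length;
            int projectedValues = Wv[0].length;
            double[] concat = new double[numHeads * projectedValues];
            for (int h = 0; h < numHeads; h++) {
                // project query, keys and values into this head's subspace
                double[] qh = matVecSketch(Wq[h], q);
                double[][] kh = matMatSketch(Wk[h], k);
                double[][] vh = matMatSketch(Wv[h], v);
                double[] head = dotProductAttentionSketch(qh, kh, vh, true);
                System.arraycopy(head, 0, concat, h * projectedValues, projectedValues);
            }
            // output projection: concat (length numHeads*projectedValues) times Wo -> outSize
            int outSize = Wo[0].length;
            double[] out = new double[outSize];
            for (int o = 0; o < outSize; o++)
                for (int i = 0; i < concat.length; i++)
                    out[o] += concat[i] * Wo[i][o];
            return out;
        }

        private static double[] matVecSketch(double[][] m, double[] x) {
            double[] y = new double[m.length];
            for (int i = 0; i < m.length; i++)
                for (int j = 0; j < x.length; j++)
                    y[i] += m[i][j] * x[j];
            return y;
        }

        private static double[][] matMatSketch(double[][] a, double[][] b) {
            double[][] c = new double[a.length][b[0].length];
            for (int i = 0; i < a.length; i++)
                for (int j = 0; j < b[0].length; j++)
                    for (int t = 0; t < b.length; t++)
                        c[i][j] += a[i][t] * b[t][j];
            return c;
        }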
    


// #endif

// Parsed from ops/declarable/headers/blas.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//
// #ifndef LIBND4J_HEADERS_BLAS_H
// #define LIBND4J_HEADERS_BLAS_H

// #include 

        /**
         * This op is a general matmul implementation. Depending on the dimensionality of the inputs, the result differs:
         * matrix x matrix = BLAS gemm
         * vector x matrix = BLAS gemm
         * vector x vector = BLAS dot
         * vector x scalar = element-wise mul
         * scalar x vector = element-wise mul
         *
         * Optional T arguments:
         * 0: alpha (where applicable)
         * 1: beta (where applicable)
         *
         * Optional Integer arguments:
         * 0: transA (where applicable)
         * 1: transB (where applicable)
         */
//         #if NOT_EXCLUDED(OP_matmul)
        @Namespace("sd::ops") public static class matmul extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matmul(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matmul(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matmul position(long position) {
                return (matmul)super.position(position);
            }
            @Override public matmul getPointer(long i) {
                return new matmul((Pointer)this).position(position + i);
            }
        
                                                                                    public matmul() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class matmul_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public matmul_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public matmul_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public matmul_bp position(long position) {
                return (matmul_bp)super.position(position);
            }
            @Override public matmul_bp getPointer(long i) {
                return new matmul_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public matmul_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
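        // Illustrative sketch (not part of the generated bindings): the matrix x matrix
        // case of the op above as a plain gemm, C = alpha * op(A) * op(B), with optional
        // transposition controlled by the transA/transB flags. The helper name is
        // hypothetical and ignores the beta argument for brevity.
        private static double[][] matmulSketch(double[][] a, double[][] b, double alpha,
                                               boolean transA, boolean transB) {
            int m = transA ? a[0].length : a.length;
            int kDim = transA ? a.length : a[0].length;
            int n = transB ? b.length : b[0].length;
            double[][] c = new double[m][n];
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++) {
                    double sum = 0.0;
                    for (int p = 0; p < kDim; p++) {
                        double av = transA ? a[p][i] : a[i][p];
                        double bv = transB ? b[j][p] : b[p][j];
                        sum += av * bv;
                    }
                    c[i][j] = alpha * sum;
                }
            return c;
        }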

        /**
         * tensorMmul/tensorDot operation
         * takes 2 ndarrays, and 2 sets of axes
         *
         * Integer arguments map:
         * IArgs[0] - number of contraction axes for the first array
         * IArgs[1..] - the axis values for the first array
         * next entry - number of contraction axes for the second array
         * remaining entries - the axis values for the second array
         */
//         #if NOT_EXCLUDED(OP_tensormmul)
        @Namespace("sd::ops") public static class tensormmul extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tensormmul(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tensormmul(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tensormmul position(long position) {
                return (tensormmul)super.position(position);
            }
            @Override public tensormmul getPointer(long i) {
                return new tensormmul((Pointer)this).position(position + i);
            }
        
                                                                                    public tensormmul() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class tensormmul_bp extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public tensormmul_bp(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public tensormmul_bp(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public tensormmul_bp position(long position) {
                return (tensormmul_bp)super.position(position);
            }
            @Override public tensormmul_bp getPointer(long i) {
                return new tensormmul_bp((Pointer)this).position(position + i);
            }
        
                                                                                    public tensormmul_bp() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
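        // Illustrative sketch (not part of the generated bindings): how the integer
        // argument list described above would be packed to contract axis 1 of the first
        // array with axis 0 of the second (i.e. an ordinary matrix product). This is
        // only an interpretation of the comment; the helper name is hypothetical.
        private static int[] tensormmulIArgsSketch() {
            int[] axesA = {1};
            int[] axesB = {0};
            int[] iargs = new int[2 + axesA.length + axesB.length];
            int p = 0;
            iargs[p++] = axesA.length;                     // number of axes for the first array
            for (int a : axesA) iargs[p++] = a;            // axis values for the first array
            iargs[p++] = axesB.length;                     // number of axes for the second array
            for (int b : axesB) iargs[p++] = b;            // axis values for the second array
            return iargs;                                  // -> {1, 1, 1, 0}
        }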

        /**
         * This op is a simple implementation of the BLAS AXPY routine.
         * Math is: y += a * x;
         */
//         #if NOT_EXCLUDED(OP_axpy)
        @Namespace("sd::ops") public static class axpy extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public axpy(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public axpy(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public axpy position(long position) {
                return (axpy)super.position(position);
            }
            @Override public axpy getPointer(long i) {
                return new axpy((Pointer)this).position(position + i);
            }
        
                                                                                    public axpy() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
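        // Illustrative sketch (not part of the generated bindings): the AXPY update
        // described above, y += a * x. The helper name is hypothetical.
        private static void axpySketch(double a, double[] x, double[] y) {
            for (int i = 0; i < y.length; i++)
                y[i] += a * x[i];
        }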

        /**
         * This operation implements batched matrix multiplication
         * Expected arguments:
         * alpha: vector of T
         * beta: vector of T
         * ...: A and B matrices sequentially, i.e. AAAAABBBBB
         *
         * Integer arguments:
         * transA, transB, M, N, K, ldA, ldB, ldC - usual BLAS gemm arguments
         * batchCount - number of operations in this batch
         *
         * PLEASE NOTE: M, N, K, ldA, ldB, ldC should be equal for all matrices within batch.
         */
//         #if NOT_EXCLUDED(OP_batched_gemm)
        @Namespace("sd::ops") public static class batched_gemm extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public batched_gemm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public batched_gemm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public batched_gemm position(long position) {
                return (batched_gemm)super.position(position);
            }
            @Override public batched_gemm getPointer(long i) {
                return new batched_gemm((Pointer)this).position(position + i);
            }
        
                                                                                    public batched_gemm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
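        // Illustrative sketch (not part of the generated bindings): the input ordering
        // the comment above describes, i.e. alpha, beta, then all A matrices followed by
        // all B matrices (AAAAABBBBB). The element type here is a placeholder (Object)
        // because the actual container depends on the calling API; the helper name is
        // hypothetical.
        private static java.util.List<Object> batchedGemmInputsSketch(Object alpha, Object beta,
                                                                      java.util.List<Object> as,
                                                                      java.util.List<Object> bs) {
            java.util.List<Object> inputs = new java.util.ArrayList<Object>();
            inputs.add(alpha);          // vector of T
            inputs.add(beta);           // vector of T
            inputs.addAll(as);          // all A matrices, in order
            inputs.addAll(bs);          // then all B matrices, in order
            return inputs;
        }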

        /**
         * Performs singular value decomposition (SVD) of one or more matrices, i.e. evaluates the SVD of each inner-most 2D matrix in the input array:
         * x[..., :, :] = u[..., :, :] * s[...,:] * transpose(v[..., :, :])
         *
         * Input array:
         * x[..., Rows, Cols], the necessary condition is: rank of x >= 2
         *
         * Outputs arrays:
         * s[..., diagSize] - array with singular values stored in decreasing order, where diagSize is the smaller of Rows and Cols
         * u[..., Rows, Rows] if IArgs[1] is true, else u[..., Rows, diagSize] - array with left singular vectors
         * v[..., Cols, Cols] if IArgs[1] is true, else v[..., Cols, diagSize] - array with right singular vectors
         *
         * Integer arguments:
         * IArgs[0] - bool, whether to calculate u and v, s is calculated in any case
         * IArgs[1] - bool, whether to calculate full-sized u and v
         * IArgs[2] - the number of cols or rows which determines what algorithm to use. More precisely:
         *            if diagSize < IArgs[2] then the Jacobi algorithm is used, otherwise Divide-And-Conquer is applied.
         *            Recommended value is 16.
         */
//         #if NOT_EXCLUDED(OP_svd)
        @Namespace("sd::ops") public static class svd extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public svd(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public svd(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public svd position(long position) {
                return (svd)super.position(position);
            }
            @Override public svd getPointer(long i) {
                return new svd((Pointer)this).position(position + i);
            }
        
                                                                                    public svd() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
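        // Illustrative sketch (not part of the generated bindings): reconstructing a
        // single 2D input from the op's outputs, x ~ u * diag(s) * v^T, for the
        // non-full-sized case u[Rows][diagSize], s[diagSize], v[Cols][diagSize].
        // The helper name is hypothetical.
        private static double[][] svdReconstructSketch(double[][] u, double[] s, double[][] v) {
            int rows = u.length, cols = v.length, diag = s.length;
            double[][] x = new double[rows][cols];
            for (int i = 0; i < rows; i++)
                for (int j = 0; j < cols; j++)
                    for (int d = 0; d < diag; d++)
                        x[i][j] += u[i][d] * s[d] * v[j][d];
            return x;
        }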

        /**
         * calculates square root of matrix such that
         * x[..., M, M] = z[..., M, M] x z[..., M, M]
         *
         * Input array:
         * x[..., M, M],  the necessary condition is: rank of x >= 2 and equality of last two dimensions
         *
         * Outputs arrays:
         * z - same shape as x
         */
//         #if NOT_EXCLUDED(OP_sqrtm)
        @Namespace("sd::ops") public static class sqrtm extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sqrtm(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sqrtm(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sqrtm position(long position) {
                return (sqrtm)super.position(position);
            }
            @Override public sqrtm getPointer(long i) {
                return new sqrtm((Pointer)this).position(position + i);
            }
        
                                                                                    public sqrtm() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
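        // Illustrative sketch (not part of the generated bindings): checking the defining
        // property above for a single M x M matrix, i.e. that z * z reproduces x up to a
        // tolerance. The helper name is hypothetical.
        private static boolean isMatrixSqrtSketch(double[][] x, double[][] z, double tol) {
            int m = x.length;
            for (int i = 0; i < m; i++)
                for (int j = 0; j < m; j++) {
                    double sum = 0.0;
                    for (int p = 0; p < m; p++) sum += z[i][p] * z[p][j];
                    if (Math.abs(sum - x[i][j]) > tol) return false;
                }
            return true;
        }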
    


// #endif

// Parsed from ops/declarable/headers/tests.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//
// #include 
//         #if NOT_EXCLUDED(OP_test_output_reshape)
        @Namespace("sd::ops") public static class test_output_reshape extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public test_output_reshape(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public test_output_reshape(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public test_output_reshape position(long position) {
                return (test_output_reshape)super.position(position);
            }
            @Override public test_output_reshape getPointer(long i) {
                return new test_output_reshape((Pointer)this).position(position + i);
            }
        
                                                    public test_output_reshape() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_test_scalar)
        @Namespace("sd::ops") public static class test_scalar extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public test_scalar(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public test_scalar(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public test_scalar position(long position) {
                return (test_scalar)super.position(position);
            }
            @Override public test_scalar getPointer(long i) {
                return new test_scalar((Pointer)this).position(position + i);
            }
        
                                                                                    public test_scalar() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_testreduction)
        @Namespace("sd::ops") public static class testreduction extends DeclarableReductionOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public testreduction(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public testreduction(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public testreduction position(long position) {
                return (testreduction)super.position(position);
            }
            @Override public testreduction getPointer(long i) {
                return new testreduction((Pointer)this).position(position + i);
            }
        
                                                                                    public testreduction() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_noop)
        @Namespace("sd::ops") public static class noop extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public noop(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public noop(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public noop position(long position) {
                return (noop)super.position(position);
            }
            @Override public noop getPointer(long i) {
                return new noop((Pointer)this).position(position + i);
            }
        
                                                    public noop() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_testop2i2o)
        @Namespace("sd::ops") public static class testop2i2o extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public testop2i2o(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public testop2i2o(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public testop2i2o position(long position) {
                return (testop2i2o)super.position(position);
            }
            @Override public testop2i2o getPointer(long i) {
                return new testop2i2o((Pointer)this).position(position + i);
            }
        
                                                    public testop2i2o() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif

//         #if NOT_EXCLUDED(OP_testcustom)
        @Namespace("sd::ops") public static class testcustom extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public testcustom(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public testcustom(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public testcustom position(long position) {
                return (testcustom)super.position(position);
            }
            @Override public testcustom getPointer(long i) {
                return new testcustom((Pointer)this).position(position + i);
            }
        
                                                                                    public testcustom() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
    


// Parsed from ops/declarable/headers/bitwise.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_BITWISE_H
// #define LIBND4J_HEADERS_BITWISE_H

// #include 
        /**
         * This operation toggles the individual bits of each element in the array
         * 
         * PLEASE NOTE: This operation is possible only on integer data types
         * 
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_toggle_bits)
        @Namespace("sd::ops") public static class toggle_bits extends DeclarableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public toggle_bits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public toggle_bits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public toggle_bits position(long position) {
                return (toggle_bits)super.position(position);
            }
            @Override public toggle_bits getPointer(long i) {
                return new toggle_bits((Pointer)this).position(position + i);
            }
        
                                                    public toggle_bits() { super((Pointer)null); allocate(); }
                                                    private native void allocate();
                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                }
//         #endif
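
        // Hedged sketch of the toggle_bits semantics documented above: toggling every bit of an
        // integer element is the bitwise complement. Plain Java, not the native op; the helper
        // name is illustrative only.
        public static int toggleBitsReferenceExample(int element) {
            return ~element;   // e.g. ~0b0101 == 0xFFFFFFFA for a 32-bit int
        }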


        /**
         * This operation shifts the individual bits of each element in the array to the left: <<
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_shift_bits)
        @Namespace("sd::ops") public static class shift_bits extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public shift_bits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public shift_bits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public shift_bits position(long position) {
                return (shift_bits)super.position(position);
            }
            @Override public shift_bits getPointer(long i) {
                return new shift_bits((Pointer)this).position(position + i);
            }
        
                                                                                    public shift_bits() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This operation shifts the individual bits of each element in the array to the right: >>
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_rshift_bits)
        @Namespace("sd::ops") public static class rshift_bits extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public rshift_bits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public rshift_bits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public rshift_bits position(long position) {
                return (rshift_bits)super.position(position);
            }
            @Override public rshift_bits getPointer(long i) {
                return new rshift_bits((Pointer)this).position(position + i);
            }
        
                                                                                    public rshift_bits() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
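
        // Hedged plain-Java sketch of the shift_bits (<<) and rshift_bits (>>) semantics
        // documented above, for a single integer element and shift amount. Whether the native op
        // performs an arithmetic or a logical right shift is an assumption here (Java's >> is
        // arithmetic); these helpers do not call the native broadcastable ops.
        public static int shiftBitsReferenceExample(int element, int amount) {
            return element << amount;    // shift_bits: 1 << 3 == 8
        }
        public static int rshiftBitsReferenceExample(int element, int amount) {
            return element >> amount;    // rshift_bits: 8 >> 3 == 1
        }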

        /**
         * This operation cyclically shifts (rotates) the individual bits of each element in the array to the left
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_cyclic_shift_bits)
        @Namespace("sd::ops") public static class cyclic_shift_bits extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cyclic_shift_bits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cyclic_shift_bits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cyclic_shift_bits position(long position) {
                return (cyclic_shift_bits)super.position(position);
            }
            @Override public cyclic_shift_bits getPointer(long i) {
                return new cyclic_shift_bits((Pointer)this).position(position + i);
            }
        
                                                                                    public cyclic_shift_bits() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This operation cyclically shifts (rotates) the individual bits of each element in the array to the right
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_cyclic_rshift_bits)
        @Namespace("sd::ops") public static class cyclic_rshift_bits extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cyclic_rshift_bits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cyclic_rshift_bits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cyclic_rshift_bits position(long position) {
                return (cyclic_rshift_bits)super.position(position);
            }
            @Override public cyclic_rshift_bits getPointer(long i) {
                return new cyclic_rshift_bits((Pointer)this).position(position + i);
            }
        
                                                                                    public cyclic_rshift_bits() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
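
        // Hedged plain-Java sketch of cyclic_shift_bits / cyclic_rshift_bits: a cyclic shift is a
        // bit rotation within the element's width, shown here for 32-bit ints via
        // java.lang.Integer. These helpers do not call the native ops.
        public static int cyclicShiftBitsReferenceExample(int element, int amount) {
            return Integer.rotateLeft(element, amount);   // e.g. rotateLeft(0x80000001, 1) == 0x00000003
        }
        public static int cyclicRshiftBitsReferenceExample(int element, int amount) {
            return Integer.rotateRight(element, amount);  // e.g. rotateRight(0x00000003, 1) == 0x80000001
        }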

        /**
         * This operation applies bitwise AND
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_bitwise_and)
        @Namespace("sd::ops") public static class bitwise_and extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public bitwise_and(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public bitwise_and(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public bitwise_and position(long position) {
                return (bitwise_and)super.position(position);
            }
            @Override public bitwise_and getPointer(long i) {
                return new bitwise_and((Pointer)this).position(position + i);
            }
        
                                                                                    public bitwise_and() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This operation applies bitwise OR
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_bitwise_or)
        @Namespace("sd::ops") public static class bitwise_or extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public bitwise_or(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public bitwise_or(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public bitwise_or position(long position) {
                return (bitwise_or)super.position(position);
            }
            @Override public bitwise_or getPointer(long i) {
                return new bitwise_or((Pointer)this).position(position + i);
            }
        
                                                                                    public bitwise_or() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif

        /**
         * This operation applies bitwise XOR
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_bitwise_xor)
        @Namespace("sd::ops") public static class bitwise_xor extends BroadcastableOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public bitwise_xor(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public bitwise_xor(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public bitwise_xor position(long position) {
                return (bitwise_xor)super.position(position);
            }
            @Override public bitwise_xor getPointer(long i) {
                return new bitwise_xor((Pointer)this).position(position + i);
            }
        
                                                                                    public bitwise_xor() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                }
//         #endif
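
        // Hedged plain-Java sketch of the element-wise bitwise_and / bitwise_or / bitwise_xor
        // semantics documented above, for a single pair of integer elements. Not the native
        // broadcastable ops; the helper name is illustrative.
        public static int[] bitwiseOpsReferenceExample(int a, int b) {
            // for a = 0b1100, b = 0b1010 this returns {0b1000, 0b1110, 0b0110}
            return new int[]{ a & b, a | b, a ^ b };
        }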

        /**
         * This operation returns the Hamming distance between the bit representations of the two input arrays
         *
         * PLEASE NOTE: This operation is applicable only to integer data types
         *
         * \tparam T
         */
//         #if NOT_EXCLUDED(OP_bits_hamming_distance)
        @Namespace("sd::ops") public static class bits_hamming_distance extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public bits_hamming_distance(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public bits_hamming_distance(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public bits_hamming_distance position(long position) {
                return (bits_hamming_distance)super.position(position);
            }
            @Override public bits_hamming_distance getPointer(long i) {
                return new bits_hamming_distance((Pointer)this).position(position + i);
            }
        
                                                                                    public bits_hamming_distance() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
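
        // Hedged plain-Java sketch of a bitwise Hamming distance as documented above: the number
        // of bit positions in which corresponding elements differ, i.e. popcount(a ^ b) summed
        // over all element pairs. Not the native op.
        public static long bitsHammingDistanceReferenceExample(long[] a, long[] b) {
            long distance = 0;
            for (int i = 0; i < a.length; i++) {
                distance += Long.bitCount(a[i] ^ b[i]);  // differing bits of this element pair
            }
            return distance;
        }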
    


// #endif

// Parsed from ops/declarable/headers/loss.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_LOSS_H
// #define LIBND4J_HEADERS_LOSS_H

// #include 
    
    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of hinge loss function max(0, 1 - labels*logits)
       * 
       * Input arrays: 
       *    0: logits - logits, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, expected to be 0. or 1., type float.
       *       Must have the same shape as logits.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as logits.
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as logits or just single scalar, depending on reduction mode (see input integer argument)
       */               
//         #if NOT_EXCLUDED(OP_hinge_loss)
        @Namespace("sd::ops") public static class hinge_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hinge_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hinge_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hinge_loss position(long position) {
                return (hinge_loss)super.position(position);
            }
            @Override public hinge_loss getPointer(long i) {
                return new hinge_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public hinge_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class hinge_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public hinge_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public hinge_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public hinge_loss_grad position(long position) {
                return (hinge_loss_grad)super.position(position);
            }
            @Override public hinge_loss_grad getPointer(long i) {
                return new hinge_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public hinge_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
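
        // Hedged plain-Java sketch of the hinge loss formula documented above,
        // max(0, 1 - labels * logits), in "none" reduction mode with per-element weights.
        // The mapping of 0/1 labels to -1/+1 before applying the formula is an assumption about
        // this implementation, not something stated in the header. Does not call the native op.
        public static double[] hingeLossReferenceExample(double[] logits, double[] weights, double[] labels) {
            double[] loss = new double[logits.length];
            for (int i = 0; i < logits.length; i++) {
                double sign = 2.0 * labels[i] - 1.0;   // assumed 0/1 -> -1/+1 mapping
                loss[i] = weights[i] * Math.max(0.0, 1.0 - sign * logits[i]);
            }
            return loss;
        }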


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of Huber loss function:
       *    0.5 * (labels-predictions)^2                                if |labels-predictions| <= delta
       *    0.5 * delta^2 + delta * (|labels-predictions| - delta)      if |labels-predictions| >  delta
       * 
       * Input arrays: 
       *    0: predictions - the predicted values, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as predictions
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       *  Input float arguments:
       *    0: point where the huber loss function changes from a quadratic to linear.
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as predictions or just single scalar, depending on reduction mode (see input integer argument)
       */      
//         #if NOT_EXCLUDED(OP_huber_loss)
        @Namespace("sd::ops") public static class huber_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public huber_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public huber_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public huber_loss position(long position) {
                return (huber_loss)super.position(position);
            }
            @Override public huber_loss getPointer(long i) {
                return new huber_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public huber_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class huber_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public huber_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public huber_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public huber_loss_grad position(long position) {
                return (huber_loss_grad)super.position(position);
            }
            @Override public huber_loss_grad getPointer(long i) {
                return new huber_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public huber_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
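
        // Hedged plain-Java sketch of the piecewise Huber loss documented above for a single
        // (prediction, label) pair: quadratic for small errors, linear beyond delta. Not the
        // native huber_loss op; the helper name is illustrative.
        public static double huberLossReferenceExample(double prediction, double label, double delta) {
            double error = Math.abs(label - prediction);
            if (error <= delta) {
                return 0.5 * error * error;                          // quadratic region
            }
            return 0.5 * delta * delta + delta * (error - delta);    // linear region
        }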

    
    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of the logarithmic loss function -( y_i * log(p_i) + (1 - y_i) * log(1 - p_i) )
       * 
       * Input arrays: 
       *    0: predictions - the predicted values, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as predictions
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       *  Input float arguments:
       *    0: a small increment to add to avoid taking a log of zero. 
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as predictions or just single scalar, depending on reduction mode (see input integer argument)
       */      
//         #if NOT_EXCLUDED(OP_log_loss)
        @Namespace("sd::ops") public static class log_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_loss position(long position) {
                return (log_loss)super.position(position);
            }
            @Override public log_loss getPointer(long i) {
                return new log_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public log_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class log_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_loss_grad position(long position) {
                return (log_loss_grad)super.position(position);
            }
            @Override public log_loss_grad getPointer(long i) {
                return new log_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public log_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
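
        // Hedged plain-Java sketch of the logarithmic (binary cross-entropy) loss documented
        // above for one element, with the epsilon float argument guarding against log(0). The
        // leading minus sign is the usual loss convention assumed here. Not the native op.
        public static double logLossReferenceExample(double prediction, double label, double epsilon) {
            return -(label * Math.log(prediction + epsilon)
                    + (1.0 - label) * Math.log(1.0 - prediction + epsilon));
        }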

        /**
         * l2_loss op.
         * Computes an L2 norm for the given array.
         *
         * input param - an array (tensor)
         * output value - a real number of the given type (e.g. float or double)
         */
//         #if NOT_EXCLUDED(OP_l2_loss)
        @Namespace("sd::ops") public static class l2_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public l2_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public l2_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public l2_loss position(long position) {
                return (l2_loss)super.position(position);
            }
            @Override public l2_loss getPointer(long i) {
                return new l2_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public l2_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif


        /**
         * This op calculates the logarithmic loss of Poisson-distributed input.
         * Input arrays:
         *    0: log_predictions - must already be pre-transformed, i.e. contain log(x)
         *    1: weights - used for weighting (multiplying) the loss values, type float.
         *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
         *    2: labels - ground truth values, expected to be 0. or 1., type float.
         *       Must have the same shape as log_predictions.
         *
         *  Input integer arguments:
         *    0: type of reduction to apply to loss
         *       0 - "none", unreduced weighted losses with the same shape as logits.
         *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
         *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
         *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
         *    1: optional - boolean value compute_full_loss: 0 (default) or 1 (compute)
         *
         * Output array:
         *    0: loss values, type float.
         *       Can be an array with the same shape as log_predictions or just single scalar, depending on reduction mode (see input integer argument)
         */
//         #if NOT_EXCLUDED(OP_log_poisson_loss)
        @Namespace("sd::ops") public static class log_poisson_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_poisson_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_poisson_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_poisson_loss position(long position) {
                return (log_poisson_loss)super.position(position);
            }
            @Override public log_poisson_loss getPointer(long i) {
                return new log_poisson_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public log_poisson_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class log_poisson_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public log_poisson_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public log_poisson_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public log_poisson_loss_grad position(long position) {
                return (log_poisson_loss_grad)super.position(position);
            }
            @Override public log_poisson_loss_grad getPointer(long i) {
                return new log_poisson_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public log_poisson_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
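
        // Hedged plain-Java sketch of a Poisson log loss for one element, assuming the
        // TensorFlow-style definition exp(c) - z * c for log-prediction c and label z, with an
        // optional Stirling term when compute_full_loss is set. Both the formula and the
        // label > 1 condition are assumptions about this op's internals; the header above only
        // names the op. Does not call the native op.
        public static double logPoissonLossReferenceExample(double logPrediction, double label, boolean computeFullLoss) {
            double loss = Math.exp(logPrediction) - label * logPrediction;
            if (computeFullLoss && label > 1.0) {
                // Stirling approximation of log(z!) = z*log(z) - z + 0.5*log(2*pi*z)
                loss += label * Math.log(label) - label + 0.5 * Math.log(2.0 * Math.PI * label);
            }
            return loss;
        }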

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of pairwise-errors-squared loss function 
       * 
       * Input arrays: 
       *    0: predictions - the predicted values, type float.
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.    
       *  
       * Output array: 
       *    0: loss value, it is just single scalar, type float.
       */     
//         #if NOT_EXCLUDED(OP_mean_pairwssqerr_loss)
        @Namespace("sd::ops") public static class mean_pairwssqerr_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mean_pairwssqerr_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mean_pairwssqerr_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mean_pairwssqerr_loss position(long position) {
                return (mean_pairwssqerr_loss)super.position(position);
            }
            @Override public mean_pairwssqerr_loss getPointer(long i) {
                return new mean_pairwssqerr_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public mean_pairwssqerr_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class mean_pairwssqerr_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mean_pairwssqerr_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mean_pairwssqerr_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mean_pairwssqerr_loss_grad position(long position) {
                return (mean_pairwssqerr_loss_grad)super.position(position);
            }
            @Override public mean_pairwssqerr_loss_grad getPointer(long i) {
                return new mean_pairwssqerr_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public mean_pairwssqerr_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif

    
    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of the mean squared error (sum-of-squares) loss function: 1/N * sum_{i=1}^{N}(predictions_i - labels_i)^2
       * 
       * Input arrays: 
       *    0: predictions - the predicted values, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as predictions
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as predictions or just single scalar, depending on reduction mode (see input integer argument)
       */      
//         #if NOT_EXCLUDED(OP_mean_sqerr_loss)
        @Namespace("sd::ops") public static class mean_sqerr_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mean_sqerr_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mean_sqerr_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mean_sqerr_loss position(long position) {
                return (mean_sqerr_loss)super.position(position);
            }
            @Override public mean_sqerr_loss getPointer(long i) {
                return new mean_sqerr_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public mean_sqerr_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class mean_sqerr_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public mean_sqerr_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public mean_sqerr_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public mean_sqerr_loss_grad position(long position) {
                return (mean_sqerr_loss_grad)super.position(position);
            }
            @Override public mean_sqerr_loss_grad getPointer(long i) {
                return new mean_sqerr_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public mean_sqerr_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
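
        // Hedged plain-Java sketch of the mean squared error formula documented above,
        // 1/N * sum_i (predictions_i - labels_i)^2, with uniform weights. Not the native op.
        public static double meanSquaredErrorReferenceExample(double[] predictions, double[] labels) {
            double sum = 0.0;
            for (int i = 0; i < predictions.length; i++) {
                double diff = predictions[i] - labels[i];
                sum += diff * diff;
            }
            return sum / predictions.length;   // N = number of elements
        }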


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of sigmoid cross-entropy loss function max(logits, 0.) - logits * labels + log(1. + exp(-abs(logits))); 
       * 
       * Input arrays: 
       *    0: logits - logits, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, expected to be 0. or 1., type float.
       *       Must have the same shape as logits.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as logits.
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       *  Input float arguments:
       *    0: smoothing value, if it is greater than 0 then apply smoothing to the labels (smooth the labels towards 1/2): new_labels = labels * (1 - labelsSmoothing) + 0.5 * labelsSmoothing
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as logits or just single scalar, depending on reduction mode (see input integer argument)
       */      
//         #if NOT_EXCLUDED(OP_sigm_cross_entropy_loss)
        @Namespace("sd::ops") public static class sigm_cross_entropy_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sigm_cross_entropy_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sigm_cross_entropy_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sigm_cross_entropy_loss position(long position) {
                return (sigm_cross_entropy_loss)super.position(position);
            }
            @Override public sigm_cross_entropy_loss getPointer(long i) {
                return new sigm_cross_entropy_loss((Pointer)this).position(position + i);
            }
        
                                                                                    public sigm_cross_entropy_loss() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
        @Namespace("sd::ops") public static class sigm_cross_entropy_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sigm_cross_entropy_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sigm_cross_entropy_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sigm_cross_entropy_loss_grad position(long position) {
                return (sigm_cross_entropy_loss_grad)super.position(position);
            }
            @Override public sigm_cross_entropy_loss_grad getPointer(long i) {
                return new sigm_cross_entropy_loss_grad((Pointer)this).position(position + i);
            }
        
                                                                                    public sigm_cross_entropy_loss_grad() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
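
        // Hedged plain-Java sketch of the numerically stable sigmoid cross-entropy formula
        // documented above, max(logits, 0) - logits * labels + log(1 + exp(-|logits|)), for a
        // single element. Not the native op.
        public static double sigmoidCrossEntropyReferenceExample(double logits, double label) {
            return Math.max(logits, 0.0) - logits * label + Math.log(1.0 + Math.exp(-Math.abs(logits)));
        }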
    

    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of softmax cross-entropy loss function: -sum_i(labels_i * log(softmax(logits)_i))
       * 
       * Input arrays: 
       *    0: logits - logits, type float
       *    1: weights - used for weighting (multiplying) the loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, expected to be 0. or 1., type float.
       *       Must have the same shape as logits.    
       *  
       *  Input integer arguments:
       *    0: type of reduction to apply to loss
       *       0 - "none", unreduced weighted losses with the same shape as logits.
       *       1 - "weighted_sum", output is scalar and equal to sum of all elements of weightedLosses array
       *       2 - "weighted_mean", output is scalar and equal to sum of all elements of weightedLosses array divided by sum of all elements of weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of weightedLosses array divided by number of non-zero weights
       *
       *  Input float arguments:
       *    0: smoothing value, if it is greater than 0 then apply smoothing to the labels (smooth the labels towards 1/numClasses):  new_labels = labels * (1 - labelsSmoothing) + labelsSmoothing / numClasses
       *
       * Output array: 
       *    0: loss values, type float.
       *       Can be an array with the same shape as logits except that the last dimension equals 1, or just a single scalar, depending on reduction mode (see input integer argument)
       */      
//         #if NOT_EXCLUDED(OP_softmax_cross_entropy_loss)
        @Namespace("sd::ops") public static class softmax_cross_entropy_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax_cross_entropy_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax_cross_entropy_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax_cross_entropy_loss position(long position) {
                return (softmax_cross_entropy_loss)super.position(position);
            }
            @Override public softmax_cross_entropy_loss getPointer(long i) {
                return new softmax_cross_entropy_loss((Pointer)this).position(position + i);
            }
        
            public softmax_cross_entropy_loss() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class softmax_cross_entropy_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax_cross_entropy_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax_cross_entropy_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax_cross_entropy_loss_grad position(long position) {
                return (softmax_cross_entropy_loss_grad)super.position(position);
            }
            @Override public softmax_cross_entropy_loss_grad getPointer(long i) {
                return new softmax_cross_entropy_loss_grad((Pointer)this).position(position + i);
            }
        
            public softmax_cross_entropy_loss_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
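
        // Illustrative sketch, not part of the generated bindings: the elementwise formula quoted in the
        // Javadoc above, max(logits, 0) - logits*labels + log(1 + exp(-|logits|)), computed in plain Java
        // for the "none" reduction mode. Weights are assumed here to already be broadcast elementwise.
        public static double[] exampleElementwiseCrossEntropyLoss(double[] logits, double[] labels, double[] weights) {
            double[] out = new double[logits.length];
            for (int i = 0; i < logits.length; i++) {
                double l = logits[i];
                double loss = Math.max(l, 0.0) - l * labels[i] + Math.log(1.0 + Math.exp(-Math.abs(l)));
                out[i] = loss * weights[i]; // weighting (multiplying) of the per-element loss
            }
            return out;
        }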


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of absolute difference loss function |predictions - labels|
       *
       * Input arrays:
       *    0: predictions - the predicted values, type float.
       *    1: weights - used for weighting (multiplying) loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.
       *
       *  Input integer arguments:
       *    0: type of reduction to apply to the loss
       *       0 - "none", unreduced weighted losses with the same shape as predictions
       *       1 - "weighted_sum", output is a scalar equal to the sum of all elements of the weightedLosses array
       *       2 - "weighted_mean", output is a scalar equal to the sum of all elements of the weightedLosses array divided by the sum of all elements of the weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is a scalar equal to the sum of all elements of the weightedLosses array divided by the number of non-zero weights
       *
       * Output array:
       *    0: loss values, type float.
       *       Can be an array with the same shape as predictions or a single scalar, depending on the reduction mode (see input integer argument).
       *       (A plain-Java sketch of the reduction modes appears after the op declarations below.)
       */
//         #if NOT_EXCLUDED(OP_absolute_difference_loss)
        @Namespace("sd::ops") public static class absolute_difference_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public absolute_difference_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public absolute_difference_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public absolute_difference_loss position(long position) {
                return (absolute_difference_loss)super.position(position);
            }
            @Override public absolute_difference_loss getPointer(long i) {
                return new absolute_difference_loss((Pointer)this).position(position + i);
            }
        
            public absolute_difference_loss() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class absolute_difference_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public absolute_difference_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public absolute_difference_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public absolute_difference_loss_grad position(long position) {
                return (absolute_difference_loss_grad)super.position(position);
            }
            @Override public absolute_difference_loss_grad getPointer(long i) {
                return new absolute_difference_loss_grad((Pointer)this).position(position + i);
            }
        
            public absolute_difference_loss_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
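
        // Illustrative sketch, not part of the generated bindings: absolute-difference loss with the
        // scalar reduction modes documented above, computed in plain Java over flat (already broadcast) arrays.
        // Reduction 0 ("none") would return the per-element weighted losses instead of a scalar.
        public static double exampleAbsoluteDifferenceLoss(double[] predictions, double[] labels,
                                                           double[] weights, int reduction) {
            double weightedSum = 0.0, weightSum = 0.0;
            long nonZero = 0;
            for (int i = 0; i < predictions.length; i++) {
                weightedSum += Math.abs(predictions[i] - labels[i]) * weights[i];
                weightSum += weights[i];
                if (weights[i] != 0.0) nonZero++;
            }
            switch (reduction) {
                case 1: return weightedSum;                          // "weighted_sum"
                case 2: return weightedSum / weightSum;              // "weighted_mean"
                case 3: return weightedSum / Math.max(nonZero, 1L);  // "weighted_sum_by_nonzero_weights"
                default: throw new IllegalArgumentException("scalar reductions are 1, 2 or 3");
            }
        }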


    //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of cosine-distance loss function 1. - (predictions * labels).reduce_sum_along(dimension)
       *
       * Input arrays:
       *    0: predictions - the predicted values, type float
       *    1: weights - used for weighting (multiplying) loss values, type float.
       *       Can be a single scalar or have the same rank as labels, and must be broadcastable to labels.
       *    2: labels - ground truth values, type float.
       *       Must have the same shape as predictions.
       *
       *  Input integer arguments:
       *    0: type of reduction to apply to the loss
       *       0 - "none", unreduced weighted losses with the same shape as predictions
       *       1 - "weighted_sum", output is a scalar equal to the sum of all elements of the weightedLosses array
       *       2 - "weighted_mean", output is a scalar equal to the sum of all elements of the weightedLosses array divided by the sum of all elements of the weightsBroad array
       *       3 - "weighted_sum_by_nonzero_weights", output is a scalar equal to the sum of all elements of the weightedLosses array divided by the number of non-zero weights
       *    1: dimension along which the cosine distance is computed
       *
       * Output array:
       *    0: loss values, type float.
       *       Can be an array with the same shape as predictions or a single scalar, depending on the reduction mode (see input integer argument).
       *       (A plain-Java sketch appears after the op declarations below.)
       */
//         #if NOT_EXCLUDED(OP_cosine_distance_loss)
        @Namespace("sd::ops") public static class cosine_distance_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cosine_distance_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cosine_distance_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cosine_distance_loss position(long position) {
                return (cosine_distance_loss)super.position(position);
            }
            @Override public cosine_distance_loss getPointer(long i) {
                return new cosine_distance_loss((Pointer)this).position(position + i);
            }
        
            public cosine_distance_loss() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class cosine_distance_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cosine_distance_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cosine_distance_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cosine_distance_loss_grad position(long position) {
                return (cosine_distance_loss_grad)super.position(position);
            }
            @Override public cosine_distance_loss_grad getPointer(long i) {
                return new cosine_distance_loss_grad((Pointer)this).position(position + i);
            }
        
            public cosine_distance_loss_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
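
        // Illustrative sketch, not part of the generated bindings: the documented formula
        // 1 - (predictions * labels).reduce_sum_along(dimension), applied along the last dimension of a
        // 2D [batch, features] layout, unreduced ("none") mode with elementwise-broadcast weights assumed.
        public static double[] exampleCosineDistanceLoss(double[][] predictions, double[][] labels, double[] weights) {
            double[] out = new double[predictions.length];
            for (int i = 0; i < predictions.length; i++) {
                double dot = 0.0;
                for (int j = 0; j < predictions[i].length; j++) {
                    dot += predictions[i][j] * labels[i][j]; // reduce_sum along the feature dimension
                }
                out[i] = (1.0 - dot) * weights[i];
            }
            return out;
        }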

        //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of softmax cross-entropy loss function
       *
       * Input arrays:
       *    0: logits - logits, type float
       *    1: labels - ground truth values, expected to be 0. or 1., type float.
       *       Must have the same shape as logits.
       *
       *  Input integer arguments:
       *    0: dimension holding the classes (optional; defaults to the last dimension)
       *
       * Output array:
       *    0: loss values, type float. An array whose shape results from reducing the logits shape along the class dimension.
       *       (A plain-Java sketch appears after the op declarations below.)
       */
//         #if NOT_EXCLUDED(OP_softmax_cross_entropy_loss_with_logits)
        @Namespace("sd::ops") public static class softmax_cross_entropy_loss_with_logits extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax_cross_entropy_loss_with_logits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax_cross_entropy_loss_with_logits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax_cross_entropy_loss_with_logits position(long position) {
                return (softmax_cross_entropy_loss_with_logits)super.position(position);
            }
            @Override public softmax_cross_entropy_loss_with_logits getPointer(long i) {
                return new softmax_cross_entropy_loss_with_logits((Pointer)this).position(position + i);
            }
        
            public softmax_cross_entropy_loss_with_logits() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class softmax_cross_entropy_loss_with_logits_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public softmax_cross_entropy_loss_with_logits_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public softmax_cross_entropy_loss_with_logits_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public softmax_cross_entropy_loss_with_logits_grad position(long position) {
                return (softmax_cross_entropy_loss_with_logits_grad)super.position(position);
            }
            @Override public softmax_cross_entropy_loss_with_logits_grad getPointer(long i) {
                return new softmax_cross_entropy_loss_with_logits_grad((Pointer)this).position(position + i);
            }
        
            public softmax_cross_entropy_loss_with_logits_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
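
        // Illustrative sketch, not part of the generated bindings: softmax cross-entropy over the class
        // dimension (here the last axis of a 2D [batch, classes] array), numerically stabilized by
        // subtracting the per-row maximum before exponentiation.
        public static double[] exampleSoftmaxCrossEntropyWithLogits(double[][] logits, double[][] labels) {
            double[] out = new double[logits.length];
            for (int i = 0; i < logits.length; i++) {
                double max = Double.NEGATIVE_INFINITY;
                for (double v : logits[i]) max = Math.max(max, v);
                double sumExp = 0.0;
                for (double v : logits[i]) sumExp += Math.exp(v - max);
                double logSumExp = max + Math.log(sumExp);
                double loss = 0.0;
                for (int c = 0; c < logits[i].length; c++) {
                    loss -= labels[i][c] * (logits[i][c] - logSumExp); // -sum(labels * log_softmax(logits))
                }
                out[i] = loss;
            }
            return out;
        }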

        //////////////////////////////////////////////////////////////////////////
    /**
       * Implementation of sparse softmax cross-entropy loss function
       *
       * Input arrays:
       *    0: labels - ground truth values, expected to be within the range [0, num_classes), type float.
       *       Must have rank equal to logits rank minus 1.
       *    1: logits - logits, type float
       *
       * Output array:
       *    0: loss values, type float. Has the same shape as labels.
       *       (A plain-Java sketch appears after the op declarations below.)
       */
//         #if NOT_EXCLUDED(OP_sparse_softmax_cross_entropy_loss_with_logits)
        @Namespace("sd::ops") public static class sparse_softmax_cross_entropy_loss_with_logits extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sparse_softmax_cross_entropy_loss_with_logits(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sparse_softmax_cross_entropy_loss_with_logits(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sparse_softmax_cross_entropy_loss_with_logits position(long position) {
                return (sparse_softmax_cross_entropy_loss_with_logits)super.position(position);
            }
            @Override public sparse_softmax_cross_entropy_loss_with_logits getPointer(long i) {
                return new sparse_softmax_cross_entropy_loss_with_logits((Pointer)this).position(position + i);
            }
        
            public sparse_softmax_cross_entropy_loss_with_logits() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class sparse_softmax_cross_entropy_loss_with_logits_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public sparse_softmax_cross_entropy_loss_with_logits_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public sparse_softmax_cross_entropy_loss_with_logits_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public sparse_softmax_cross_entropy_loss_with_logits_grad position(long position) {
                return (sparse_softmax_cross_entropy_loss_with_logits_grad)super.position(position);
            }
            @Override public sparse_softmax_cross_entropy_loss_with_logits_grad getPointer(long i) {
                return new sparse_softmax_cross_entropy_loss_with_logits_grad((Pointer)this).position(position + i);
            }
        
            public sparse_softmax_cross_entropy_loss_with_logits_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
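
        // Illustrative sketch, not part of the generated bindings: the sparse variant, where labels hold
        // class indices in [0, numClasses) rather than one-hot vectors; output has the same shape as labels.
        public static double[] exampleSparseSoftmaxCrossEntropyWithLogits(double[][] logits, int[] labels) {
            double[] out = new double[logits.length];
            for (int i = 0; i < logits.length; i++) {
                double max = Double.NEGATIVE_INFINITY;
                for (double v : logits[i]) max = Math.max(max, v);
                double sumExp = 0.0;
                for (double v : logits[i]) sumExp += Math.exp(v - max);
                double logSumExp = max + Math.log(sumExp);
                out[i] = -(logits[i][labels[i]] - logSumExp); // negative log-softmax of the true class
            }
            return out;
        }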

      /**
       * Implementation of CTC loss function
       *
       * Input arrays:
       *    0: labels - labels NDArray {BATCH_LEN, MAX_TARGET_LEN}, type integer
       *    1: logits - logits NDArray {BATCH_LEN, FRAME_LEN, CLASS_LEN}, the log softmax of the RNN output; the class dimension should include a blank label as well, type float
       *    2: targetLabelLengths - length of each label sequence in labels, NDArray {BATCH_LEN}, type integer
       *    3: logitsLengths - length of each input sequence in logits, NDArray {BATCH_LEN}, type integer
       *
       *  Input integer arguments:
       *    0: blank index - index of the blank label in logits
       *
       * Output array:
       *    0: loss values, type float. NDArray {BATCH_LEN} of negative log probabilities.
       *       (A plain-Java sketch of the forward recursion appears after the op declarations below.)
       */
//         #if NOT_EXCLUDED(OP_ctc_loss)
        @Namespace("sd::ops") public static class ctc_loss extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ctc_loss(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ctc_loss(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ctc_loss position(long position) {
                return (ctc_loss)super.position(position);
            }
            @Override public ctc_loss getPointer(long i) {
                return new ctc_loss((Pointer)this).position(position + i);
            }
        
            public ctc_loss() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
        @Namespace("sd::ops") public static class ctc_loss_grad extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public ctc_loss_grad(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public ctc_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public ctc_loss_grad position(long position) {
                return (ctc_loss_grad)super.position(position);
            }
            @Override public ctc_loss_grad getPointer(long i) {
                return new ctc_loss_grad((Pointer)this).position(position + i);
            }
        
            public ctc_loss_grad() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
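
        // Illustrative sketch, not part of the generated bindings: the standard CTC forward (alpha)
        // recursion for a single batch element, assuming logProbs[t][c] already holds log-softmax values
        // (as the Javadoc above states) and blankIndex is the blank class. Returns -log p(label | logits).
        public static double exampleCtcLossSingle(double[][] logProbs, int[] label, int blankIndex) {
            int T = logProbs.length, S = 2 * label.length + 1;
            int[] ext = new int[S];                            // label sequence with blanks interleaved
            for (int i = 0; i < S; i += 2) ext[i] = blankIndex;
            for (int i = 0; i < label.length; i++) ext[2 * i + 1] = label[i];
            double[] alpha = new double[S];
            java.util.Arrays.fill(alpha, Double.NEGATIVE_INFINITY);
            alpha[0] = logProbs[0][ext[0]];
            if (S > 1) alpha[1] = logProbs[0][ext[1]];
            for (int t = 1; t < T; t++) {
                double[] next = new double[S];
                for (int s = 0; s < S; s++) {
                    double a = alpha[s];
                    if (s > 0) a = exampleLogAdd(a, alpha[s - 1]);
                    if (s > 1 && ext[s] != blankIndex && ext[s] != ext[s - 2]) a = exampleLogAdd(a, alpha[s - 2]);
                    next[s] = a + logProbs[t][ext[s]];
                }
                alpha = next;
            }
            double total = S > 1 ? exampleLogAdd(alpha[S - 1], alpha[S - 2]) : alpha[S - 1];
            return -total;
        }

        // Numerically stable log(exp(a) + exp(b)), used by the CTC sketch above.
        private static double exampleLogAdd(double a, double b) {
            if (a == Double.NEGATIVE_INFINITY) return b;
            if (b == Double.NEGATIVE_INFINITY) return a;
            double m = Math.max(a, b);
            return m + Math.log(Math.exp(a - m) + Math.exp(b - m));
        }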





// #endif

// Parsed from ops/declarable/headers/datatypes.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//
// #ifndef LIBND4J_HEADERS_DTYPE_H
// #define LIBND4J_HEADERS_DTYPE_H

// #include 
        /**
         * This operation casts elements of input array to double data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_double)
        @Namespace("sd::ops") public static class to_double extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_double(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_double(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_double position(long position) {
                return (to_double)super.position(position);
            }
            @Override public to_double getPointer(long i) {
                return new to_double((Pointer)this).position(position + i);
            }
        
            public to_double() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to float16 data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_float16)
        @Namespace("sd::ops") public static class to_float16 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_float16(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_float16(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_float16 position(long position) {
                return (to_float16)super.position(position);
            }
            @Override public to_float16 getPointer(long i) {
                return new to_float16((Pointer)this).position(position + i);
            }
        
            public to_float16() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to float data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_float32)
        @Namespace("sd::ops") public static class to_float32 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_float32(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_float32(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_float32 position(long position) {
                return (to_float32)super.position(position);
            }
            @Override public to_float32 getPointer(long i) {
                return new to_float32((Pointer)this).position(position + i);
            }
        
            public to_float32() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to int32 data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_int32)
        @Namespace("sd::ops") public static class to_int32 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_int32(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_int32(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_int32 position(long position) {
                return (to_int32)super.position(position);
            }
            @Override public to_int32 getPointer(long i) {
                return new to_int32((Pointer)this).position(position + i);
            }
        
            public to_int32() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to int64 (aka long long) data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_int64)
        @Namespace("sd::ops") public static class to_int64 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_int64(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_int64(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_int64 position(long position) {
                return (to_int64)super.position(position);
            }
            @Override public to_int64 getPointer(long i) {
                return new to_int64((Pointer)this).position(position + i);
            }
        
            public to_int64() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to unsigned int32 data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_uint32)
        @Namespace("sd::ops") public static class to_uint32 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_uint32(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_uint32(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_uint32 position(long position) {
                return (to_uint32)super.position(position);
            }
            @Override public to_uint32 getPointer(long i) {
                return new to_uint32((Pointer)this).position(position + i);
            }
        
            public to_uint32() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to unsigned int64 (aka unsigned long long) data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         */
//         #if NOT_EXCLUDED(OP_to_uint64)
        @Namespace("sd::ops") public static class to_uint64 extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public to_uint64(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public to_uint64(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public to_uint64 position(long position) {
                return (to_uint64)super.position(position);
            }
            @Override public to_uint64 getPointer(long i) {
                return new to_uint64((Pointer)this).position(position + i);
            }
        
            public to_uint64() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif

        /**
         * This operation casts elements of input array to specified data type
         * 
         * PLEASE NOTE: This op is disabled atm, and reserved for future releases.
         * 
         * 
         * Int args:
         * 0: target DataType
         */
//         #if NOT_EXCLUDED(OP_cast)
        @Namespace("sd::ops") public static class cast extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public cast(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public cast(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public cast position(long position) {
                return (cast)super.position(position);
            }
            @Override public cast getPointer(long i) {
                return new cast((Pointer)this).position(position + i);
            }
        
            public cast() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
        /**
         * This operation changes the data type of the input and modifies the shape of the output to conform to the given data type.
         *
         * Arguments are the same as for the cast op above.
         * (A plain-Java illustration of bit-level reinterpretation appears after this op block.)
         */
//         #if NOT_EXCLUDED(OP_bitcast)
                @Namespace("sd::ops") public static class bitcast extends DeclarableCustomOp {
                    static { Loader.load(); }
                    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
                    public bitcast(Pointer p) { super(p); }
                    /** Native array allocator. Access with {@link Pointer#position(long)}. */
                    public bitcast(long size) { super((Pointer)null); allocateArray(size); }
                    private native void allocateArray(long size);
                    @Override public bitcast position(long position) {
                        return (bitcast)super.position(position);
                    }
                    @Override public bitcast getPointer(long i) {
                        return new bitcast((Pointer)this).position(position + i);
                    }
                
                                                                                    public bitcast() { super((Pointer)null); allocate(); }
                                                                                    private native void allocate();
                                                                                    public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
                                                                                }
//         #endif
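
        // Illustrative only, not part of the generated bindings: what a bit-level cast means for a single
        // element - reinterpreting the 64 bits of a double as an int64 without numeric conversion.
        public static long exampleBitcastDoubleToLong(double value) {
            return Double.doubleToRawLongBits(value);
        }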
    


// #endif

// Parsed from execution/ContextBuffers.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

// #ifndef LIBND4J_CONTEXTBUFFERS_H
// #define LIBND4J_CONTEXTBUFFERS_H

// #include 
// #include 
// #include 
    @Namespace("sd") @NoOffset public static class ContextBuffers extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public ContextBuffers(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public ContextBuffers(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public ContextBuffers position(long position) {
            return (ContextBuffers)super.position(position);
        }
        @Override public ContextBuffers getPointer(long i) {
            return new ContextBuffers((Pointer)this).position(position + i);
        }
    
        public ContextBuffers() { super((Pointer)null); allocate(); }
        private native void allocate();
        public ContextBuffers(@Const @ByRef ContextBuffers other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef ContextBuffers other);
        public ContextBuffers(Pointer rPointer, Pointer sPointer, Pointer aPointer, @Cast("bool") boolean isOwner/*=false*/) { super((Pointer)null); allocate(rPointer, sPointer, aPointer, isOwner); }
        private native void allocate(Pointer rPointer, Pointer sPointer, Pointer aPointer, @Cast("bool") boolean isOwner/*=false*/);
        public ContextBuffers(Pointer rPointer, Pointer sPointer, Pointer aPointer) { super((Pointer)null); allocate(rPointer, sPointer, aPointer); }
        private native void allocate(Pointer rPointer, Pointer sPointer, Pointer aPointer);

        public native @ByRef @Name("operator =") ContextBuffers put(@Const @ByRef ContextBuffers other);

        public native void release();

        public native Pointer reductionBuffer();
        public native Pointer scalarBuffer();
        public native Pointer allocationBuffer();

        public native Pointer execStream();
        public native Pointer specialStream();

        public native void setReductionBuffer(Pointer pointer);
        public native void setScalarBuffer(Pointer pointer);
        public native void setAllocationBuffer(Pointer pointer);

        public native ErrorReference errorReference();

        public native void triggerOwnership(@Cast("bool") boolean isOwner);

        public native int deviceId();

        public native @Cast("bool") boolean isInitialized();
    }
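
    // Illustrative usage sketch of the ContextBuffers binding above, not part of the generated bindings:
    // default-construct, query device/initialization state, and release the native buffers when done.
    public static void exampleContextBuffersUsage() {
        ContextBuffers buffers = new ContextBuffers();
        int device = buffers.deviceId();               // device this buffer set is bound to
        boolean initialized = buffers.isInitialized(); // whether native buffers have been set up
        buffers.release();                             // hand the buffers back to the native side
    }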



// #endif //DEV_TESTS_CONTEXTBUFFERS_H


// Parsed from execution/LaunchContext.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 30.11.17.
//

// #ifndef LIBND4J_CUDACONTEXT_H
// #define LIBND4J_CUDACONTEXT_H


// #ifdef __CUDABLAS__
// #endif

// used for MKLDNN etc
// #if !defined(__STANDALONE_BUILD__)
// #include "config.h"
// #endif

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

@Namespace("sd") @NoOffset public static class LaunchContext extends Pointer {
    static { Loader.load(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public LaunchContext(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public LaunchContext position(long position) {
        return (LaunchContext)super.position(position);
    }
    @Override public LaunchContext getPointer(long i) {
        return new LaunchContext((Pointer)this).position(position + i);
    }

// #ifdef __CUDABLAS__

// #endif // CUDA
		public LaunchContext(@Cast("Nd4jPointer") Pointer cudaStream, @Cast("Nd4jPointer") Pointer reductionPointer/*=nullptr*/, @Cast("Nd4jPointer") Pointer scalarPointer/*=nullptr*/, @Cast("Nd4jPointer") Pointer allocationPointer/*=nullptr*/) { super((Pointer)null); allocate(cudaStream, reductionPointer, scalarPointer, allocationPointer); }
		private native void allocate(@Cast("Nd4jPointer") Pointer cudaStream, @Cast("Nd4jPointer") Pointer reductionPointer/*=nullptr*/, @Cast("Nd4jPointer") Pointer scalarPointer/*=nullptr*/, @Cast("Nd4jPointer") Pointer allocationPointer/*=nullptr*/);
		public LaunchContext(@Cast("Nd4jPointer") Pointer cudaStream) { super((Pointer)null); allocate(cudaStream); }
		private native void allocate(@Cast("Nd4jPointer") Pointer cudaStream);
    	public LaunchContext() { super((Pointer)null); allocate(); }
    	private native void allocate();
    	public native Workspace getWorkspace();
    	public native void setWorkspace(Workspace theWorkspace);

    	public native Pointer engine();

    	public native int getDeviceID();
    	public native void setDeviceID(int deviceID);
        public native ErrorReference errorReference();

// #ifndef __JAVACPP_HACK__

// #endif

    	public native @Cast("bool") boolean isInitialized();
    	public native void releaseBuffers();


	    public native LaunchContext defaultContext();


    	public native void swapContextBuffers(@ByRef ContextBuffers buffers);

}
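
// Illustrative usage sketch of the LaunchContext binding above, not part of the generated bindings:
// default-construct a context and query device/initialization state through the declared accessors.
public static void exampleLaunchContextUsage() {
    LaunchContext ctx = new LaunchContext();
    int deviceId = ctx.getDeviceID();     // device this context targets
    boolean ready = ctx.isInitialized();  // whether the context has been initialized
    ctx.releaseBuffers();                 // release per-context buffers
}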




// #endif //LIBND4J_CUDACONTEXT_H


// Parsed from array/ShapeDescriptor.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//  @author AbdelRauf 

// #ifndef DEV_TESTS_SHAPEDESCRIPTOR_H
// #define DEV_TESTS_SHAPEDESCRIPTOR_H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 


public static final int SHAPE_DESC_OK = 0;
public static final int SHAPE_DESC_INCORRECT_STRIDES = 1; // strides do not match the shape
public static final int SHAPE_DESC_INCORRECT_EWS = 2; // ews matches neither the strides nor contiguity
public static final int SHAPE_DESC_INCORRECT_RANK = 4; // rank > 32, or shape size and rank do not match

@Namespace("sd") @NoOffset public static class ShapeDescriptor extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeDescriptor(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ShapeDescriptor(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ShapeDescriptor position(long position) {
        return (ShapeDescriptor)super.position(position);
    }
    @Override public ShapeDescriptor getPointer(long i) {
        return new ShapeDescriptor((Pointer)this).position(position + i);
    }

        public ShapeDescriptor(@Const @ByRef ShapeDescriptor other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef ShapeDescriptor other);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/) { super((Pointer)null); allocate(shapeInfo, inheritDtype); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongPointer shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/) { super((Pointer)null); allocate(shapeInfo, inheritDtype); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongBuffer shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
        public ShapeDescriptor(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/) { super((Pointer)null); allocate(shapeInfo, inheritDtype); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("bool") boolean inheritDtype/*=true*/);
        public ShapeDescriptor(@Cast("const Nd4jLong*") long[] shapeInfo) { super((Pointer)null); allocate(shapeInfo); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const sd::DataType") int dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const sd::DataType") int dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const sd::DataType") int dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const sd::DataType") int dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const sd::DataType") int dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const sd::DataType") int dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] dtypeOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] dtypeOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer dtypeOverride, @Cast("const Nd4jLong*") LongPointer orderOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride, orderOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer dtypeOverride, @Cast("const Nd4jLong*") LongPointer orderOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer dtypeOverride, @Cast("const Nd4jLong*") LongBuffer orderOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride, orderOverride); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer dtypeOverride, @Cast("const Nd4jLong*") LongBuffer orderOverride);
        public ShapeDescriptor(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] dtypeOverride, @Cast("const Nd4jLong*") long[] orderOverride) { super((Pointer)null); allocate(shapeInfo, dtypeOverride, orderOverride); }
        private native void allocate(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] dtypeOverride, @Cast("const Nd4jLong*") long[] orderOverride);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, @Cast("const Nd4jLong") long length) { super((Pointer)null); allocate(type, length); }
        private native void allocate(@Cast("const sd::DataType") int type, @Cast("const Nd4jLong") long length);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongPointer shape, int rank) { super((Pointer)null); allocate(type, order, shape, rank); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongPointer shape, int rank);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongBuffer shape, int rank) { super((Pointer)null); allocate(type, order, shape, rank); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongBuffer shape, int rank);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") long[] shape, int rank) { super((Pointer)null); allocate(type, order, shape, rank); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") long[] shape, int rank);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape) { super((Pointer)null); allocate(type, order, shape); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape) { super((Pointer)null); allocate(type, order, shape); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape) { super((Pointer)null); allocate(type, order, shape); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("Nd4jLong*") @StdVector LongPointer strides) { super((Pointer)null); allocate(type, order, shape, strides); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("Nd4jLong*") @StdVector LongPointer strides);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("Nd4jLong*") @StdVector LongBuffer strides) { super((Pointer)null); allocate(type, order, shape, strides); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("Nd4jLong*") @StdVector LongBuffer strides);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("Nd4jLong*") @StdVector long[] strides) { super((Pointer)null); allocate(type, order, shape, strides); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("Nd4jLong*") @StdVector long[] strides);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("Nd4jLong*") @StdVector LongPointer strides, @Cast("const Nd4jLong") long ews) { super((Pointer)null); allocate(type, order, shape, strides, ews); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("Nd4jLong*") @StdVector LongPointer strides, @Cast("const Nd4jLong") long ews);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("Nd4jLong*") @StdVector LongBuffer strides, @Cast("const Nd4jLong") long ews) { super((Pointer)null); allocate(type, order, shape, strides, ews); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("Nd4jLong*") @StdVector LongBuffer strides, @Cast("const Nd4jLong") long ews);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("Nd4jLong*") @StdVector long[] strides, @Cast("const Nd4jLong") long ews) { super((Pointer)null); allocate(type, order, shape, strides, ews); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("Nd4jLong*") @StdVector long[] strides, @Cast("const Nd4jLong") long ews);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras) { super((Pointer)null); allocate(type, order, shape, strides, rank, ews, extras); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras) { super((Pointer)null); allocate(type, order, shape, strides, rank, ews, extras); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);
        public ShapeDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras) { super((Pointer)null); allocate(type, order, shape, strides, rank, ews, extras); }
        private native void allocate(@Cast("const sd::DataType") int type, byte order, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] strides, int rank, @Cast("Nd4jLong") long ews, @Cast("Nd4jLong") long extras);

        public ShapeDescriptor() { super((Pointer)null); allocate(); }
        private native void allocate();

        public native int rank();
        public native @Cast("Nd4jLong") long ews();
        public native @Cast("Nd4jLong") long arrLength();
        public native char order();
        public native @Cast("sd::DataType") int dataType();
        public native @Cast("bool") boolean isEmpty();
        public native @Cast("Nd4jLong*") @StdVector LongPointer shape();
        public native @Cast("Nd4jLong*") @StdVector LongPointer strides();

        //returns minimal allocation length
        public native @Cast("Nd4jLong") long allocLength();

        // returns a Status value indicating whether the descriptor is correct
        public native @Cast("Nd4jLong") long validate();

        // we use default copy assignment operator
        public native @ByRef @Name("operator =") ShapeDescriptor put(@Const @ByRef ShapeDescriptor other);

        // we use default move assignment operator

        // equal to operator
        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ShapeDescriptor other);

        // less than operator
        public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef ShapeDescriptor other);

        public native @Cast("Nd4jLong*") LongPointer toShapeInfo();



        public native @ByVal ShapeDescriptor emptyDescriptor(@Cast("const sd::DataType") int type);
        public native @ByVal ShapeDescriptor scalarDescriptor(@Cast("const sd::DataType") int type);
        public native @ByVal ShapeDescriptor vectorDescriptor(@Cast("const Nd4jLong") long length, @Cast("const sd::DataType") int type);

        // creates a descriptor backed by a padded buffer
        public native @ByVal ShapeDescriptor paddedBufferDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongPointer shape, @Cast("Nd4jLong*") @StdVector LongPointer paddings);
        public native @ByVal ShapeDescriptor paddedBufferDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector LongBuffer shape, @Cast("Nd4jLong*") @StdVector LongBuffer paddings);
        public native @ByVal ShapeDescriptor paddedBufferDescriptor(@Cast("const sd::DataType") int type, byte order, @Cast("Nd4jLong*") @StdVector long[] shape, @Cast("Nd4jLong*") @StdVector long[] paddings);
    }
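
    // Illustrative usage sketch (not part of the generated native bindings): one way a
    // caller might build a ShapeDescriptor from plain Java arrays and inspect it.
    // `dtype` is assumed to be a valid sd::DataType code obtained elsewhere (e.g. from the
    // generated DataType constants); the shape/strides below describe a 2x3 row-major
    // ('c' order) array with an element-wise stride (ews) of 1.
    private static void shapeDescriptorUsageSketch(int dtype) {
        long[] shape   = { 2, 3 };
        long[] strides = { 3, 1 };
        ShapeDescriptor descriptor = new ShapeDescriptor(dtype, (byte) 'c', shape, strides, 1L);
        System.out.println("rank     = " + descriptor.rank());
        System.out.println("order    = " + descriptor.order());
        System.out.println("length   = " + descriptor.arrLength());
        System.out.println("validate = " + descriptor.validate());
        // The same metadata packed into a single shape-info buffer:
        LongPointer shapeInfo = descriptor.toShapeInfo();
        // Descriptors with identical type, order, shape and strides compare equal
        // through the bound C++ operator ==.
        boolean sameLayout = descriptor.equals(new ShapeDescriptor(dtype, (byte) 'c', shape, strides, 1L));
    }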


// #ifndef __JAVACPP_HACK__

// #endif


// #endif //DEV_TESTS_SHAPEDESCRIPTOR_H


// Parsed from array/TadDescriptor.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef DEV_TESTS_TADDESCRIPTOR_H
// #define DEV_TESTS_TADDESCRIPTOR_H

// #include "ShapeDescriptor.h"
// #include 
    @Namespace("sd") @NoOffset public static class TadDescriptor extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public TadDescriptor(Pointer p) { super(p); }
    
        public TadDescriptor(@Cast("const Nd4jLong*") LongPointer originalShape, @Const IntPointer dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(originalShape, dimensions, length, keepUnitiesInShape); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer originalShape, @Const IntPointer dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Cast("const Nd4jLong*") LongPointer originalShape, @Const IntPointer dimensions, int length) { super((Pointer)null); allocate(originalShape, dimensions, length); }
        private native void allocate(@Cast("const Nd4jLong*") LongPointer originalShape, @Const IntPointer dimensions, int length);
        public TadDescriptor(@Cast("const Nd4jLong*") LongBuffer originalShape, @Const IntBuffer dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(originalShape, dimensions, length, keepUnitiesInShape); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer originalShape, @Const IntBuffer dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Cast("const Nd4jLong*") LongBuffer originalShape, @Const IntBuffer dimensions, int length) { super((Pointer)null); allocate(originalShape, dimensions, length); }
        private native void allocate(@Cast("const Nd4jLong*") LongBuffer originalShape, @Const IntBuffer dimensions, int length);
        public TadDescriptor(@Cast("const Nd4jLong*") long[] originalShape, @Const int[] dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(originalShape, dimensions, length, keepUnitiesInShape); }
        private native void allocate(@Cast("const Nd4jLong*") long[] originalShape, @Const int[] dimensions, int length, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Cast("const Nd4jLong*") long[] originalShape, @Const int[] dimensions, int length) { super((Pointer)null); allocate(originalShape, dimensions, length); }
        private native void allocate(@Cast("const Nd4jLong*") long[] originalShape, @Const int[] dimensions, int length);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntPointer dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(descriptor, dimensions, keepUnitiesInShape); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntPointer dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntPointer dimensions) { super((Pointer)null); allocate(descriptor, dimensions); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntPointer dimensions);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntBuffer dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(descriptor, dimensions, keepUnitiesInShape); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntBuffer dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntBuffer dimensions) { super((Pointer)null); allocate(descriptor, dimensions); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector IntBuffer dimensions);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector int[] dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/) { super((Pointer)null); allocate(descriptor, dimensions, keepUnitiesInShape); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector int[] dimensions, @Cast("const bool") boolean keepUnitiesInShape/*=false*/);
        public TadDescriptor(@Const @ByRef ShapeDescriptor descriptor, @StdVector int[] dimensions) { super((Pointer)null); allocate(descriptor, dimensions); }
        private native void allocate(@Const @ByRef ShapeDescriptor descriptor, @StdVector int[] dimensions);
        public TadDescriptor(@Const @ByRef TadDescriptor other) { super((Pointer)null); allocate(other); }
        private native void allocate(@Const @ByRef TadDescriptor other);

        // we use default copy assignment operator
        public native @ByRef @Name("operator =") TadDescriptor put(@Const @ByRef TadDescriptor other);

        // we use default move assignment operator

        // equal to operator
        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TadDescriptor other);

        // less than operator
        public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef TadDescriptor other);

        public native @StdVector IntPointer axis();
        public native @ByRef ShapeDescriptor originalShape();
        public native @Const @ByRef ShapeDescriptor originalShapeConst();
        public native @Cast("bool") boolean areUnitiesinShape();
    }
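
    // Illustrative usage sketch (not part of the generated native bindings): building a
    // TadDescriptor that describes tensor-along-dimension (TAD) views over dimension 1
    // of a 2x3 source array. `dtype` is assumed to be a valid sd::DataType code.
    private static void tadDescriptorUsageSketch(int dtype) {
        ShapeDescriptor source = new ShapeDescriptor(
                dtype, (byte) 'c', new long[]{ 2, 3 }, new long[]{ 3, 1 }, 1L);
        // TADs along dimension 1, without keeping unit dimensions in the TAD shape.
        TadDescriptor tad = new TadDescriptor(source, new int[]{ 1 }, false);
        IntPointer axes = tad.axis();                    // dimensions the TADs run over
        ShapeDescriptor original = tad.originalShape();  // descriptor of the source array
        boolean keptUnities = tad.areUnitiesinShape();
    }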


// #ifndef __JAVACPP_HACK__

// #endif


// #endif //DEV_TESTS_TADDESCRIPTOR_H


// Parsed from helpers/DebugInfo.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by GS aka shugeo  on 3/12/19.
//

// #ifndef LIBND4J__DEBUG_INFO_HELPER__H
// #define LIBND4J__DEBUG_INFO_HELPER__H

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 

// #ifdef __CUDACC__

// #endif
    @Namespace("sd") public static class DebugInfo extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public DebugInfo() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public DebugInfo(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public DebugInfo(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public DebugInfo position(long position) {
            return (DebugInfo)super.position(position);
        }
        @Override public DebugInfo getPointer(long i) {
            return new DebugInfo((Pointer)this).position(position + i);
        }
    
       public native double _minValue(); public native DebugInfo _minValue(double setter);
       public native double _maxValue(); public native DebugInfo _maxValue(double setter);
       public native double _meanValue(); public native DebugInfo _meanValue(double setter);
       public native double _stdDevValue(); public native DebugInfo _stdDevValue(double setter);
       public native @Cast("Nd4jLong") long _zeroCount(); public native DebugInfo _zeroCount(long setter);
       public native @Cast("Nd4jLong") long _positiveCount(); public native DebugInfo _positiveCount(long setter);
       public native @Cast("Nd4jLong") long _negativeCount(); public native DebugInfo _negativeCount(long setter);
       public native @Cast("Nd4jLong") long _infCount(); public native DebugInfo _infCount(long setter);
       public native @Cast("Nd4jLong") long _nanCount(); public native DebugInfo _nanCount(long setter);
    }
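
    // Illustrative usage sketch (not part of the generated native bindings): DebugInfo is
    // a plain statistics record with a getter and a chainable setter per field. In
    // practice the native debug helpers fill it in; here it is populated and read back
    // by hand only to show the accessor pattern.
    private static void debugInfoUsageSketch() {
        DebugInfo info = new DebugInfo();
        info._minValue(-1.0)._maxValue(1.0)._nanCount(0L);
        System.out.println("min = " + info._minValue()
                + ", max = " + info._maxValue()
                + ", NaNs = " + info._nanCount());
    }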

    @Namespace("sd") public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef DebugInfo first, @Const @ByRef DebugInfo second);




// #endif //LIBND4J__DEBUG_INFO_HELPER__H


// Parsed from ops/declarable/headers/third_party.h

/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 *  See the NOTICE file distributed with this work for additional
 *  information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
//  @author [email protected]
//

// #ifndef LIBND4J_HEADERS_TPARTY_H
// #define LIBND4J_HEADERS_TPARTY_H

// #include 
//         #if NOT_EXCLUDED(OP_firas_sparse)
        @Namespace("sd::ops") public static class firas_sparse extends DeclarableCustomOp {
            static { Loader.load(); }
            /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
            public firas_sparse(Pointer p) { super(p); }
            /** Native array allocator. Access with {@link Pointer#position(long)}. */
            public firas_sparse(long size) { super((Pointer)null); allocateArray(size); }
            private native void allocateArray(long size);
            @Override public firas_sparse position(long position) {
                return (firas_sparse)super.position(position);
            }
            @Override public firas_sparse getPointer(long i) {
                return new firas_sparse((Pointer)this).position(position + i);
            }
        
            public firas_sparse() { super((Pointer)null); allocate(); }
            private native void allocate();
            public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
        }
//         #endif
    


// #endif

}